-rw-r--r--.gitignore4
-rw-r--r--.mailmap11
-rw-r--r--Documentation/ABI/obsolete/o2cb4
-rw-r--r--Documentation/ABI/removed/o2cb4
-rw-r--r--Documentation/ABI/stable/o2cb4
-rw-r--r--Documentation/ABI/testing/sysfs-bus-counter54
-rw-r--r--Documentation/ABI/testing/sysfs-bus-cxl48
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb10
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-driver-aw200xx5
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-qmi2
-rw-r--r--Documentation/ABI/testing/sysfs-devices-hisi_ptt52
-rw-r--r--Documentation/ABI/testing/sysfs-driver-eud2
-rw-r--r--Documentation/ABI/testing/sysfs-ocfs212
-rw-r--r--Documentation/admin-guide/acpi/ssdt-overlays.rst2
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt2
-rw-r--r--Documentation/admin-guide/media/rkisp1.rst4
-rw-r--r--Documentation/admin-guide/perf/cxl.rst68
-rw-r--r--Documentation/admin-guide/perf/index.rst1
-rw-r--r--Documentation/arch/arm64/silicon-errata.rst3
-rw-r--r--Documentation/devicetree/bindings/arm/arm,coresight-cti.yaml18
-rw-r--r--Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml73
-rw-r--r--Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml71
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-sysregs.txt94
-rw-r--r--Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml8
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,a53pll.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-msm8953.yaml73
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml7
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml7
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml5
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gpucc.yaml3
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,mmcc.yaml32
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sm6375-gpucc.yaml15
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sm8350-videocc.yaml68
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml4
-rw-r--r--Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/msm/gmu.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vi.yaml59
-rw-r--r--Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vip.yaml41
-rw-r--r--Documentation/devicetree/bindings/dma/stericsson,dma40.yaml36
-rw-r--r--Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml30
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml6
-rw-r--r--Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml14
-rw-r--r--Documentation/devicetree/bindings/extcon/wlf,arizona.yaml14
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml5
-rw-r--r--Documentation/devicetree/bindings/iio/adc/mediatek,mt2701-auxadc.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml28
-rw-r--r--Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/afe/voltage-divider.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/light/rohm,bu27008.yaml49
-rw-r--r--Documentation/devicetree/bindings/iio/light/ti,opt4001.yaml68
-rw-r--r--Documentation/devicetree/bindings/iio/potentiometer/renesas,x9250.yaml78
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml104
-rw-r--r--Documentation/devicetree/bindings/iio/st,st-sensors.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/temperature/melexis,mlx90614.yaml6
-rw-r--r--Documentation/devicetree/bindings/iio/temperature/ti,tmp006.yaml42
-rw-r--r--Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml126
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/kinetic,ktz8866.yaml30
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/lp855x-backlight.yaml149
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/lp855x.txt72
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml1
-rw-r--r--Documentation/devicetree/bindings/leds/common.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/leds-lp55xx.yaml10
-rw-r--r--Documentation/devicetree/bindings/leds/leds-mt6323.txt5
-rw-r--r--Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml30
-rw-r--r--Documentation/devicetree/bindings/leds/leds-sgm3140.yaml1
-rw-r--r--Documentation/devicetree/bindings/leds/qcom,spmi-flash-led.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml2
-rw-r--r--Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt26
-rw-r--r--Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.yaml40
-rw-r--r--Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml1
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/i2c/maxim,max96712.yaml7
-rw-r--r--Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml4
-rw-r--r--Documentation/devicetree/bindings/media/renesas,vin.yaml4
-rw-r--r--Documentation/devicetree/bindings/media/rockchip-vpu.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/adi,max77541.yaml68
-rw-r--r--Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml10
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml6
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/richtek,rt5033.yaml138
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml25
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stpmic1.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,tps6594.yaml193
-rw-r--r--Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml1
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-dwmac.yaml2
-rw-r--r--Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml15
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml26
-rw-r--r--Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml31
-rw-r--r--Documentation/devicetree/bindings/nvmem/layouts/fixed-layout.yaml50
-rw-r--r--Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml5
-rw-r--r--Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml8
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.yaml61
-rw-r--r--Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml6
-rw-r--r--Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml6
-rw-r--r--Documentation/devicetree/bindings/nvmem/rmem.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml122
-rw-r--r--Documentation/devicetree/bindings/nvmem/rockchip-otp.txt25
-rw-r--r--Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml3
-rw-r--r--Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml6
-rw-r--r--Documentation/devicetree/bindings/perf/amlogic,g12-ddr-pmu.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt15
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.yaml36
-rw-r--r--Documentation/devicetree/bindings/phy/cdns,salvo-phy.yaml6
-rw-r--r--Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml47
-rw-r--r--Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml128
-rw-r--r--Documentation/devicetree/bindings/phy/intel,combo-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml9
-rw-r--r--Documentation/devicetree/bindings/phy/mxs-usb-phy.txt33
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml3
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml18
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml27
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml3
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sa8775p-dwmac-sgmii-phy.yaml55
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml6
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml65
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml51
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml5
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/reset/atmel,at91sam9260-shdwc.yaml82
-rw-r--r--Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml114
-rw-r--r--Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.txt26
-rw-r--r--Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml52
-rw-r--r--Documentation/devicetree/bindings/power/reset/qcom,pon.yaml20
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq256xx.yaml18
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml82
-rw-r--r--Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/supply/richtek,rt5033-charger.yaml65
-rw-r--r--Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml1
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-pwm.yaml4
-rw-r--r--Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml1
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt30
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-bcm2835.yaml43
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm.yaml2
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/adi,max77541-regulator.yaml38
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml10
-rw-r--r--Documentation/devicetree/bindings/remoteproc/amlogic,meson-mx-ao-arc.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml44
-rw-r--r--Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt55
-rw-r--r--Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml52
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.yaml43
-rw-r--r--Documentation/devicetree/bindings/riscv/extensions.yaml250
-rw-r--r--Documentation/devicetree/bindings/rtc/isil,isl1208.txt38
-rw-r--r--Documentation/devicetree/bindings/rtc/isil,isl1208.yaml100
-rw-r--r--Documentation/devicetree/bindings/rtc/loongson,rtc.yaml57
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/trivial-rtc.yaml2
-rw-r--r--Documentation/devicetree/bindings/slimbus/slimbus.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml2
-rw-r--r--Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml39
-rw-r--r--Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml2
-rw-r--r--Documentation/devicetree/bindings/ufs/qcom,ufs.yaml9
-rw-r--r--Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/fsl,imx8qm-cdns3.yaml103
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ehci.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ohci.yaml17
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/microchip,usb5744.yaml107
-rw-r--r--Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml141
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml24
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,pmic-typec.yaml190
-rw-r--r--Documentation/devicetree/bindings/usb/snps,dwc3.yaml6
-rw-r--r--Documentation/devicetree/bindings/usb/starfive,jh7110-usb.yaml115
-rw-r--r--Documentation/devicetree/bindings/usb/ti,am62-usb.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/usb251xb.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.txt15
-rw-r--r--Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.yaml41
-rw-r--r--Documentation/devicetree/bindings/watchdog/cadence-wdt.txt23
-rw-r--r--Documentation/devicetree/bindings/watchdog/cdns,wdt-r1p2.yaml62
-rw-r--r--Documentation/devicetree/bindings/watchdog/watchdog.yaml4
-rw-r--r--Documentation/devicetree/bindings/watchdog/xlnx,versal-wwdt.yaml50
-rw-r--r--Documentation/driver-api/driver-model/devres.rst1
-rw-r--r--Documentation/driver-api/media/camera-sensor.rst22
-rw-r--r--Documentation/driver-api/ntb.rst4
-rw-r--r--Documentation/fault-injection/provoke-crashes.rst2
-rw-r--r--Documentation/filesystems/dlmfs.rst2
-rw-r--r--Documentation/filesystems/f2fs.rst16
-rw-r--r--Documentation/filesystems/ocfs2.rst2
-rw-r--r--Documentation/i2c/busses/i2c-i801.rst2
-rw-r--r--Documentation/kbuild/kbuild.rst6
-rw-r--r--Documentation/leds/index.rst1
-rw-r--r--Documentation/leds/leds-cht-wcove.rst38
-rw-r--r--Documentation/leds/well-known-leds.txt3
-rw-r--r--Documentation/misc-devices/index.rst1
-rw-r--r--Documentation/misc-devices/tps6594-pfsm.rst87
-rw-r--r--Documentation/networking/af_xdp.rst9
-rw-r--r--Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst7
-rw-r--r--Documentation/networking/device_drivers/ethernet/amd/pds_vdpa.rst85
-rw-r--r--Documentation/networking/device_drivers/ethernet/index.rst1
-rw-r--r--Documentation/process/6.Followthrough.rst7
-rw-r--r--Documentation/process/changes.rst14
-rw-r--r--Documentation/process/maintainer-handbooks.rst1
-rw-r--r--Documentation/process/maintainer-kvm-x86.rst390
-rw-r--r--Documentation/process/maintainer-netdev.rst7
-rw-r--r--Documentation/process/maintainer-tip.rst2
-rw-r--r--Documentation/riscv/acpi.rst10
-rw-r--r--Documentation/riscv/index.rst1
-rw-r--r--Documentation/riscv/vector.rst8
-rw-r--r--Documentation/target/scripts.rst6
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py656
-rw-r--r--Documentation/tools/rtla/common_options.rst8
-rw-r--r--Documentation/tools/rtla/common_timerlat_aa.rst7
-rw-r--r--Documentation/tools/rtla/common_timerlat_options.rst7
-rw-r--r--Documentation/tools/rtla/rtla-timerlat-hist.rst7
-rw-r--r--Documentation/tools/rtla/rtla-timerlat-top.rst7
-rw-r--r--Documentation/trace/coresight/coresight-dummy.rst32
-rw-r--r--Documentation/trace/hisi-ptt.rst12
-rw-r--r--Documentation/translations/zh_CN/process/2.Process.rst2
-rw-r--r--Documentation/translations/zh_CN/process/3.Early-stage.rst2
-rw-r--r--Documentation/translations/zh_CN/process/4.Coding.rst2
-rw-r--r--Documentation/translations/zh_CN/process/7.AdvancedTopics.rst2
-rw-r--r--Documentation/translations/zh_TW/process/2.Process.rst2
-rw-r--r--Documentation/translations/zh_TW/process/3.Early-stage.rst2
-rw-r--r--Documentation/translations/zh_TW/process/4.Coding.rst2
-rw-r--r--Documentation/translations/zh_TW/process/7.AdvancedTopics.rst2
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst1
-rw-r--r--Documentation/userspace-api/media/frontend.h.rst.exceptions4
-rw-r--r--Documentation/userspace-api/media/v4l/biblio.rst9
-rw-r--r--Documentation/userspace-api/media/v4l/ext-ctrls-camera.rst3
-rw-r--r--Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst1216
-rw-r--r--Documentation/userspace-api/media/v4l/meta-formats.rst14
-rw-r--r--Documentation/userspace-api/media/v4l/metafmt-d4xx.rst (renamed from Documentation/userspace-api/media/v4l/pixfmt-meta-d4xx.rst)55
-rw-r--r--Documentation/userspace-api/media/v4l/metafmt-intel-ipu3.rst (renamed from Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst)0
-rw-r--r--Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst (renamed from Documentation/userspace-api/media/v4l/pixfmt-meta-rkisp1.rst)0
-rw-r--r--Documentation/userspace-api/media/v4l/metafmt-uvc.rst (renamed from Documentation/userspace-api/media/v4l/pixfmt-meta-uvc.rst)0
-rw-r--r--Documentation/userspace-api/media/v4l/metafmt-vivid.rst (renamed from Documentation/userspace-api/media/v4l/pixfmt-meta-vivid.rst)0
-rw-r--r--Documentation/userspace-api/media/v4l/metafmt-vsp1-hgo.rst (renamed from Documentation/userspace-api/media/v4l/pixfmt-meta-vsp1-hgo.rst)0
-rw-r--r--Documentation/userspace-api/media/v4l/metafmt-vsp1-hgt.rst (renamed from Documentation/userspace-api/media/v4l/pixfmt-meta-vsp1-hgt.rst)0
-rw-r--r--Documentation/userspace-api/media/v4l/pixfmt-compressed.rst16
-rw-r--r--Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst16
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst20
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst24
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst2
-rw-r--r--Documentation/userspace-api/media/videodev2.h.rst.exceptions4
-rw-r--r--Documentation/virt/kvm/api.rst27
-rw-r--r--Documentation/virt/kvm/x86/amd-memory-encryption.rst2
-rw-r--r--Documentation/virt/kvm/x86/mmu.rst2
-rw-r--r--MAINTAINERS189
-rw-r--r--Makefile100
-rw-r--r--arch/arc/include/asm/linkage.h8
-rw-r--r--arch/arc/mm/init.c2
-rw-r--r--arch/arm/boot/dts/st/spear1310.dtsi2
-rw-r--r--arch/arm/boot/dts/st/spear1340.dtsi2
-rw-r--r--arch/arm/boot/dts/st/stih407-family.dtsi2
-rw-r--r--arch/arm/boot/dts/st/stih407-pinctrl.dtsi10
-rw-r--r--arch/arm/boot/dts/st/stm32f429-disco.dts2
-rw-r--r--arch/arm/boot/dts/st/stm32f746-pinctrl.dtsi2
-rw-r--r--arch/arm/boot/dts/st/stm32f769-pinctrl.dtsi2
-rw-r--r--arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi6
-rw-r--r--arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi4
-rw-r--r--arch/arm/boot/dts/st/stm32mp157a-icore-stm32mp1.dtsi6
-rw-r--r--arch/arm/boot/dts/st/stm32mp157a-microgea-stm32mp1.dtsi6
-rw-r--r--arch/arm/common/sharpsl_param.c2
-rw-r--r--arch/arm/include/asm/delay.h2
-rw-r--r--arch/arm/include/asm/io.h2
-rw-r--r--arch/arm/include/asm/memory.h17
-rw-r--r--arch/arm/include/asm/page.h4
-rw-r--r--arch/arm/include/asm/pgtable.h2
-rw-r--r--arch/arm/include/asm/proc-fns.h2
-rw-r--r--arch/arm/include/asm/sparsemem.h2
-rw-r--r--arch/arm/include/asm/uaccess-asm.h2
-rw-r--r--arch/arm/include/asm/uaccess.h2
-rw-r--r--arch/arm/kernel/asm-offsets.c2
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/entry-common.S2
-rw-r--r--arch/arm/kernel/entry-v7m.S2
-rw-r--r--arch/arm/kernel/head-nommu.S3
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/hibernate.c2
-rw-r--r--arch/arm/kernel/suspend.c2
-rw-r--r--arch/arm/kernel/tcm.c2
-rw-r--r--arch/arm/kernel/vmlinux-xip.lds.S3
-rw-r--r--arch/arm/kernel/vmlinux.lds.S3
-rw-r--r--arch/arm/mach-berlin/platsmp.c2
-rw-r--r--arch/arm/mach-keystone/keystone.c2
-rw-r--r--arch/arm/mach-omap2/sleep33xx.S2
-rw-r--r--arch/arm/mach-omap2/sleep43xx.S2
-rw-r--r--arch/arm/mach-omap2/sleep44xx.S2
-rw-r--r--arch/arm/mach-pxa/gumstix.c2
-rw-r--r--arch/arm/mach-rockchip/sleep.S2
-rw-r--r--arch/arm/mach-sa1100/pm.c2
-rw-r--r--arch/arm/mach-shmobile/headsmp-scu.S2
-rw-r--r--arch/arm/mach-shmobile/headsmp.S2
-rw-r--r--arch/arm/mach-socfpga/headsmp.S2
-rw-r--r--arch/arm/mach-spear/spear.h2
-rw-r--r--arch/arm/mm/cache-fa.S1
-rw-r--r--arch/arm/mm/cache-v4wb.S1
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/dump.c2
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/kasan_init.c1
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm/mm/physaddr.c2
-rw-r--r--arch/arm/mm/pmsa-v8.c2
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/mm/proc-v7m.S2
-rw-r--r--arch/arm/mm/pv-fixup-asm.S2
-rw-r--r--arch/arm64/Kconfig19
-rw-r--r--arch/arm64/include/asm/cpufeature.h6
-rw-r--r--arch/arm64/include/asm/el2_setup.h27
-rw-r--r--arch/arm64/include/asm/fpsimdmacros.h6
-rw-r--r--arch/arm64/include/asm/kvm_arm.h7
-rw-r--r--arch/arm64/include/asm/kvm_asm.h4
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h46
-rw-r--r--arch/arm64/include/asm/kvm_host.h61
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h37
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h4
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h79
-rw-r--r--arch/arm64/include/asm/kvm_pkvm.h21
-rw-r--r--arch/arm64/include/asm/memory.h9
-rw-r--r--arch/arm64/include/asm/sysreg.h1
-rw-r--r--arch/arm64/include/asm/virt.h12
-rw-r--r--arch/arm64/include/uapi/asm/bitsperlong.h24
-rw-r--r--arch/arm64/kernel/asm-offsets.c2
-rw-r--r--arch/arm64/kernel/cpu_errata.c7
-rw-r--r--arch/arm64/kernel/cpufeature.c34
-rw-r--r--arch/arm64/kernel/head.S2
-rw-r--r--arch/arm64/kernel/hyp-stub.S10
-rw-r--r--arch/arm64/kernel/idreg-override.c25
-rw-r--r--arch/arm64/kernel/kaslr.c6
-rw-r--r--arch/arm64/kernel/vdso32/Makefile2
-rw-r--r--arch/arm64/kvm/arch_timer.c14
-rw-r--r--arch/arm64/kvm/arm.c201
-rw-r--r--arch/arm64/kvm/fpsimd.c4
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h101
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/ffa.h17
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mem_protect.h3
-rw-r--r--arch/arm64/kvm/hyp/nvhe/Makefile2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/ffa.c762
-rw-r--r--arch/arm64/kvm/hyp/nvhe/host.S36
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-init.S32
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c19
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c74
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c27
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c11
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c28
-rw-r--r--arch/arm64/kvm/hyp/nvhe/timer-sr.c16
-rw-r--r--arch/arm64/kvm/hyp/nvhe/tlb.c52
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c228
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c2
-rw-r--r--arch/arm64/kvm/hyp/vhe/tlb.c32
-rw-r--r--arch/arm64/kvm/mmu.c207
-rw-r--r--arch/arm64/kvm/pkvm.c1
-rw-r--r--arch/arm64/kvm/reset.c58
-rw-r--r--arch/arm64/kvm/sys_regs.c505
-rw-r--r--arch/arm64/kvm/sys_regs.h22
-rw-r--r--arch/arm64/mm/fault.c2
-rw-r--r--arch/arm64/tools/cpucaps3
-rw-r--r--arch/csky/kernel/probes/uprobes.c3
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/export.h3
-rw-r--r--arch/ia64/kernel/head.S2
-rw-r--r--arch/ia64/kernel/ivt.S2
-rw-r--r--arch/loongarch/include/uapi/asm/bitsperlong.h9
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h3
-rw-r--r--arch/m68k/include/asm/page_mm.h11
-rw-r--r--arch/m68k/include/asm/page_no.h11
-rw-r--r--arch/m68k/include/asm/sun3_pgtable.h4
-rw-r--r--arch/m68k/mm/mcfmmu.c3
-rw-r--r--arch/m68k/mm/motorola.c4
-rw-r--r--arch/m68k/mm/sun3mmu.c2
-rw-r--r--arch/m68k/sun3/dvma.c2
-rw-r--r--arch/m68k/sun3x/dvma.c2
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/alchemy/common/platform.c10
-rw-r--r--arch/parisc/Kconfig12
-rw-r--r--arch/parisc/Kconfig.debug11
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig54
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig47
-rw-r--r--arch/parisc/include/asm/irqflags.h5
-rw-r--r--arch/parisc/include/asm/pdc.h4
-rw-r--r--arch/parisc/include/asm/processor.h37
-rw-r--r--arch/parisc/include/uapi/asm/cachectl.h12
-rw-r--r--arch/parisc/kernel/Makefile3
-rw-r--r--arch/parisc/kernel/audit.c9
-rw-r--r--arch/parisc/kernel/cache.c49
-rw-r--r--arch/parisc/kernel/compat_audit.c16
-rw-r--r--arch/parisc/kernel/drivers.c33
-rw-r--r--arch/parisc/kernel/firmware.c61
-rw-r--r--arch/parisc/kernel/irq.c5
-rw-r--r--arch/parisc/kernel/module.c2
-rw-r--r--arch/parisc/kernel/pci-dma.c2
-rw-r--r--arch/parisc/kernel/pdc_chassis.c17
-rw-r--r--arch/parisc/kernel/pdt.c2
-rw-r--r--arch/parisc/kernel/perf.c2
-rw-r--r--arch/parisc/kernel/processor.c2
-rw-r--r--arch/parisc/kernel/setup.c23
-rw-r--r--arch/parisc/kernel/signal.c10
-rw-r--r--arch/parisc/kernel/smp.c2
-rw-r--r--arch/parisc/kernel/sys_parisc.c3
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/parisc/kernel/traps.c7
-rw-r--r--arch/parisc/kernel/unaligned.c1
-rw-r--r--arch/parisc/kernel/unwind.c10
-rw-r--r--arch/parisc/math-emu/Makefile3
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/Kconfig.debug2
-rw-r--r--arch/powerpc/boot/dts/turris1x.dts6
-rw-r--r--arch/powerpc/kernel/legacy_serial.c14
-rw-r--r--arch/powerpc/kernel/vdso/Makefile2
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c1
-rw-r--r--arch/riscv/Kconfig20
-rw-r--r--arch/riscv/Kconfig.socs1
-rw-r--r--arch/riscv/errata/thead/errata.c7
-rw-r--r--arch/riscv/include/asm/csr.h2
-rw-r--r--arch/riscv/include/asm/kvm_aia.h107
-rw-r--r--arch/riscv/include/asm/kvm_aia_aplic.h58
-rw-r--r--arch/riscv/include/asm/kvm_aia_imsic.h38
-rw-r--r--arch/riscv/include/asm/kvm_host.h4
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_sbi.h11
-rw-r--r--arch/riscv/include/asm/vector.h34
-rw-r--r--arch/riscv/include/uapi/asm/bitsperlong.h14
-rw-r--r--arch/riscv/include/uapi/asm/kvm.h73
-rw-r--r--arch/riscv/include/uapi/asm/sigcontext.h4
-rw-r--r--arch/riscv/kernel/smp.c1
-rw-r--r--arch/riscv/kernel/smpboot.c5
-rw-r--r--arch/riscv/kernel/traps.c10
-rw-r--r--arch/riscv/kernel/vdso.c1
-rw-r--r--arch/riscv/kernel/vector.c1
-rw-r--r--arch/riscv/kernel/vmlinux-xip.lds.S6
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S6
-rw-r--r--arch/riscv/kvm/Kconfig4
-rw-r--r--arch/riscv/kvm/Makefile3
-rw-r--r--arch/riscv/kvm/aia.c274
-rw-r--r--arch/riscv/kvm/aia_aplic.c619
-rw-r--r--arch/riscv/kvm/aia_device.c673
-rw-r--r--arch/riscv/kvm/aia_imsic.c1084
-rw-r--r--arch/riscv/kvm/main.c3
-rw-r--r--arch/riscv/kvm/tlb.c2
-rw-r--r--arch/riscv/kvm/vcpu.c4
-rw-r--r--arch/riscv/kvm/vcpu_exit.c2
-rw-r--r--arch/riscv/kvm/vcpu_sbi.c80
-rw-r--r--arch/riscv/kvm/vm.c118
-rw-r--r--arch/riscv/mm/cacheflush.c8
-rw-r--r--arch/riscv/mm/dma-noncoherent.c2
-rw-r--r--arch/riscv/mm/hugetlbpage.c6
-rw-r--r--arch/riscv/mm/init.c8
-rw-r--r--arch/s390/Makefile1
-rw-r--r--arch/s390/appldata/appldata_base.c4
-rw-r--r--arch/s390/appldata/appldata_mem.c2
-rw-r--r--arch/s390/boot/head.S16
-rw-r--r--arch/s390/boot/head_kdump.S6
-rw-r--r--arch/s390/boot/uv.c4
-rw-r--r--arch/s390/crypto/crc32be-vx.S2
-rw-r--r--arch/s390/include/asm/ap.h2
-rw-r--r--arch/s390/include/asm/appldata.h2
-rw-r--r--arch/s390/include/asm/asm-extable.h4
-rw-r--r--arch/s390/include/asm/dma.h2
-rw-r--r--arch/s390/include/asm/lowcore.h4
-rw-r--r--arch/s390/include/asm/page.h6
-rw-r--r--arch/s390/include/asm/ptrace.h52
-rw-r--r--arch/s390/include/asm/uv.h32
-rw-r--r--arch/s390/include/uapi/asm/cmb.h2
-rw-r--r--arch/s390/include/uapi/asm/dasd.h2
-rw-r--r--arch/s390/include/uapi/asm/pkey.h6
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h121
-rw-r--r--arch/s390/include/uapi/asm/uvdevice.h53
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/cpcmd.c2
-rw-r--r--arch/s390/kernel/dis.c4
-rw-r--r--arch/s390/kernel/entry.S14
-rw-r--r--arch/s390/kernel/head64.S2
-rw-r--r--arch/s390/kernel/kprobes_insn_page.S2
-rw-r--r--arch/s390/kernel/nospec-branch.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c32
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c58
-rw-r--r--arch/s390/kernel/perf_pai_ext.c4
-rw-r--r--arch/s390/kernel/process.c2
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/smp.c2
-rw-r--r--arch/s390/kernel/time.c2
-rw-r--r--arch/s390/kernel/uv.c108
-rw-r--r--arch/s390/kernel/vdso32/Makefile6
-rw-r--r--arch/s390/kernel/vdso64/Makefile6
-rw-r--r--arch/s390/kvm/diag.c8
-rw-r--r--arch/s390/kvm/gaccess.c4
-rw-r--r--arch/s390/kvm/intercept.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/pci.c2
-rw-r--r--arch/s390/kvm/priv.c3
-rw-r--r--arch/s390/kvm/pv.c2
-rw-r--r--arch/s390/kvm/sigp.c2
-rw-r--r--arch/s390/kvm/vsie.c10
-rw-r--r--arch/s390/lib/spinlock.c2
-rw-r--r--arch/s390/mm/gmap.c2
-rw-r--r--arch/s390/mm/maccess.c2
-rw-r--r--arch/s390/mm/vmem.c4
-rw-r--r--arch/s390/net/bpf_jit_comp.c4
-rw-r--r--arch/s390/pci/pci_irq.c6
-rw-r--r--arch/s390/purgatory/head.S2
-rw-r--r--arch/sh/Kbuild2
-rw-r--r--arch/sh/Makefile26
-rw-r--r--arch/sh/boards/Makefile19
-rw-r--r--arch/sh/drivers/dma/Kconfig14
-rw-r--r--arch/sh/drivers/dma/dma-sh.c37
-rw-r--r--arch/sh/include/asm/io.h1
-rw-r--r--arch/sh/include/cpu-sh4/cpu/dma.h1
-rw-r--r--arch/sh/include/mach-common/mach/highlander.h2
-rw-r--r--arch/sh/include/mach-common/mach/r2d.h2
-rw-r--r--arch/sh/include/mach-dreamcast/mach/sysasic.h2
-rw-r--r--arch/sh/include/mach-se/mach/se7724.h2
-rw-r--r--arch/sh/kernel/cpu/sh2/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh3/entry.S4
-rw-r--r--arch/sparc/video/fbdev.c3
-rw-r--r--arch/um/os-Linux/user_syms.c7
-rw-r--r--arch/x86/include/asm/kvm-x86-pmu-ops.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/kvm/cpuid.c43
-rw-r--r--arch/x86/kvm/i8259.c3
-rw-r--r--arch/x86/kvm/lapic.c5
-rw-r--r--arch/x86/kvm/mmu/mmu.c53
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c5
-rw-r--r--arch/x86/kvm/mtrr.c64
-rw-r--r--arch/x86/kvm/pmu.c92
-rw-r--r--arch/x86/kvm/pmu.h56
-rw-r--r--arch/x86/kvm/reverse_cpuid.h7
-rw-r--r--arch/x86/kvm/svm/pmu.c68
-rw-r--r--arch/x86/kvm/svm/sev.c19
-rw-r--r--arch/x86/kvm/svm/svm.c56
-rw-r--r--arch/x86/kvm/svm/svm.h1
-rw-r--r--arch/x86/kvm/vmx/capabilities.h4
-rw-r--r--arch/x86/kvm/vmx/nested.c7
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c79
-rw-r--r--arch/x86/kvm/vmx/sgx.c15
-rw-r--r--arch/x86/kvm/vmx/vmenter.S2
-rw-r--r--arch/x86/kvm/vmx/vmx.c77
-rw-r--r--arch/x86/kvm/vmx/vmx.h12
-rw-r--r--arch/x86/kvm/x86.c80
-rw-r--r--arch/x86/kvm/x86.h1
-rw-r--r--arch/x86/platform/efi/efi_64.c6
-rw-r--r--block/blk-cgroup.c6
-rw-r--r--block/blk-iocost.c7
-rw-r--r--block/blk-mq.c54
-rw-r--r--block/blk-sysfs.c181
-rw-r--r--block/blk-throttle.c6
-rw-r--r--block/blk-throttle.h9
-rw-r--r--block/blk-wbt.c21
-rw-r--r--block/blk-wbt.h19
-rw-r--r--drivers/accessibility/speakup/Kconfig1
-rw-r--r--drivers/accessibility/speakup/main.c2
-rw-r--r--drivers/acpi/acpi_platform.c31
-rw-r--r--drivers/acpi/bus.c31
-rw-r--r--drivers/acpi/device_sysfs.c2
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/scan.c24
-rw-r--r--drivers/android/binder.c15
-rw-r--r--drivers/android/binder_internal.h3
-rw-r--r--drivers/ata/ahci_platform.c8
-rw-r--r--drivers/base/isa.c7
-rw-r--r--drivers/base/node.c8
-rw-r--r--drivers/base/property.c12
-rw-r--r--drivers/bluetooth/btqca.c2
-rw-r--r--drivers/bluetooth/btrtl.c18
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/bluetooth/hci_bcm.c3
-rw-r--r--drivers/bluetooth/virtio_bt.c1
-rw-r--r--drivers/bus/fsl-mc/dprc-driver.c6
-rw-r--r--drivers/bus/intel-ixp4xx-eb.c2
-rw-r--r--drivers/cdrom/gdrom.c4
-rw-r--r--drivers/cdx/cdx.c18
-rw-r--r--drivers/cdx/controller/Kconfig10
-rw-r--r--drivers/cdx/controller/mcdi.c86
-rw-r--r--drivers/cdx/controller/mcdi.h6
-rw-r--r--drivers/char/Kconfig3
-rw-r--r--drivers/char/bsr.c21
-rw-r--r--drivers/char/dsp56k.c18
-rw-r--r--drivers/char/lp.c18
-rw-r--r--drivers/char/mem.c15
-rw-r--r--drivers/char/misc.c39
-rw-r--r--drivers/char/ppdev.c19
-rw-r--r--drivers/char/virtio_console.c25
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c52
-rw-r--r--drivers/char/xillybus/xillybus_class.c21
-rw-r--r--drivers/clk/at91/at91rm9200.c14
-rw-r--r--drivers/clk/at91/at91sam9260.c14
-rw-r--r--drivers/clk/at91/at91sam9g45.c16
-rw-r--r--drivers/clk/at91/at91sam9n12.c14
-rw-r--r--drivers/clk/at91/at91sam9rl.c14
-rw-r--r--drivers/clk/at91/at91sam9x5.c20
-rw-r--r--drivers/clk/at91/clk-generated.c11
-rw-r--r--drivers/clk/at91/clk-main.c32
-rw-r--r--drivers/clk/at91/clk-master.c28
-rw-r--r--drivers/clk/at91/clk-peripheral.c22
-rw-r--r--drivers/clk/at91/clk-programmable.c11
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c17
-rw-r--r--drivers/clk/at91/clk-system.c12
-rw-r--r--drivers/clk/at91/clk-utmi.c24
-rw-r--r--drivers/clk/at91/dt-compat.c23
-rw-r--r--drivers/clk/at91/pmc.h36
-rw-r--r--drivers/clk/at91/sam9x60.c20
-rw-r--r--drivers/clk/at91/sama5d2.c20
-rw-r--r--drivers/clk/at91/sama5d3.c16
-rw-r--r--drivers/clk/at91/sama5d4.c18
-rw-r--r--drivers/clk/at91/sama7g5.c796
-rw-r--r--drivers/clk/at91/sckc.c75
-rw-r--r--drivers/clk/qcom/Kconfig133
-rw-r--r--drivers/clk/qcom/Makefile7
-rw-r--r--drivers/clk/qcom/apss-ipq-pll.c19
-rw-r--r--drivers/clk/qcom/camcc-sc7180.c19
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c21
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h3
-rw-r--r--drivers/clk/qcom/clk-branch.c6
-rw-r--r--drivers/clk/qcom/clk-cbf-8996.c60
-rw-r--r--drivers/clk/qcom/clk-rcg.h2
-rw-r--r--drivers/clk/qcom/clk-rcg2.c2
-rw-r--r--drivers/clk/qcom/clk-rpmh.c19
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c58
-rw-r--r--drivers/clk/qcom/dispcc-qcm2290.c12
-rw-r--r--drivers/clk/qcom/gcc-ipq5332.c6
-rw-r--r--drivers/clk/qcom/gcc-ipq6018.c45
-rw-r--r--drivers/clk/qcom/gcc-ipq9574.c506
-rw-r--r--drivers/clk/qcom/gcc-qcm2290.c62
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c18
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c2
-rw-r--r--drivers/clk/qcom/gcc-sdx75.c2970
-rw-r--r--drivers/clk/qcom/gcc-sm6115.c8
-rw-r--r--drivers/clk/qcom/gcc-sm8450.c39
-rw-r--r--drivers/clk/qcom/gpucc-sc8280xp.c19
-rw-r--r--drivers/clk/qcom/gpucc-sm6375.c19
-rw-r--r--drivers/clk/qcom/gpucc-sm8450.c766
-rw-r--r--drivers/clk/qcom/gpucc-sm8550.c611
-rw-r--r--drivers/clk/qcom/lpasscc-sc8280xp.c87
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c23
-rw-r--r--drivers/clk/qcom/videocc-sm8350.c552
-rw-r--r--drivers/clk/qcom/videocc-sm8450.c463
-rw-r--r--drivers/clk/qcom/videocc-sm8550.c470
-rw-r--r--drivers/clk/tegra/clk-tegra-super-cclk.c15
-rw-r--r--drivers/comedi/Kconfig103
-rw-r--r--drivers/comedi/comedi_fops.c47
-rw-r--r--drivers/comedi/drivers/comedi_test.c23
-rw-r--r--drivers/counter/104-quad-8.c802
-rw-r--r--drivers/counter/Kconfig23
-rw-r--r--drivers/counter/Makefile1
-rw-r--r--drivers/counter/counter-sysfs.c8
-rw-r--r--drivers/counter/i8254.c447
-rw-r--r--drivers/counter/stm32-timer-cnt.c3
-rw-r--r--drivers/cpufreq/Kconfig3
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c16
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c5
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c30
-rw-r--r--drivers/cpufreq/intel_pstate.c58
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c13
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c2
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c58
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c58
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c6
-rw-r--r--drivers/cpufreq/ti-cpufreq.c1
-rw-r--r--drivers/crypto/Kconfig11
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c1
-rw-r--r--drivers/cxl/Kconfig14
-rw-r--r--drivers/cxl/acpi.c208
-rw-r--r--drivers/cxl/core/Makefile1
-rw-r--r--drivers/cxl/core/core.h11
-rw-r--r--drivers/cxl/core/hdm.c48
-rw-r--r--drivers/cxl/core/mbox.c339
-rw-r--r--drivers/cxl/core/memdev.c503
-rw-r--r--drivers/cxl/core/pci.c31
-rw-r--r--drivers/cxl/core/pmem.c2
-rw-r--r--drivers/cxl/core/pmu.c68
-rw-r--r--drivers/cxl/core/port.c163
-rw-r--r--drivers/cxl/core/region.c168
-rw-r--r--drivers/cxl/core/regs.c182
-rw-r--r--drivers/cxl/cxl.h104
-rw-r--r--drivers/cxl/cxlmem.h229
-rw-r--r--drivers/cxl/cxlpci.h1
-rw-r--r--drivers/cxl/mem.c26
-rw-r--r--drivers/cxl/pci.c486
-rw-r--r--drivers/cxl/pmem.c35
-rw-r--r--drivers/cxl/pmu.h28
-rw-r--r--drivers/cxl/port.c21
-rw-r--r--drivers/cxl/security.c27
-rw-r--r--drivers/dax/bus.c64
-rw-r--r--drivers/dax/bus.h8
-rw-r--r--drivers/dax/cxl.c8
-rw-r--r--drivers/dax/dax-private.h11
-rw-r--r--drivers/dax/device.c3
-rw-r--r--drivers/dax/hmem/hmem.c8
-rw-r--r--drivers/dax/kmem.c2
-rw-r--r--drivers/dax/pmem.c7
-rw-r--r--drivers/dax/super.c5
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/dma-axi-dmac.c1
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c1
-rw-r--r--drivers/dma/dw-edma/Makefile8
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c86
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h58
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c4
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c85
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.h14
-rw-r--r--drivers/dma/dw-edma/dw-hdma-v0-core.c296
-rw-r--r--drivers/dma/dw-edma/dw-hdma-v0-core.h17
-rw-r--r--drivers/dma/dw-edma/dw-hdma-v0-debugfs.c170
-rw-r--r--drivers/dma/dw-edma/dw-hdma-v0-debugfs.h22
-rw-r--r--drivers/dma/dw-edma/dw-hdma-v0-regs.h129
-rw-r--r--drivers/dma/ioat/dma.c12
-rw-r--r--drivers/dma/plx_dma.c1
-rw-r--r--drivers/dma/qcom/Kconfig1
-rw-r--r--drivers/dma/qcom/bam_dma.c18
-rw-r--r--drivers/dma/qcom/hidma.c1
-rw-r--r--drivers/dma/sprd-dma.c1
-rw-r--r--drivers/dma/ste_dma40.c338
-rw-r--r--drivers/dma/ste_dma40.h (renamed from include/linux/platform_data/dma-ste-dma40.h)101
-rw-r--r--drivers/dma/ste_dma40_ll.c3
-rw-r--r--drivers/dma/ti/k3-psil-j721s2.c11
-rw-r--r--drivers/dma/ti/k3-udma.c25
-rw-r--r--drivers/extcon/Kconfig1
-rw-r--r--drivers/extcon/extcon-axp288.c2
-rw-r--r--drivers/extcon/extcon-fsa9480.c2
-rw-r--r--drivers/extcon/extcon-palmas.c1
-rw-r--r--drivers/extcon/extcon-ptn5150.c2
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c4
-rw-r--r--drivers/extcon/extcon-rt8973a.c2
-rw-r--r--drivers/extcon/extcon-sm5502.c2
-rw-r--r--drivers/extcon/extcon-usbc-tusb320.c155
-rw-r--r--drivers/extcon/extcon.c368
-rw-r--r--drivers/extcon/extcon.h8
-rw-r--r--drivers/firewire/.kunitconfig4
-rw-r--r--drivers/firewire/Kconfig16
-rw-r--r--drivers/firewire/Makefile3
-rw-r--r--drivers/firewire/core-cdev.c252
-rw-r--r--drivers/firewire/core-device.c2
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core-transaction.c93
-rw-r--r--drivers/firewire/core.h7
-rw-r--r--drivers/firewire/net.c6
-rw-r--r--drivers/firewire/ohci.c191
-rw-r--r--drivers/firewire/uapi-test.c89
-rw-r--r--drivers/firmware/dmi-sysfs.c4
-rw-r--r--drivers/firmware/stratix10-svc.c2
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c2
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.h2
-rw-r--r--drivers/firmware/xilinx/zynqmp.c2
-rw-r--r--drivers/fpga/dfl-fme-main.c4
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c47
-rw-r--r--drivers/fpga/zynq-fpga.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c44
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/soc15_int.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c156
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c26
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c37
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c11
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c81
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c48
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c27
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c34
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c45
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c6
-rw-r--r--drivers/hsi/clients/ssi_protocol.c2
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c1
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c14
-rw-r--r--drivers/hwmon/intel-m10-bmc-hwmon.c1
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c6
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c2
-rw-r--r--drivers/hwtracing/coresight/Kconfig11
-rw-r--r--drivers/hwtracing/coresight/Makefile1
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c21
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c605
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c52
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c163
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c13
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c20
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c269
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h36
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c23
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c17
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c110
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c23
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c3
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c11
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.h2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c444
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.h56
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-altera.c6
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c5
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c6
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c6
-rw-r--r--drivers/i2c/busses/i2c-au1550.c5
-rw-r--r--drivers/i2c/busses/i2c-axxia.c6
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c6
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c6
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c6
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c5
-rw-r--r--drivers/i2c/busses/i2c-cadence.c6
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c6
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c6
-rw-r--r--drivers/i2c/busses/i2c-cpm.c6
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c6
-rw-r--r--drivers/i2c/busses/i2c-davinci.c29
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c8
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h4
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c89
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c13
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c21
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c6
-rw-r--r--drivers/i2c/busses/i2c-dln2.c6
-rw-r--r--drivers/i2c/busses/i2c-emev2.c6
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c6
-rw-r--r--drivers/i2c/busses/i2c-gpio.c6
-rw-r--r--drivers/i2c/busses/i2c-gxp.c6
-rw-r--r--drivers/i2c/busses/i2c-highlander.c6
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c23
-rw-r--r--drivers/i2c/busses/i2c-i801.c128
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c10
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c8
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c8
-rw-r--r--drivers/i2c/busses/i2c-imx.c6
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c6
-rw-r--r--drivers/i2c/busses/i2c-isch.c6
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c5
-rw-r--r--drivers/i2c/busses/i2c-kempld.c6
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c28
-rw-r--r--drivers/i2c/busses/i2c-meson.c6
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c6
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c6
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c6
-rw-r--r--drivers/i2c/busses/i2c-mpc.c12
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c6
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c26
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c6
-rw-r--r--drivers/i2c/busses/i2c-mxs.c6
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c50
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c5
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c3
-rw-r--r--drivers/i2c/busses/i2c-ocores.c70
-rw-r--r--drivers/i2c/busses/i2c-octeon-platdrv.c5
-rw-r--r--drivers/i2c/busses/i2c-omap.c6
-rw-r--r--drivers/i2c/busses/i2c-opal.c6
-rw-r--r--drivers/i2c/busses/i2c-owl.c18
-rw-r--r--drivers/i2c/busses/i2c-pasemi-platform.c25
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c6
-rw-r--r--drivers/i2c/busses/i2c-pnx.c6
-rw-r--r--drivers/i2c/busses/i2c-powermac.c6
-rw-r--r--drivers/i2c/busses/i2c-pxa.c6
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c9
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c5
-rw-r--r--drivers/i2c/busses/i2c-qup.c5
-rw-r--r--drivers/i2c/busses/i2c-rcar.c6
-rw-r--r--drivers/i2c/busses/i2c-riic.c6
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c6
-rw-r--r--drivers/i2c/busses/i2c-rzv2m.c49
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c6
-rw-r--r--drivers/i2c/busses/i2c-scmi.c9
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c9
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c5
-rw-r--r--drivers/i2c/busses/i2c-simtec.c6
-rw-r--r--drivers/i2c/busses/i2c-st.c6
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c6
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c6
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c23
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c6
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c6
-rw-r--r--drivers/i2c/busses/i2c-tegra.c114
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c4
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c27
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c27
-rw-r--r--drivers/i2c/busses/i2c-versatile.c9
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c6
-rw-r--r--drivers/i2c/busses/i2c-wmt.c10
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c6
-rw-r--r--drivers/i2c/busses/i2c-xiic.c28
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c6
-rw-r--r--drivers/i2c/busses/scx200_acb.c6
-rw-r--r--drivers/i2c/i2c-core-base.c19
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c5
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c6
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c6
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c6
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c5
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c6
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c6
-rw-r--r--drivers/i3c/master/svc-i3c-master.c51
-rw-r--r--drivers/idle/intel_idle.c2
-rw-r--r--drivers/iio/accel/adxl313_i2c.c2
-rw-r--r--drivers/iio/accel/adxl345_i2c.c2
-rw-r--r--drivers/iio/accel/adxl355_i2c.c2
-rw-r--r--drivers/iio/accel/adxl367_i2c.c2
-rw-r--r--drivers/iio/accel/adxl372_i2c.c2
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bma400_core.c3
-rw-r--r--drivers/iio/accel/bma400_i2c.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c2
-rw-r--r--drivers/iio/accel/da280.c2
-rw-r--r--drivers/iio/accel/da311.c2
-rw-r--r--drivers/iio/accel/dmard06.c2
-rw-r--r--drivers/iio/accel/dmard09.c2
-rw-r--r--drivers/iio/accel/dmard10.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c8
-rw-r--r--drivers/iio/accel/fxls8962af-i2c.c2
-rw-r--r--drivers/iio/accel/kionix-kx022a-i2c.c3
-rw-r--r--drivers/iio/accel/kionix-kx022a-spi.c1
-rw-r--r--drivers/iio/accel/kionix-kx022a.c13
-rw-r--r--drivers/iio/accel/kxcjk-1013.c2
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c2
-rw-r--r--drivers/iio/accel/mc3230.c2
-rw-r--r--drivers/iio/accel/mma7455_i2c.c2
-rw-r--r--drivers/iio/accel/mma7660.c2
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/accel/mma9551.c2
-rw-r--r--drivers/iio/accel/mma9553.c2
-rw-r--r--drivers/iio/accel/msa311.c2
-rw-r--r--drivers/iio/accel/mxc4005.c2
-rw-r--r--drivers/iio/accel/mxc6255.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c1
-rw-r--r--drivers/iio/accel/st_accel_i2c.c2
-rw-r--r--drivers/iio/accel/stk8312.c2
-rw-r--r--drivers/iio/accel/stk8ba50.c2
-rw-r--r--drivers/iio/adc/Kconfig13
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad7091r5.c2
-rw-r--r--drivers/iio/adc/ad7192.c8
-rw-r--r--drivers/iio/adc/ad7291.c2
-rw-r--r--drivers/iio/adc/ad799x.c2
-rw-r--r--drivers/iio/adc/ina2xx-adc.c2
-rw-r--r--drivers/iio/adc/ltc2471.c2
-rw-r--r--drivers/iio/adc/ltc2485.c2
-rw-r--r--drivers/iio/adc/ltc2497.c2
-rw-r--r--drivers/iio/adc/max1363.c2
-rw-r--r--drivers/iio/adc/max77541-adc.c194
-rw-r--r--drivers/iio/adc/max9611.c2
-rw-r--r--drivers/iio/adc/mcp3422.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c2
-rw-r--r--drivers/iio/adc/nau7802.c2
-rw-r--r--drivers/iio/adc/palmas_gpadc.c1
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c15
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c19
-rw-r--r--drivers/iio/adc/rockchip_saradc.c246
-rw-r--r--drivers/iio/adc/rtq6056.c2
-rw-r--r--drivers/iio/adc/stm32-adc.c2
-rw-r--r--drivers/iio/adc/ti-adc081c.c2
-rw-r--r--drivers/iio/adc/ti-ads1015.c2
-rw-r--r--drivers/iio/adc/ti-ads1100.c2
-rw-r--r--drivers/iio/adc/ti-ads7924.c2
-rw-r--r--drivers/iio/adc/xilinx-ams.c9
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c17
-rw-r--r--drivers/iio/addac/ad74413r.c11
-rw-r--r--drivers/iio/amplifiers/ad8366.c2
-rw-r--r--drivers/iio/cdc/ad7150.c2
-rw-r--r--drivers/iio/cdc/ad7746.c2
-rw-r--r--drivers/iio/chemical/ams-iaq-core.c2
-rw-r--r--drivers/iio/chemical/atlas-ezo-sensor.c2
-rw-r--r--drivers/iio/chemical/atlas-sensor.c2
-rw-r--r--drivers/iio/chemical/bme680_i2c.c2
-rw-r--r--drivers/iio/chemical/ccs811.c2
-rw-r--r--drivers/iio/chemical/scd30_i2c.c2
-rw-r--r--drivers/iio/chemical/scd4x.c2
-rw-r--r--drivers/iio/chemical/sgp30.c2
-rw-r--r--drivers/iio/chemical/sgp40.c2
-rw-r--r--drivers/iio/chemical/sps30_i2c.c2
-rw-r--r--drivers/iio/chemical/sunrise_co2.c2
-rw-r--r--drivers/iio/chemical/vz89x.c2
-rw-r--r--drivers/iio/dac/ad5064.c2
-rw-r--r--drivers/iio/dac/ad5380.c2
-rw-r--r--drivers/iio/dac/ad5446.c2
-rw-r--r--drivers/iio/dac/ad5593r.c2
-rw-r--r--drivers/iio/dac/ad5696-i2c.c2
-rw-r--r--drivers/iio/dac/ds4424.c2
-rw-r--r--drivers/iio/dac/m62332.c2
-rw-r--r--drivers/iio/dac/max517.c2
-rw-r--r--drivers/iio/dac/max5821.c2
-rw-r--r--drivers/iio/dac/mcp4725.c2
-rw-r--r--drivers/iio/dac/ti-dac5571.c2
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c2
-rw-r--r--drivers/iio/gyro/fxas21002c_i2c.c2
-rw-r--r--drivers/iio/gyro/itg3200_core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c2
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c2
-rw-r--r--drivers/iio/health/afe4404.c2
-rw-r--r--drivers/iio/health/max30100.c2
-rw-r--r--drivers/iio/health/max30102.c2
-rw-r--r--drivers/iio/humidity/am2315.c2
-rw-r--r--drivers/iio/humidity/hdc100x.c2
-rw-r--r--drivers/iio/humidity/hdc2010.c2
-rw-r--r--drivers/iio/humidity/hts221_i2c.c2
-rw-r--r--drivers/iio/humidity/htu21.c2
-rw-r--r--drivers/iio/humidity/si7005.c2
-rw-r--r--drivers/iio/humidity/si7020.c2
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c2
-rw-r--r--drivers/iio/imu/bno055/bno055_i2c.c2
-rw-r--r--drivers/iio/imu/fxos8700_i2c.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.c49
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c10
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c5
-rw-r--r--drivers/iio/imu/kmx61.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c2
-rw-r--r--drivers/iio/imu/st_lsm9ds0/Kconfig3
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c14
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c5
-rw-r--r--drivers/iio/industrialio-buffer.c98
-rw-r--r--drivers/iio/industrialio-trigger.c22
-rw-r--r--drivers/iio/light/Kconfig25
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/adjd_s311.c2
-rw-r--r--drivers/iio/light/adux1020.c2
-rw-r--r--drivers/iio/light/al3010.c2
-rw-r--r--drivers/iio/light/al3320a.c10
-rw-r--r--drivers/iio/light/apds9300.c2
-rw-r--r--drivers/iio/light/apds9960.c2
-rw-r--r--drivers/iio/light/as73211.c2
-rw-r--r--drivers/iio/light/bh1750.c2
-rw-r--r--drivers/iio/light/bh1780.c2
-rw-r--r--drivers/iio/light/cm32181.c2
-rw-r--r--drivers/iio/light/cm3232.c2
-rw-r--r--drivers/iio/light/cm3323.c2
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/gp2ap002.c2
-rw-r--r--drivers/iio/light/gp2ap020a00f.c2
-rw-r--r--drivers/iio/light/isl29018.c2
-rw-r--r--drivers/iio/light/isl29028.c2
-rw-r--r--drivers/iio/light/isl29125.c2
-rw-r--r--drivers/iio/light/jsa1212.c2
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/light/ltrf216a.c2
-rw-r--r--drivers/iio/light/lv0104cs.c2
-rw-r--r--drivers/iio/light/max44000.c2
-rw-r--r--drivers/iio/light/max44009.c2
-rw-r--r--drivers/iio/light/noa1305.c2
-rw-r--r--drivers/iio/light/opt3001.c2
-rw-r--r--drivers/iio/light/opt4001.c467
-rw-r--r--drivers/iio/light/pa12203001.c2
-rw-r--r--drivers/iio/light/rohm-bu27008.c1026
-rw-r--r--drivers/iio/light/rohm-bu27034.c3
-rw-r--r--drivers/iio/light/rpr0521.c2
-rw-r--r--drivers/iio/light/si1133.c2
-rw-r--r--drivers/iio/light/si1145.c2
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c2
-rw-r--r--drivers/iio/light/stk3310.c2
-rw-r--r--drivers/iio/light/tcs3414.c2
-rw-r--r--drivers/iio/light/tcs3472.c2
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/light/tsl2583.c2
-rw-r--r--drivers/iio/light/tsl2591.c2
-rw-r--r--drivers/iio/light/tsl2772.c2
-rw-r--r--drivers/iio/light/tsl4531.c2
-rw-r--r--drivers/iio/light/us5182d.c2
-rw-r--r--drivers/iio/light/vcnl4000.c2
-rw-r--r--drivers/iio/light/vcnl4035.c2
-rw-r--r--drivers/iio/light/veml6030.c2
-rw-r--r--drivers/iio/light/veml6070.c2
-rw-r--r--drivers/iio/light/vl6180.c2
-rw-r--r--drivers/iio/light/zopt2201.c2
-rw-r--r--drivers/iio/magnetometer/ak8974.c2
-rw-r--r--drivers/iio/magnetometer/ak8975.c2
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c2
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c2
-rw-r--r--drivers/iio/magnetometer/mag3110.c2
-rw-r--r--drivers/iio/magnetometer/mmc35240.c2
-rw-r--r--drivers/iio/magnetometer/rm3100-i2c.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c2
-rw-r--r--drivers/iio/magnetometer/tmag5273.c2
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c2
-rw-r--r--drivers/iio/potentiometer/Kconfig10
-rw-r--r--drivers/iio/potentiometer/Makefile1
-rw-r--r--drivers/iio/potentiometer/ad5110.c2
-rw-r--r--drivers/iio/potentiometer/ad5272.c2
-rw-r--r--drivers/iio/potentiometer/ds1803.c2
-rw-r--r--drivers/iio/potentiometer/max5432.c2
-rw-r--r--drivers/iio/potentiometer/mcp4018.c2
-rw-r--r--drivers/iio/potentiometer/mcp4531.c2
-rw-r--r--drivers/iio/potentiometer/tpl0102.c2
-rw-r--r--drivers/iio/potentiometer/x9250.c220
-rw-r--r--drivers/iio/potentiostat/lmp91000.c2
-rw-r--r--drivers/iio/pressure/Kconfig13
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/abp060mg.c2
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c2
-rw-r--r--drivers/iio/pressure/dlhl60d.c2
-rw-r--r--drivers/iio/pressure/dps310.c2
-rw-r--r--drivers/iio/pressure/hp03.c2
-rw-r--r--drivers/iio/pressure/hp206c.c2
-rw-r--r--drivers/iio/pressure/icp10100.c2
-rw-r--r--drivers/iio/pressure/mpl115_i2c.c2
-rw-r--r--drivers/iio/pressure/mpl3115.c2
-rw-r--r--drivers/iio/pressure/mprls0025pa.c450
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c2
-rw-r--r--drivers/iio/pressure/ms5637.c2
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c2
-rw-r--r--drivers/iio/pressure/t5403.c2
-rw-r--r--drivers/iio/pressure/zpa2326_i2c.c2
-rw-r--r--drivers/iio/proximity/isl29501.c2
-rw-r--r--drivers/iio/proximity/mb1232.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/iio/proximity/rfd77402.c2
-rw-r--r--drivers/iio/proximity/srf08.c2
-rw-r--r--drivers/iio/proximity/sx9310.c2
-rw-r--r--drivers/iio/proximity/sx9324.c2
-rw-r--r--drivers/iio/proximity/sx9360.c2
-rw-r--r--drivers/iio/proximity/sx9500.c2
-rw-r--r--drivers/iio/proximity/vcnl3020.c2
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c2
-rw-r--r--drivers/iio/temperature/max30208.c2
-rw-r--r--drivers/iio/temperature/mlx90614.c239
-rw-r--r--drivers/iio/temperature/mlx90632.c2
-rw-r--r--drivers/iio/temperature/tmp006.c10
-rw-r--r--drivers/iio/temperature/tmp007.c2
-rw-r--r--drivers/iio/temperature/tmp117.c2
-rw-r--r--drivers/iio/temperature/tsys01.c2
-rw-r--r--drivers/iio/temperature/tsys02d.c2
-rw-r--r--drivers/interconnect/Kconfig6
-rw-r--r--drivers/interconnect/Makefile2
-rw-r--r--drivers/interconnect/core.c52
-rw-r--r--drivers/interconnect/icc-clk.c174
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c112
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h22
-rw-r--r--drivers/interconnect/qcom/msm8996.c35
-rw-r--r--drivers/interconnect/qcom/sdm660.c17
-rw-r--r--drivers/isdn/Kconfig1
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig12
-rw-r--r--drivers/leds/Kconfig37
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/flash/leds-aat1290.c2
-rw-r--r--drivers/leds/flash/leds-as3645a.c6
-rw-r--r--drivers/leds/flash/leds-lm3601x.c2
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c17
-rw-r--r--drivers/leds/flash/leds-rt4505.c2
-rw-r--r--drivers/leds/flash/leds-sgm3140.c1
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/led-core.c81
-rw-r--r--drivers/leds/led-triggers.c17
-rw-r--r--drivers/leds/leds-an30259a.c2
-rw-r--r--drivers/leds/leds-aw200xx.c594
-rw-r--r--drivers/leds/leds-aw2013.c2
-rw-r--r--drivers/leds/leds-bd2606mvv.c2
-rw-r--r--drivers/leds/leds-bd2802.c2
-rw-r--r--drivers/leds/leds-blinkm.c4
-rw-r--r--drivers/leds/leds-cht-wcove.c476
-rw-r--r--drivers/leds/leds-gpio.c18
-rw-r--r--drivers/leds/leds-is31fl319x.c2
-rw-r--r--drivers/leds/leds-is31fl32xx.c2
-rw-r--r--drivers/leds/leds-lm3530.c2
-rw-r--r--drivers/leds/leds-lm3532.c2
-rw-r--r--drivers/leds/leds-lm355x.c2
-rw-r--r--drivers/leds/leds-lm3642.c2
-rw-r--r--drivers/leds/leds-lm3692x.c2
-rw-r--r--drivers/leds/leds-lm3697.c2
-rw-r--r--drivers/leds/leds-lp3944.c2
-rw-r--r--drivers/leds/leds-lp3952.c2
-rw-r--r--drivers/leds/leds-lp50xx.c2
-rw-r--r--drivers/leds/leds-lp5521.c13
-rw-r--r--drivers/leds/leds-lp5523.c16
-rw-r--r--drivers/leds/leds-lp5562.c2
-rw-r--r--drivers/leds/leds-lp55xx-common.c9
-rw-r--r--drivers/leds/leds-lp8501.c10
-rw-r--r--drivers/leds/leds-lp8860.c2
-rw-r--r--drivers/leds/leds-mt6323.c446
-rw-r--r--drivers/leds/leds-pca9532.c2
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-pca963x.c2
-rw-r--r--drivers/leds/leds-spi-byte.c2
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds-tlc591xx.c2
-rw-r--r--drivers/leds/leds-turris-omnia.c2
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c23
-rw-r--r--drivers/leds/simple/Kconfig31
-rw-r--r--drivers/leds/simple/Makefile5
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c66
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-core.c104
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c66
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.c139
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.h22
-rw-r--r--drivers/leds/simple/simatic-ipc-leds.c1
-rw-r--r--drivers/leds/trigger/ledtrig-disk.c9
-rw-r--r--drivers/leds/trigger/ledtrig-mtd.c8
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c6
-rw-r--r--drivers/mailbox/tegra-hsp.c16
-rw-r--r--drivers/mailbox/ti-msgmgr.c12
-rw-r--r--drivers/md/bcache/super.c123
-rw-r--r--drivers/md/dm-verity-loadpin.c3
-rw-r--r--drivers/md/md.c32
-rw-r--r--drivers/md/md.h4
-rw-r--r--drivers/md/raid0.c62
-rw-r--r--drivers/md/raid0.h1
-rw-r--r--drivers/md/raid1-10.c2
-rw-r--r--drivers/md/raid10.c6
-rw-r--r--drivers/media/cec/i2c/Kconfig1
-rw-r--r--drivers/media/cec/i2c/ch7322.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_core.c6
-rw-r--r--drivers/media/dvb-core/dvbdev.c161
-rw-r--r--drivers/media/dvb-frontends/a8293.c2
-rw-r--r--drivers/media/dvb-frontends/af9013.c2
-rw-r--r--drivers/media/dvb-frontends/af9033.c2
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2099.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c2
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c2
-rw-r--r--drivers/media/dvb-frontends/helene.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c5
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c2
-rw-r--r--drivers/media/dvb-frontends/mn88472.c2
-rw-r--r--drivers/media/dvb-frontends/mn88473.c2
-rw-r--r--drivers/media/dvb-frontends/mxl692.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c2
-rw-r--r--drivers/media/dvb-frontends/si2165.c2
-rw-r--r--drivers/media/dvb-frontends/si2168.c2
-rw-r--r--drivers/media/dvb-frontends/sp2.c2
-rw-r--r--drivers/media/dvb-frontends/stv090x.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c2
-rw-r--r--drivers/media/dvb-frontends/tc90522.c2
-rw-r--r--drivers/media/dvb-frontends/tda10071.c2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c2
-rw-r--r--drivers/media/i2c/Kconfig14
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/ad5820.c2
-rw-r--r--drivers/media/i2c/adp1653.c2
-rw-r--r--drivers/media/i2c/adv7170.c2
-rw-r--r--drivers/media/i2c/adv7175.c2
-rw-r--r--drivers/media/i2c/adv7180.c2
-rw-r--r--drivers/media/i2c/adv7183.c2
-rw-r--r--drivers/media/i2c/adv7343.c2
-rw-r--r--drivers/media/i2c/adv7393.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c2
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c2
-rw-r--r--drivers/media/i2c/adv7604.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/ak7375.c2
-rw-r--r--drivers/media/i2c/ak881x.c2
-rw-r--r--drivers/media/i2c/ar0521.c2
-rw-r--r--drivers/media/i2c/bt819.c2
-rw-r--r--drivers/media/i2c/bt856.c2
-rw-r--r--drivers/media/i2c/bt866.c2
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c2
-rw-r--r--drivers/media/i2c/cs3308.c2
-rw-r--r--drivers/media/i2c/cs5345.c2
-rw-r--r--drivers/media/i2c/cs53l32a.c2
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/dw9714.c2
-rw-r--r--drivers/media/i2c/dw9768.c2
-rw-r--r--drivers/media/i2c/dw9807-vcm.c2
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c2
-rw-r--r--drivers/media/i2c/hi556.c2
-rw-r--r--drivers/media/i2c/hi846.c5
-rw-r--r--drivers/media/i2c/hi847.c2
-rw-r--r--drivers/media/i2c/imx208.c2
-rw-r--r--drivers/media/i2c/imx214.c2
-rw-r--r--drivers/media/i2c/imx219.c2
-rw-r--r--drivers/media/i2c/imx258.c2
-rw-r--r--drivers/media/i2c/imx274.c2
-rw-r--r--drivers/media/i2c/imx290.c4
-rw-r--r--drivers/media/i2c/imx296.c6
-rw-r--r--drivers/media/i2c/imx319.c2
-rw-r--r--drivers/media/i2c/imx334.c64
-rw-r--r--drivers/media/i2c/imx335.c2
-rw-r--r--drivers/media/i2c/imx355.c2
-rw-r--r--drivers/media/i2c/imx412.c2
-rw-r--r--drivers/media/i2c/imx415.c2
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c2
-rw-r--r--drivers/media/i2c/isl7998x.c2
-rw-r--r--drivers/media/i2c/ks0127.c2
-rw-r--r--drivers/media/i2c/lm3560.c2
-rw-r--r--drivers/media/i2c/lm3646.c2
-rw-r--r--drivers/media/i2c/m52790.c2
-rw-r--r--drivers/media/i2c/max2175.c2
-rw-r--r--drivers/media/i2c/max9286.c2
-rw-r--r--drivers/media/i2c/ml86v7667.c2
-rw-r--r--drivers/media/i2c/msp3400-driver.c2
-rw-r--r--drivers/media/i2c/mt9m001.c2
-rw-r--r--drivers/media/i2c/mt9m111.c2
-rw-r--r--drivers/media/i2c/mt9p031.c2
-rw-r--r--drivers/media/i2c/mt9t112.c2
-rw-r--r--drivers/media/i2c/mt9v011.c2
-rw-r--r--drivers/media/i2c/mt9v032.c2
-rw-r--r--drivers/media/i2c/mt9v111.c2
-rw-r--r--drivers/media/i2c/og01a1b.c2
-rw-r--r--drivers/media/i2c/ov01a10.c1004
-rw-r--r--drivers/media/i2c/ov02a10.c4
-rw-r--r--drivers/media/i2c/ov08d10.c2
-rw-r--r--drivers/media/i2c/ov08x40.c2
-rw-r--r--drivers/media/i2c/ov13858.c2
-rw-r--r--drivers/media/i2c/ov13b10.c2
-rw-r--r--drivers/media/i2c/ov2640.c2
-rw-r--r--drivers/media/i2c/ov2659.c2
-rw-r--r--drivers/media/i2c/ov2680.c2
-rw-r--r--drivers/media/i2c/ov2685.c4
-rw-r--r--drivers/media/i2c/ov2740.c2
-rw-r--r--drivers/media/i2c/ov4689.c2
-rw-r--r--drivers/media/i2c/ov5640.c21
-rw-r--r--drivers/media/i2c/ov5645.c2
-rw-r--r--drivers/media/i2c/ov5647.c2
-rw-r--r--drivers/media/i2c/ov5648.c4
-rw-r--r--drivers/media/i2c/ov5670.c2
-rw-r--r--drivers/media/i2c/ov5675.c2
-rw-r--r--drivers/media/i2c/ov5693.c6
-rw-r--r--drivers/media/i2c/ov5695.c4
-rw-r--r--drivers/media/i2c/ov6650.c2
-rw-r--r--drivers/media/i2c/ov7251.c2
-rw-r--r--drivers/media/i2c/ov7640.c2
-rw-r--r--drivers/media/i2c/ov7670.c2
-rw-r--r--drivers/media/i2c/ov772x.c2
-rw-r--r--drivers/media/i2c/ov7740.c2
-rw-r--r--drivers/media/i2c/ov8856.c2
-rw-r--r--drivers/media/i2c/ov8858.c4
-rw-r--r--drivers/media/i2c/ov8865.c4
-rw-r--r--drivers/media/i2c/ov9282.c2
-rw-r--r--drivers/media/i2c/ov9640.c2
-rw-r--r--drivers/media/i2c/ov9650.c2
-rw-r--r--drivers/media/i2c/ov9734.c2
-rw-r--r--drivers/media/i2c/rdacm20.c2
-rw-r--r--drivers/media/i2c/rdacm21.c2
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/i2c/s5k5baf.c2
-rw-r--r--drivers/media/i2c/s5k6a3.c2
-rw-r--r--drivers/media/i2c/saa6588.c2
-rw-r--r--drivers/media/i2c/saa6752hs.c2
-rw-r--r--drivers/media/i2c/saa7110.c2
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/saa7127.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/saa7185.c2
-rw-r--r--drivers/media/i2c/sony-btf-mpx.c2
-rw-r--r--drivers/media/i2c/st-mipid02.c17
-rw-r--r--drivers/media/i2c/st-vgxy61.c2
-rw-r--r--drivers/media/i2c/tc358743.c2
-rw-r--r--drivers/media/i2c/tc358746.c2
-rw-r--r--drivers/media/i2c/tda1997x.c2
-rw-r--r--drivers/media/i2c/tda7432.c2
-rw-r--r--drivers/media/i2c/tda9840.c2
-rw-r--r--drivers/media/i2c/tea6415c.c2
-rw-r--r--drivers/media/i2c/tea6420.c2
-rw-r--r--drivers/media/i2c/ths7303.c2
-rw-r--r--drivers/media/i2c/ths8200.c2
-rw-r--r--drivers/media/i2c/tlv320aic23b.c2
-rw-r--r--drivers/media/i2c/tvaudio.c2
-rw-r--r--drivers/media/i2c/tvp514x.c2
-rw-r--r--drivers/media/i2c/tvp5150.c2
-rw-r--r--drivers/media/i2c/tvp7002.c2
-rw-r--r--drivers/media/i2c/tw2804.c2
-rw-r--r--drivers/media/i2c/tw9903.c2
-rw-r--r--drivers/media/i2c/tw9906.c2
-rw-r--r--drivers/media/i2c/tw9910.c2
-rw-r--r--drivers/media/i2c/uda1342.c2
-rw-r--r--drivers/media/i2c/upd64031a.c2
-rw-r--r--drivers/media/i2c/upd64083.c2
-rw-r--r--drivers/media/i2c/video-i2c.c4
-rw-r--r--drivers/media/i2c/vp27smpx.c2
-rw-r--r--drivers/media/i2c/vpx3220.c2
-rw-r--r--drivers/media/i2c/wm8739.c2
-rw-r--r--drivers/media/i2c/wm8775.c2
-rw-r--r--drivers/media/mc/mc-entity.c18
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-vbi.c2
-rw-r--r--drivers/media/pci/cx18/cx18-dvb.c4
-rw-r--r--drivers/media/pci/dm1105/Kconfig2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c31
-rw-r--r--drivers/media/pci/saa7164/saa7164-dvb.c4
-rw-r--r--drivers/media/pci/ttpci/budget-core.c4
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c2
-rw-r--r--drivers/media/platform/amphion/vdec.c7
-rw-r--r--drivers/media/platform/amphion/venc.c4
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c12
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c5
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.h2
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c828
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.c17
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c15
-rw-r--r--drivers/media/platform/mediatek/vcodec/Makefile8
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.c215
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.h74
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c26
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c106
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h8
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h26
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c2207
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_hevc_req_multi_if.c1097
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec_drv_if.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec_drv_if.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c132
-rw-r--r--drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h32
-rw-r--r--drivers/media/platform/mediatek/vpu/mtk_vpu.c7
-rw-r--r--drivers/media/platform/nxp/imx7-media-csi.c94
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c2
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c6
-rw-r--r--drivers/media/platform/qcom/venus/core.h39
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c33
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c19
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h4
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c34
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.h14
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs.h3
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c11
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c28
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c94
-rw-r--r--drivers/media/platform/qcom/venus/venc.c35
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c1
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c13
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-csi2.c100
-rw-r--r--drivers/media/platform/renesas/rcar_fdp1.c5
-rw-r--r--drivers/media/platform/renesas/rcar_jpu.c55
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c9
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c8
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c9
-rw-r--r--drivers/media/platform/samsung/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c2
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h12
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c37
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h4
-rw-r--r--drivers/media/platform/st/sti/hva/hva-h264.c8
-rw-r--r--drivers/media/platform/verisilicon/Makefile3
-rw-r--r--drivers/media/platform/verisilicon/hantro.h8
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c68
-rw-r--r--drivers/media/platform/verisilicon/hantro_hevc.c23
-rw-r--r--drivers/media/platform/verisilicon/hantro_hw.h102
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c9
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c67
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.h8
-rw-r--r--drivers/media/platform/verisilicon/rockchip_av1_entropymode.c4424
-rw-r--r--drivers/media/platform/verisilicon/rockchip_av1_entropymode.h272
-rw-r--r--drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c401
-rw-r--r--drivers/media/platform/verisilicon/rockchip_av1_filmgrain.h36
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c2232
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_regs.h477
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c134
-rw-r--r--drivers/media/platform/video-mux.c102
-rw-r--r--drivers/media/radio/Kconfig14
-rw-r--r--drivers/media/radio/radio-tea5764.c2
-rw-r--r--drivers/media/radio/saa7706h.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/radio/si4713/si4713.c2
-rw-r--r--drivers/media/radio/tef6862.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c6
-rw-r--r--drivers/media/rc/Kconfig6
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_demod.c2
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_tuner.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c49
-rw-r--r--drivers/media/tuners/e4000.c2
-rw-r--r--drivers/media/tuners/fc2580.c2
-rw-r--r--drivers/media/tuners/m88rs6000t.c2
-rw-r--r--drivers/media/tuners/mt2060.c2
-rw-r--r--drivers/media/tuners/mxl301rf.c2
-rw-r--r--drivers/media/tuners/qm1d1b0004.c2
-rw-r--r--drivers/media/tuners/qm1d1c0042.c2
-rw-r--r--drivers/media/tuners/si2157.c2
-rw-r--r--drivers/media/tuners/tda18212.c2
-rw-r--r--drivers/media/tuners/tda18250.c2
-rw-r--r--drivers/media/tuners/tua9001.c2
-rw-r--r--drivers/media/usb/as102/as102_usb_drv.c6
-rw-r--r--drivers/media/usb/au0828/au0828-core.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c19
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c3
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c34
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c26
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c3
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c20
-rw-r--r--drivers/media/usb/go7007/s2250-board.c2
-rw-r--r--drivers/media/usb/siano/smsusb.c3
-rw-r--r--drivers/media/usb/stk1160/Kconfig12
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c2
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c137
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c79
-rw-r--r--drivers/media/usb/uvc/uvc_video.c20
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h12
-rw-r--r--drivers/media/v4l2-core/tuner-core.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c162
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c263
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c61
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c38
-rw-r--r--drivers/memory/ti-emif-sram-pm.S2
-rw-r--r--drivers/mfd/88pm800.c2
-rw-r--r--drivers/mfd/88pm805.c2
-rw-r--r--drivers/mfd/88pm80x.c1
-rw-r--r--drivers/mfd/88pm860x-core.c3
-rw-r--r--drivers/mfd/Kconfig17
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/aat2870-core.c4
-rw-r--r--drivers/mfd/acer-ec-a500.c2
-rw-r--r--drivers/mfd/act8945a.c2
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/arizona-i2c.c2
-rw-r--r--drivers/mfd/as3711.c2
-rw-r--r--drivers/mfd/as3722.c2
-rw-r--r--drivers/mfd/atc260x-i2c.c2
-rw-r--r--drivers/mfd/axp20x-i2c.c4
-rw-r--r--drivers/mfd/axp20x.c141
-rw-r--r--drivers/mfd/bcm590xx.c2
-rw-r--r--drivers/mfd/bd9571mwv.c2
-rw-r--r--drivers/mfd/da903x.c2
-rw-r--r--drivers/mfd/da9052-i2c.c2
-rw-r--r--drivers/mfd/da9055-i2c.c2
-rw-r--r--drivers/mfd/da9062-core.c2
-rw-r--r--drivers/mfd/da9063-i2c.c2
-rw-r--r--drivers/mfd/da9150-core.c2
-rw-r--r--drivers/mfd/dln2.c1
-rw-r--r--drivers/mfd/ene-kb3930.c2
-rw-r--r--drivers/mfd/gateworks-gsc.c2
-rw-r--r--drivers/mfd/intel-lpss-acpi.c3
-rw-r--r--drivers/mfd/intel-lpss.c2
-rw-r--r--drivers/mfd/intel-m10-bmc-core.c90
-rw-r--r--drivers/mfd/intel-m10-bmc-pmci.c1
-rw-r--r--drivers/mfd/intel-m10-bmc-spi.c15
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c2
-rw-r--r--drivers/mfd/iqs62x.c2
-rw-r--r--drivers/mfd/khadas-mcu.c2
-rw-r--r--drivers/mfd/lm3533-core.c4
-rw-r--r--drivers/mfd/lochnagar-i2c.c2
-rw-r--r--drivers/mfd/lp3943.c2
-rw-r--r--drivers/mfd/lp873x.c2
-rw-r--r--drivers/mfd/lp87565.c2
-rw-r--r--drivers/mfd/lp8788.c2
-rw-r--r--drivers/mfd/madera-i2c.c2
-rw-r--r--drivers/mfd/max14577.c2
-rw-r--r--drivers/mfd/max77541.c224
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/max77650.c2
-rw-r--r--drivers/mfd/max77686.c2
-rw-r--r--drivers/mfd/max77693.c2
-rw-r--r--drivers/mfd/max77714.c2
-rw-r--r--drivers/mfd/max77843.c2
-rw-r--r--drivers/mfd/max8907.c4
-rw-r--r--drivers/mfd/max8925-i2c.c3
-rw-r--r--drivers/mfd/max8997.c2
-rw-r--r--drivers/mfd/max8998.c2
-rw-r--r--drivers/mfd/mc13xxx-i2c.c2
-rw-r--r--drivers/mfd/menelaus.c2
-rw-r--r--drivers/mfd/menf21bmc.c2
-rw-r--r--drivers/mfd/mfd-core.c6
-rw-r--r--drivers/mfd/mp2629.c2
-rw-r--r--drivers/mfd/mt6360-core.c2
-rw-r--r--drivers/mfd/mt6370.c2
-rw-r--r--drivers/mfd/ntxec.c2
-rw-r--r--drivers/mfd/palmas.c2
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/qcom-pm8008.c4
-rw-r--r--drivers/mfd/rc5t583-irq.c2
-rw-r--r--drivers/mfd/rc5t583.c2
-rw-r--r--drivers/mfd/retu-mfd.c2
-rw-r--r--drivers/mfd/rk8xx-i2c.c2
-rw-r--r--drivers/mfd/rn5t618.c2
-rw-r--r--drivers/mfd/rohm-bd71828.c2
-rw-r--r--drivers/mfd/rohm-bd718x7.c2
-rw-r--r--drivers/mfd/rohm-bd9576.c2
-rw-r--r--drivers/mfd/rsmu_i2c.c2
-rw-r--r--drivers/mfd/rt4831.c2
-rw-r--r--drivers/mfd/rt5033.c10
-rw-r--r--drivers/mfd/rt5120.c2
-rw-r--r--drivers/mfd/sec-core.c2
-rw-r--r--drivers/mfd/si476x-i2c.c2
-rw-r--r--drivers/mfd/simple-mfd-i2c.c20
-rw-r--r--drivers/mfd/sky81452.c2
-rw-r--r--drivers/mfd/smpro-core.c2
-rw-r--r--drivers/mfd/stmfx.c9
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe.c4
-rw-r--r--drivers/mfd/stpmic1.c27
-rw-r--r--drivers/mfd/stw481x.c2
-rw-r--r--drivers/mfd/tc3589x.c2
-rw-r--r--drivers/mfd/ti-lmu.c2
-rw-r--r--drivers/mfd/tps6105x.c2
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps6507x.c2
-rw-r--r--drivers/mfd/tps65086.c2
-rw-r--r--drivers/mfd/tps65090.c2
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/tps65218.c2
-rw-r--r--drivers/mfd/tps65219.c42
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/mfd/tps65910.c2
-rw-r--r--drivers/mfd/tps65912-i2c.c2
-rw-r--r--drivers/mfd/tps6594-i2c.c4
-rw-r--r--drivers/mfd/tps6594-spi.c2
-rw-r--r--drivers/mfd/twl-core.c2
-rw-r--r--drivers/mfd/twl6040.c4
-rw-r--r--drivers/mfd/wcd934x.c28
-rw-r--r--drivers/mfd/wl1273-core.c2
-rw-r--r--drivers/mfd/wm831x-core.c2
-rw-r--r--drivers/mfd/wm831x-i2c.c2
-rw-r--r--drivers/mfd/wm8350-i2c.c2
-rw-r--r--drivers/mfd/wm8400-core.c4
-rw-r--r--drivers/mfd/wm8994-core.c4
-rw-r--r--drivers/misc/Kconfig23
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c2
-rw-r--r--drivers/misc/altera-stapl/Makefile3
-rw-r--r--drivers/misc/altera-stapl/altera.c6
-rw-r--r--drivers/misc/apds9802als.c2
-rw-r--r--drivers/misc/apds990x.c4
-rw-r--r--drivers/misc/bh1770glc.c4
-rw-r--r--drivers/misc/ds1682.c2
-rw-r--r--drivers/misc/eeprom/at24.c5
-rw-r--r--drivers/misc/eeprom/ee1004.c2
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c2
-rw-r--r--drivers/misc/eeprom/max6875.c2
-rw-r--r--drivers/misc/fastrpc.c5
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/ics932s401.c2
-rw-r--r--drivers/misc/isl29003.c2
-rw-r--r--drivers/misc/isl29020.c2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c2
-rw-r--r--drivers/misc/lkdtm/core.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c4
-rw-r--r--drivers/misc/mei/bus.c9
-rw-r--r--drivers/misc/smpro-errmon.c1
-rw-r--r--drivers/misc/tps6594-esm.c132
-rw-r--r--drivers/misc/tps6594-pfsm.c306
-rw-r--r--drivers/misc/tsl2550.c2
-rw-r--r--drivers/misc/uacce/uacce.c25
-rw-r--r--drivers/misc/xilinx_sdfec.c12
-rw-r--r--drivers/mmc/core/core.c15
-rw-r--r--drivers/mux/Kconfig2
-rw-r--r--drivers/mux/adg792a.c2
-rw-r--r--drivers/mux/mmio.c2
-rw-r--r--drivers/net/arcnet/arcnet.c8
-rw-r--r--drivers/net/dsa/ocelot/felix.c12
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c14
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c48
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c21
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c66
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c5
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c3
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/ppp/pptp.c31
-rw-r--r--drivers/net/usb/cdc_ether.c6
-rw-r--r--drivers/net/wireguard/netlink.c14
-rw-r--r--drivers/net/wireguard/queueing.c1
-rw-r--r--drivers/net/wireguard/queueing.h25
-rw-r--r--drivers/net/wireguard/receive.c2
-rw-r--r--drivers/net/wireguard/send.c2
-rw-r--r--drivers/net/wireguard/timers.c10
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c11
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c12
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c24
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c9
-rw-r--r--drivers/ntb/ntb_transport.c2
-rw-r--r--drivers/ntb/test/ntb_tool.c2
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c4
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/host/constants.c2
-rw-r--r--drivers/nvme/host/core.c6
-rw-r--r--drivers/nvme/host/ioctl.c70
-rw-r--r--drivers/nvme/host/multipath.c10
-rw-r--r--drivers/nvme/host/nvme.h3
-rw-r--r--drivers/nvme/host/pci.c5
-rw-r--r--drivers/nvme/host/tcp.c2
-rw-r--r--drivers/nvme/target/nvmet.h2
-rw-r--r--drivers/nvmem/Kconfig10
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/brcm_nvram.c28
-rw-r--r--drivers/nvmem/core.c32
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c175
-rw-r--r--drivers/nvmem/imx-ocotp.c10
-rw-r--r--drivers/nvmem/rmem.c1
-rw-r--r--drivers/nvmem/rockchip-otp.c191
-rw-r--r--drivers/nvmem/sunplus-ocotp.c9
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c2
-rw-r--r--drivers/opp/core.c44
-rw-r--r--drivers/opp/debugfs.c1
-rw-r--r--drivers/opp/of.c40
-rw-r--r--drivers/opp/opp.h6
-rw-r--r--drivers/parisc/ccio-dma.c18
-rw-r--r--drivers/parisc/dino.c5
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/lba_pci.c8
-rw-r--r--drivers/parisc/pdc_stable.c36
-rw-r--r--drivers/parisc/sba_iommu.c32
-rw-r--r--drivers/parport/Kconfig3
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c2
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c1
-rw-r--r--drivers/pcmcia/Kconfig5
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c2
-rw-r--r--drivers/perf/Kconfig13
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/cxl_pmu.c990
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c15
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c15
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c34
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h8
-rw-r--r--drivers/phy/cadence/phy-cadence-salvo.c95
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c720
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c4
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c218
-rw-r--r--drivers/phy/hisilicon/Kconfig2
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c62
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c356
-rw-r--r--drivers/phy/microchip/sparx5_serdes.c426
-rw-r--r--drivers/phy/microchip/sparx5_serdes.h1
-rw-r--r--drivers/phy/microchip/sparx5_serdes_regs.h106
-rw-r--r--drivers/phy/phy-core.c14
-rw-r--r--drivers/phy/qualcomm/Kconfig12
-rw-r--r--drivers/phy/qualcomm/Makefile3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c263
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c182
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-sgmii-eth.c451
-rw-r--r--drivers/phy/samsung/Kconfig2
-rw-r--r--drivers/phy/tegra/xusb.c4
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c34
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c2
-rw-r--r--drivers/platform/x86/simatic-ipc.c7
-rw-r--r--drivers/power/reset/Kconfig1
-rw-r--r--drivers/power/reset/at91-reset.c9
-rw-r--r--drivers/power/reset/gpio-restart.c7
-rw-r--r--drivers/power/reset/qcom-pon.c22
-rw-r--r--drivers/power/supply/Kconfig17
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/adp5061.c2
-rw-r--r--drivers/power/supply/axp20x_usb_power.c307
-rw-r--r--drivers/power/supply/bd99954-charger.c2
-rw-r--r--drivers/power/supply/bq2415x_charger.c2
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq24257_charger.c2
-rw-r--r--drivers/power/supply/bq24735-charger.c2
-rw-r--r--drivers/power/supply/bq2515x_charger.c2
-rw-r--r--drivers/power/supply/bq256xx_charger.c30
-rw-r--r--drivers/power/supply/bq25890_charger.c2
-rw-r--r--drivers/power/supply/bq25980_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c2
-rw-r--r--drivers/power/supply/cros_peripheral_charger.c25
-rw-r--r--drivers/power/supply/cw2015_battery.c2
-rw-r--r--drivers/power/supply/ds2782_battery.c2
-rw-r--r--drivers/power/supply/ip5xxx_power.c2
-rw-r--r--drivers/power/supply/lp8727_charger.c2
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c2
-rw-r--r--drivers/power/supply/max14656_charger_detector.c2
-rw-r--r--drivers/power/supply/max17040_battery.c2
-rw-r--r--drivers/power/supply/max17042_battery.c9
-rw-r--r--drivers/power/supply/max77976_charger.c2
-rw-r--r--drivers/power/supply/power_supply_hwmon.c2
-rw-r--r--drivers/power/supply/power_supply_leds.c5
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c1059
-rw-r--r--drivers/power/supply/rk817_charger.c2
-rw-r--r--drivers/power/supply/rt5033_battery.c40
-rw-r--r--drivers/power/supply/rt5033_charger.c472
-rw-r--r--drivers/power/supply/rt9455_charger.c2
-rw-r--r--drivers/power/supply/rt9467-charger.c4
-rw-r--r--drivers/power/supply/rt9471.c2
-rw-r--r--drivers/power/supply/sbs-battery.c2
-rw-r--r--drivers/power/supply/sbs-charger.c2
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/power/supply/smb347-charger.c2
-rw-r--r--drivers/power/supply/twl4030_madc_battery.c8
-rw-r--r--drivers/power/supply/ucs1002_power.c2
-rw-r--r--drivers/power/supply/ug3105_battery.c2
-rw-r--r--drivers/ptp/ptp_sysfs.c3
-rw-r--r--drivers/pwm/Kconfig21
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/pwm-ab8500.c2
-rw-r--r--drivers/pwm/pwm-clk.c12
-rw-r--r--drivers/pwm/pwm-imx-tpm.c7
-rw-r--r--drivers/pwm/pwm-mediatek.c39
-rw-r--r--drivers/pwm/pwm-meson.c212
-rw-r--r--drivers/pwm/pwm-microchip-core.c507
-rw-r--r--drivers/pwm/pwm-mtk-disp.c13
-rw-r--r--drivers/pwm/pwm-pca9685.c2
-rw-r--r--drivers/pwm/pwm-rz-mtu3.c551
-rw-r--r--drivers/pwm/pwm-sifive.c7
-rw-r--r--drivers/pwm/sysfs.c17
-rw-r--r--drivers/regulator/Kconfig12
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/max77541-regulator.c153
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c6
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c17
-rw-r--r--drivers/remoteproc/imx_rproc.c6
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c6
-rw-r--r--drivers/remoteproc/meson_mx_ao_arc.c6
-rw-r--r--drivers/remoteproc/mtk_scp.c6
-rw-r--r--drivers/remoteproc/omap_remoteproc.c6
-rw-r--r--drivers/remoteproc/pru_rproc.c6
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c6
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c6
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c8
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c6
-rw-r--r--drivers/remoteproc/qcom_wcnss.c6
-rw-r--r--drivers/remoteproc/rcar_rproc.c6
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c6
-rw-r--r--drivers/remoteproc/st_remoteproc.c6
-rw-r--r--drivers/remoteproc/stm32_rproc.c82
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c6
-rw-r--r--drivers/rtc/Kconfig27
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c2
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c4
-rw-r--r--drivers/rtc/rtc-abx80x.c2
-rw-r--r--drivers/rtc/rtc-bq32k.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c2
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-ds3232.c4
-rw-r--r--drivers/rtc/rtc-em3027.c2
-rw-r--r--drivers/rtc/rtc-fm3130.c2
-rw-r--r--drivers/rtc/rtc-hym8563.c2
-rw-r--r--drivers/rtc/rtc-isl12022.c4
-rw-r--r--drivers/rtc/rtc-isl12026.c2
-rw-r--r--drivers/rtc/rtc-isl1208.c132
-rw-r--r--drivers/rtc/rtc-loongson.c397
-rw-r--r--drivers/rtc/rtc-ls1x.c192
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-max6900.c2
-rw-r--r--drivers/rtc/rtc-nct3018y.c2
-rw-r--r--drivers/rtc/rtc-pcf2127.c2
-rw-r--r--drivers/rtc/rtc-pcf85063.c2
-rw-r--r--drivers/rtc/rtc-pcf8523.c2
-rw-r--r--drivers/rtc/rtc-pcf85363.c2
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pcf8583.c2
-rw-r--r--drivers/rtc/rtc-rs5c372.c2
-rw-r--r--drivers/rtc/rtc-rv3028.c9
-rw-r--r--drivers/rtc/rtc-rv3029c2.c2
-rw-r--r--drivers/rtc/rtc-rv3032.c4
-rw-r--r--drivers/rtc/rtc-rv8803.c2
-rw-r--r--drivers/rtc/rtc-rx6110.c2
-rw-r--r--drivers/rtc/rtc-rx8010.c2
-rw-r--r--drivers/rtc/rtc-rx8025.c2
-rw-r--r--drivers/rtc/rtc-rx8581.c2
-rw-r--r--drivers/rtc/rtc-s35390a.c2
-rw-r--r--drivers/rtc/rtc-sd3078.c2
-rw-r--r--drivers/rtc/rtc-st-lpc.c16
-rw-r--r--drivers/rtc/rtc-stm32.c28
-rw-r--r--drivers/rtc/rtc-x1205.c2
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c4
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/dcssblk.c5
-rw-r--r--drivers/s390/char/Kconfig2
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/uvdevice.c231
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/s390/cio/device_fsm.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/s390/crypto/ap_bus.h27
-rw-r--r--drivers/s390/crypto/ap_queue.c7
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c113
-rw-r--r--drivers/s390/crypto/zcrypt_api.h3
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c2
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h4
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c2
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h4
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c10
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c16
-rw-r--r--drivers/s390/net/ctcm_mpc.c10
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_l3_sys.c2
-rw-r--r--drivers/sbus/char/oradax.c21
-rw-r--r--drivers/scsi/isci/scu_task_context.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h10
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/scsi.c37
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h4
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c5
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c5
-rw-r--r--drivers/soundwire/Kconfig1
-rw-r--r--drivers/soundwire/Makefile3
-rw-r--r--drivers/soundwire/amd_manager.c9
-rw-r--r--drivers/soundwire/bus.c30
-rw-r--r--drivers/soundwire/cadence_master.c50
-rw-r--r--drivers/soundwire/cadence_master.h5
-rw-r--r--drivers/soundwire/debugfs.c12
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c10
-rw-r--r--drivers/soundwire/intel.c57
-rw-r--r--drivers/soundwire/intel.h16
-rw-r--r--drivers/soundwire/intel_ace2x.c393
-rw-r--r--drivers/soundwire/intel_ace2x_debugfs.c147
-rw-r--r--drivers/soundwire/intel_auxdevice.c17
-rw-r--r--drivers/soundwire/intel_bus_common.c92
-rw-r--r--drivers/soundwire/intel_init.c21
-rw-r--r--drivers/soundwire/qcom.c496
-rw-r--r--drivers/soundwire/stream.c161
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-bcm-qspi.c10
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c2
-rw-r--r--drivers/spi/spi-bcmbca-hsspi.c2
-rw-r--r--drivers/spi/spi-geni-qcom.c6
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c3
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/media/atomisp/Makefile1
-rw-r--r--drivers/staging/media/atomisp/TODO238
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c313
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c252
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h309
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h141
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c42
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h127
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp-regs.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c1034
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_common.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c283
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h55
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h94
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c874
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c183
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c248
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h29
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c398
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c262
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c425
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.h4
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c62
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c524
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.c33
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c56
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c10
-rw-r--r--drivers/staging/media/av7110/av7110_av.c4
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c2
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c45
-rw-r--r--drivers/staging/media/max96712/max96712.c40
-rw-r--r--drivers/staging/media/tegra-video/Kconfig1
-rw-r--r--drivers/staging/media/tegra-video/Makefile2
-rw-r--r--drivers/staging/media/tegra-video/csi.c48
-rw-r--r--drivers/staging/media/tegra-video/csi.h4
-rw-r--r--drivers/staging/media/tegra-video/tegra20.c661
-rw-r--r--drivers/staging/media/tegra-video/tegra210.c90
-rw-r--r--drivers/staging/media/tegra-video/vi.c222
-rw-r--r--drivers/staging/media/tegra-video/vi.h71
-rw-r--r--drivers/staging/media/tegra-video/video.c5
-rw-r--r--drivers/staging/media/tegra-video/video.h2
-rw-r--r--drivers/staging/media/tegra-video/vip.c287
-rw-r--r--drivers/staging/media/tegra-video/vip.h68
-rw-r--r--drivers/staging/most/i2c/i2c.c2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/pi433/pi433_if.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c97
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c62
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c82
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c168
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c20
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c73
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h67
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c12
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h1
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib.h87
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c63
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c333
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c85
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c16
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c31
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c5
-rw-r--r--drivers/staging/rtl8723bs/include/sta_info.h26
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c62
-rw-r--r--drivers/staging/rts5208/rtsx.c20
-rw-r--r--drivers/staging/sm750fb/Kconfig2
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c4
-rw-r--r--drivers/staging/vt6655/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h21
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c21
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c21
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h21
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h21
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h21
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h21
-rw-r--r--drivers/staging/wlan-ng/p80211metastruct.h39
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h21
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h21
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c21
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h21
-rw-r--r--drivers/staging/wlan-ng/p80211req.c21
-rw-r--r--drivers/staging/wlan-ng/p80211req.h21
-rw-r--r--drivers/staging/wlan-ng/p80211types.h21
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c21
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c21
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c73
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h21
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c21
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c21
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_user.c4
-rw-r--r--drivers/thunderbolt/Makefile2
-rw-r--r--drivers/thunderbolt/acpi.c5
-rw-r--r--drivers/thunderbolt/clx.c423
-rw-r--r--drivers/thunderbolt/ctl.c28
-rw-r--r--drivers/thunderbolt/debugfs.c64
-rw-r--r--drivers/thunderbolt/dma_test.c12
-rw-r--r--drivers/thunderbolt/eeprom.c3
-rw-r--r--drivers/thunderbolt/icm.c30
-rw-r--r--drivers/thunderbolt/nhi.c42
-rw-r--r--drivers/thunderbolt/nhi.h4
-rw-r--r--drivers/thunderbolt/nhi_regs.h19
-rw-r--r--drivers/thunderbolt/nvm.c4
-rw-r--r--drivers/thunderbolt/quirks.c10
-rw-r--r--drivers/thunderbolt/retimer.c64
-rw-r--r--drivers/thunderbolt/switch.c594
-rw-r--r--drivers/thunderbolt/tb.c329
-rw-r--r--drivers/thunderbolt/tb.h243
-rw-r--r--drivers/thunderbolt/tb_msgs.h7
-rw-r--r--drivers/thunderbolt/tb_regs.h25
-rw-r--r--drivers/thunderbolt/test.c83
-rw-r--r--drivers/thunderbolt/tmu.c713
-rw-r--r--drivers/thunderbolt/tunnel.c239
-rw-r--r--drivers/thunderbolt/usb4.c114
-rw-r--r--drivers/thunderbolt/xdomain.c98
-rw-r--r--drivers/tty/n_tty.c25
-rw-r--r--drivers/tty/serial/8250/8250.h11
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c3
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c4
-rw-r--r--drivers/tty/serial/8250/8250_core.c91
-rw-r--r--drivers/tty/serial/8250/8250_early.c21
-rw-r--r--drivers/tty/serial/8250/8250_em.c4
-rw-r--r--drivers/tty/serial/8250/8250_exar.c4
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c17
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c59
-rw-r--r--drivers/tty/serial/8250/8250_of.c6
-rw-r--r--drivers/tty/serial/8250/8250_omap.c215
-rw-r--r--drivers/tty/serial/8250/8250_pci.c19
-rw-r--r--drivers/tty/serial/8250/8250_port.c167
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c2
-rw-r--r--drivers/tty/serial/8250/8250_rt288x.c136
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c4
-rw-r--r--drivers/tty/serial/8250/Kconfig16
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig23
-rw-r--r--drivers/tty/serial/Makefile6
-rw-r--r--drivers/tty/serial/amba-pl011.c7
-rw-r--r--drivers/tty/serial/atmel_serial.c9
-rw-r--r--drivers/tty/serial/clps711x.c4
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c5
-rw-r--r--drivers/tty/serial/fsl_lpuart.c57
-rw-r--r--drivers/tty/serial/imx.c22
-rw-r--r--drivers/tty/serial/lantiq.c4
-rw-r--r--drivers/tty/serial/ma35d1_serial.c821
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c5
-rw-r--r--drivers/tty/serial/samsung_tty.c19
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/tty/serial/serial_base.h46
-rw-r--r--drivers/tty/serial/serial_base_bus.c205
-rw-r--r--drivers/tty/serial/serial_core.c210
-rw-r--r--drivers/tty/serial/serial_ctrl.c68
-rw-r--r--drivers/tty/serial/serial_port.c105
-rw-r--r--drivers/tty/serial/st-asc.c6
-rw-r--r--drivers/tty/serial/stm32-usart.c5
-rw-r--r--drivers/tty/serial/uartlite.c12
-rw-r--r--drivers/tty/serial/xilinx_uartps.c5
-rw-r--r--drivers/tty/tty.h11
-rw-r--r--drivers/tty/tty_audit.c43
-rw-r--r--drivers/tty/tty_io.c31
-rw-r--r--drivers/ufs/core/ufshcd-priv.h1
-rw-r--r--drivers/uio/uio_dfl.c2
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c6
-rw-r--r--drivers/usb/cdns3/Kconfig11
-rw-r--r--drivers/usb/cdns3/Makefile1
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c15
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c30
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c5
-rw-r--r--drivers/usb/cdns3/cdns3-starfive.c246
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c6
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c11
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c6
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c6
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c6
-rw-r--r--drivers/usb/chipidea/core.c6
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c38
-rw-r--r--drivers/usb/common/led.c4
-rw-r--r--drivers/usb/common/usb-conn-gpio.c6
-rw-r--r--drivers/usb/core/devio.c18
-rw-r--r--drivers/usb/core/hcd-pci.c24
-rw-r--r--drivers/usb/core/hub.c15
-rw-r--r--drivers/usb/core/hub.h4
-rw-r--r--drivers/usb/core/port.c32
-rw-r--r--drivers/usb/dwc2/params.c21
-rw-r--r--drivers/usb/dwc2/platform.c22
-rw-r--r--drivers/usb/dwc3/core.c17
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c5
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c6
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c6
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c6
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c27
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c6
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c6
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c43
-rw-r--r--drivers/usb/dwc3/dwc3-st.c6
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c6
-rw-r--r--drivers/usb/dwc3/ep0.c3
-rw-r--r--drivers/usb/dwc3/gadget.c13
-rw-r--r--drivers/usb/fotg210/fotg210-core.c6
-rw-r--r--drivers/usb/gadget/function/f_hid.c21
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c8
-rw-r--r--drivers/usb/gadget/function/f_printer.c29
-rw-r--r--drivers/usb/gadget/function/u_serial.c22
-rw-r--r--drivers/usb/gadget/function/uvc_video.c50
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c4
-rw-r--r--drivers/usb/gadget/legacy/hid.c6
-rw-r--r--drivers/usb/gadget/udc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c8
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c6
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c6
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c5
-rw-r--r--drivers/usb/gadget/udc/cdns2/Kconfig11
-rw-r--r--drivers/usb/gadget/udc/cdns2/Makefile7
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-debug.h203
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-ep0.c659
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c2474
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.h707
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-pci.c138
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-trace.c11
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-trace.h605
-rw-r--r--drivers/usb/gadget/udc/core.c24
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c11
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c6
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c6
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c5
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c6
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c6
-rw-r--r--drivers/usb/gadget/udc/net2272.c6
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c6
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c6
-rw-r--r--drivers/usb/gadget/udc/renesas_usbf.c6
-rw-r--r--drivers/usb/gadget/udc/rzv2m_usb3drd.c6
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c8
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c6
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c8
-rw-r--r--drivers/usb/host/Kconfig4
-rw-r--r--drivers/usb/host/ehci-atmel.c6
-rw-r--r--drivers/usb/host/ehci-brcm.c5
-rw-r--r--drivers/usb/host/ehci-exynos.c6
-rw-r--r--drivers/usb/host/ehci-fsl.c6
-rw-r--r--drivers/usb/host/ehci-grlib.c6
-rw-r--r--drivers/usb/host/ehci-mv.c6
-rw-r--r--drivers/usb/host/ehci-npcm7xx.c6
-rw-r--r--drivers/usb/host/ehci-omap.c6
-rw-r--r--drivers/usb/host/ehci-orion.c6
-rw-r--r--drivers/usb/host/ehci-pci.c3
-rw-r--r--drivers/usb/host/ehci-platform.c6
-rw-r--r--drivers/usb/host/ehci-ppc-of.c6
-rw-r--r--drivers/usb/host/ehci-sh.c6
-rw-r--r--drivers/usb/host/ehci-spear.c6
-rw-r--r--drivers/usb/host/ehci-st.c6
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c6
-rw-r--r--drivers/usb/host/fhci-hcd.c9
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c5
-rw-r--r--drivers/usb/host/isp116x-hcd.c7
-rw-r--r--drivers/usb/host/isp1362-hcd.c6
-rw-r--r--drivers/usb/host/octeon-hcd.c6
-rw-r--r--drivers/usb/host/ohci-at91.c5
-rw-r--r--drivers/usb/host/ohci-da8xx.c6
-rw-r--r--drivers/usb/host/ohci-exynos.c6
-rw-r--r--drivers/usb/host/ohci-nxp.c6
-rw-r--r--drivers/usb/host/ohci-omap.c5
-rw-r--r--drivers/usb/host/ohci-pci.c8
-rw-r--r--drivers/usb/host/ohci-platform.c8
-rw-r--r--drivers/usb/host/ohci-ppc-of.c6
-rw-r--r--drivers/usb/host/ohci-pxa27x.c5
-rw-r--r--drivers/usb/host/ohci-s3c2410.c5
-rw-r--r--drivers/usb/host/ohci-sm501.c6
-rw-r--r--drivers/usb/host/ohci-spear.c5
-rw-r--r--drivers/usb/host/ohci-st.c6
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c6
-rw-r--r--drivers/usb/host/r8a66597-hcd.c5
-rw-r--r--drivers/usb/host/sl811-hcd.c5
-rw-r--r--drivers/usb/host/uhci-grlib.c6
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-hcd.h24
-rw-r--r--drivers/usb/host/uhci-pci.c7
-rw-r--r--drivers/usb/host/uhci-platform.c6
-rw-r--r--drivers/usb/host/xhci-histb.c20
-rw-r--r--drivers/usb/host/xhci-mem.c132
-rw-r--r--drivers/usb/host/xhci-mtk.c12
-rw-r--r--drivers/usb/host/xhci-pci.c24
-rw-r--r--drivers/usb/host/xhci-plat.c23
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-rcar.c6
-rw-r--r--drivers/usb/host/xhci-ring.c137
-rw-r--r--drivers/usb/host/xhci-tegra.c12
-rw-r--r--drivers/usb/host/xhci-trace.h9
-rw-r--r--drivers/usb/host/xhci.c56
-rw-r--r--drivers/usb/host/xhci.h9
-rw-r--r--drivers/usb/isp1760/isp1760-if.c6
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c6
-rw-r--r--drivers/usb/misc/qcom_eud.c10
-rw-r--r--drivers/usb/misc/usb251xb.c11
-rw-r--r--drivers/usb/misc/usb3503.c8
-rw-r--r--drivers/usb/misc/usb4604.c2
-rw-r--r--drivers/usb/mon/mon_bin.c19
-rw-r--r--drivers/usb/phy/phy-isp1301.c2
-rw-r--r--drivers/usb/phy/phy-tahvo.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.c6
-rw-r--r--drivers/usb/roles/class.c17
-rw-r--r--drivers/usb/roles/intel-xhci-usb-role-switch.c6
-rw-r--r--drivers/usb/serial/ark3116.c7
-rw-r--r--drivers/usb/serial/belkin_sa.c12
-rw-r--r--drivers/usb/serial/ch341.c37
-rw-r--r--drivers/usb/serial/cp210x.c14
-rw-r--r--drivers/usb/serial/digi_acceleport.c7
-rw-r--r--drivers/usb/serial/f81232.c4
-rw-r--r--drivers/usb/serial/f81534.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c10
-rw-r--r--drivers/usb/serial/io_edgeport.c6
-rw-r--r--drivers/usb/serial/io_ti.c9
-rw-r--r--drivers/usb/serial/keyspan.c5
-rw-r--r--drivers/usb/serial/keyspan_pda.c8
-rw-r--r--drivers/usb/serial/mct_u232.c6
-rw-r--r--drivers/usb/serial/mos7720.c9
-rw-r--r--drivers/usb/serial/mos7840.c7
-rw-r--r--drivers/usb/serial/mxuport.c6
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/pl2303.c14
-rw-r--r--drivers/usb/serial/quatech2.c8
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c10
-rw-r--r--drivers/usb/serial/upd78f0730.c7
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/usb/serial/usb_debug.c13
-rw-r--r--drivers/usb/serial/whiteheat.c7
-rw-r--r--drivers/usb/serial/xr_serial.c4
-rw-r--r--drivers/usb/typec/Kconfig13
-rw-r--r--drivers/usb/typec/Makefile1
-rw-r--r--drivers/usb/typec/anx7411.c2
-rw-r--r--drivers/usb/typec/class.c4
-rw-r--r--drivers/usb/typec/hd3ss3220.c4
-rw-r--r--drivers/usb/typec/mux.c61
-rw-r--r--drivers/usb/typec/mux/Kconfig10
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/fsa4480.c128
-rw-r--r--drivers/usb/typec/mux/gpio-sbu-mux.c11
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c56
-rw-r--r--drivers/usb/typec/mux/nb7vpq904m.c529
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c2
-rw-r--r--drivers/usb/typec/qcom-pmic-typec.c261
-rw-r--r--drivers/usb/typec/rt1719.c2
-rw-r--r--drivers/usb/typec/stusb160x.c2
-rw-r--r--drivers/usb/typec/tcpm/Kconfig11
-rw-r--r--drivers/usb/typec/tcpm/Makefile1
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/Makefile6
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c344
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c528
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h119
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c556
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h195
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6360.c5
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6370.c6
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c27
-rw-r--r--drivers/usb/typec/tcpm/wcove.c6
-rw-r--r--drivers/usb/typec/tipd/core.c2
-rw-r--r--drivers/usb/typec/ucsi/psy.c14
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c17
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c6
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c2
-rw-r--r--drivers/usb/typec/wusb3801.c2
-rw-r--r--drivers/usb/usbip/stub_main.c4
-rw-r--r--drivers/usb/usbip/vhci_hcd.c6
-rw-r--r--drivers/vdpa/Kconfig10
-rw-r--r--drivers/vdpa/Makefile1
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c201
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h48
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c108
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c165
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.h15
-rw-r--r--drivers/vdpa/pds/Makefile10
-rw-r--r--drivers/vdpa/pds/aux_drv.c140
-rw-r--r--drivers/vdpa/pds/aux_drv.h26
-rw-r--r--drivers/vdpa/pds/cmds.c185
-rw-r--r--drivers/vdpa/pds/cmds.h18
-rw-r--r--drivers/vdpa/pds/debugfs.c289
-rw-r--r--drivers/vdpa/pds/debugfs.h17
-rw-r--r--drivers/vdpa/pds/vdpa_dev.c769
-rw-r--r--drivers/vdpa/pds/vdpa_dev.h49
-rw-r--r--drivers/vdpa/solidrun/snet_ctrl.c6
-rw-r--r--drivers/vdpa/solidrun/snet_hwmon.c2
-rw-r--r--drivers/vdpa/solidrun/snet_main.c15
-rw-r--r--drivers/vdpa/solidrun/snet_vdpa.h1
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c6
-rw-r--r--drivers/vhost/net.c8
-rw-r--r--drivers/vhost/scsi.c103
-rw-r--r--drivers/vhost/vhost.c419
-rw-r--r--drivers/vhost/vhost.h24
-rw-r--r--drivers/vhost/vsock.c4
-rw-r--r--drivers/video/backlight/adp8860_bl.c4
-rw-r--r--drivers/video/backlight/adp8870_bl.c4
-rw-r--r--drivers/video/backlight/arcxcnn_bl.c2
-rw-r--r--drivers/video/backlight/bd6107.c2
-rw-r--r--drivers/video/backlight/ktz8866.c2
-rw-r--r--drivers/video/backlight/led_bl.c5
-rw-r--r--drivers/video/backlight/lm3630a_bl.c10
-rw-r--r--drivers/video/backlight/lm3639_bl.c2
-rw-r--r--drivers/video/backlight/lp855x_bl.c50
-rw-r--r--drivers/video/backlight/lv5207lp.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c9
-rw-r--r--drivers/video/fbdev/stifb.c4
-rw-r--r--drivers/video/sticore.c159
-rw-r--r--drivers/virtio/virtio_pci_common.h7
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c33
-rw-r--r--drivers/virtio/virtio_vdpa.c4
-rw-r--r--drivers/w1/masters/sgi_w1.c2
-rw-r--r--drivers/w1/slaves/Kconfig4
-rw-r--r--drivers/w1/slaves/w1_ds2438.c2
-rw-r--r--drivers/w1/slaves/w1_therm.c37
-rw-r--r--drivers/w1/w1.c55
-rw-r--r--drivers/watchdog/Kconfig18
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/ep93xx_wdt.c5
-rw-r--r--drivers/watchdog/ibmasr.c3
-rw-r--r--drivers/watchdog/loongson1_wdt.c13
-rw-r--r--drivers/watchdog/m54xx_wdt.c4
-rw-r--r--drivers/watchdog/max63xx_wdt.c5
-rw-r--r--drivers/watchdog/moxart_wdt.c4
-rw-r--r--drivers/watchdog/octeon-wdt-nmi.S5
-rw-r--r--drivers/watchdog/orion_wdt.c4
-rw-r--r--drivers/watchdog/rtd119x_wdt.c2
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c4
-rw-r--r--drivers/watchdog/sp5100_tco.c4
-rw-r--r--drivers/watchdog/ts4800_wdt.c4
-rw-r--r--drivers/watchdog/ts72xx_wdt.c4
-rw-r--r--drivers/watchdog/xilinx_wwdt.c201
-rw-r--r--drivers/watchdog/ziirave_wdt.c2
-rw-r--r--fs/afs/write.c8
-rw-r--r--fs/ceph/addr.c85
-rw-r--r--fs/ceph/caps.c55
-rw-r--r--fs/ceph/dir.c17
-rw-r--r--fs/ceph/file.c3
-rw-r--r--fs/ceph/mds_client.c4
-rw-r--r--fs/ceph/metric.c8
-rw-r--r--fs/ceph/snap.c9
-rw-r--r--fs/ceph/super.h13
-rw-r--r--fs/dax.c14
-rw-r--r--fs/debugfs/file.c9
-rw-r--r--fs/f2fs/checkpoint.c7
-rw-r--r--fs/f2fs/compress.c41
-rw-r--r--fs/f2fs/data.c71
-rw-r--r--fs/f2fs/dir.c9
-rw-r--r--fs/f2fs/f2fs.h124
-rw-r--r--fs/f2fs/file.c77
-rw-r--r--fs/f2fs/gc.c43
-rw-r--r--fs/f2fs/inode.c207
-rw-r--r--fs/f2fs/iostat.c1
-rw-r--r--fs/f2fs/namei.c32
-rw-r--r--fs/f2fs/node.c45
-rw-r--r--fs/f2fs/node.h3
-rw-r--r--fs/f2fs/recovery.c75
-rw-r--r--fs/f2fs/segment.c162
-rw-r--r--fs/f2fs/super.c252
-rw-r--r--fs/f2fs/sysfs.c279
-rw-r--r--fs/f2fs/xattr.c6
-rw-r--r--fs/f2fs/xattr.h1
-rw-r--r--fs/file_table.c14
-rw-r--r--fs/fuse/virtio_fs.c3
-rw-r--r--fs/gfs2/aops.c19
-rw-r--r--fs/gfs2/bmap.c4
-rw-r--r--fs/gfs2/file.c5
-rw-r--r--fs/gfs2/glock.c4
-rw-r--r--fs/gfs2/glops.c69
-rw-r--r--fs/gfs2/incore.h12
-rw-r--r--fs/gfs2/lock_dlm.c23
-rw-r--r--fs/gfs2/log.c11
-rw-r--r--fs/gfs2/lops.c21
-rw-r--r--fs/gfs2/ops_fstype.c15
-rw-r--r--fs/gfs2/quota.c26
-rw-r--r--fs/gfs2/recovery.c28
-rw-r--r--fs/gfs2/rgrp.c2
-rw-r--r--fs/gfs2/super.c215
-rw-r--r--fs/gfs2/super.h1
-rw-r--r--fs/gfs2/sys.c4
-rw-r--r--fs/gfs2/trans.c3
-rw-r--r--fs/gfs2/util.c49
-rw-r--r--fs/gfs2/util.h3
-rw-r--r--fs/inode.c6
-rw-r--r--fs/iomap/buffered-io.c2
-rw-r--r--fs/kernfs/dir.c2
-rw-r--r--fs/lockd/clntlock.c6
-rw-r--r--fs/namei.c3
-rw-r--r--fs/nfs/client.c32
-rw-r--r--fs/nfs/fs_context.c67
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/nfs3client.c13
-rw-r--r--fs/nfs/nfs42proc.c25
-rw-r--r--fs/nfs/nfs42xattr.c79
-rw-r--r--fs/nfs/nfs42xdr.c621
-rw-r--r--fs/nfs/nfs4client.c44
-rw-r--r--fs/nfs/nfs4proc.c9
-rw-r--r--fs/nfs/nfs4state.c3
-rw-r--r--fs/nfs/super.c18
-rw-r--r--fs/nfs/sysfs.c235
-rw-r--r--fs/nfs/sysfs.h10
-rw-r--r--fs/notify/fanotify/fanotify_user.c14
-rw-r--r--fs/ntfs3/attrib.c2
-rw-r--r--fs/ntfs3/attrlist.c7
-rw-r--r--fs/ntfs3/bitmap.c10
-rw-r--r--fs/ntfs3/file.c6
-rw-r--r--fs/ntfs3/frecord.c58
-rw-r--r--fs/ntfs3/fslog.c40
-rw-r--r--fs/ntfs3/fsntfs.c99
-rw-r--r--fs/ntfs3/index.c20
-rw-r--r--fs/ntfs3/inode.c23
-rw-r--r--fs/ntfs3/lznt.c6
-rw-r--r--fs/ntfs3/namei.c31
-rw-r--r--fs/ntfs3/ntfs.h117
-rw-r--r--fs/ntfs3/ntfs_fs.h31
-rw-r--r--fs/ntfs3/record.c14
-rw-r--r--fs/ntfs3/run.c4
-rw-r--r--fs/ntfs3/super.c280
-rw-r--r--fs/ntfs3/xattr.c20
-rw-r--r--fs/ocfs2/Kconfig6
-rw-r--r--fs/overlayfs/overlayfs.h41
-rw-r--r--fs/overlayfs/params.c532
-rw-r--r--fs/overlayfs/params.h42
-rw-r--r--fs/overlayfs/super.c530
-rw-r--r--fs/proc/kcore.c2
-rw-r--r--fs/proc/proc_sysctl.c2
-rw-r--r--fs/smb/client/cifsglob.h2
-rw-r--r--fs/smb/client/smbdirect.c2
-rw-r--r--fs/squashfs/block.c27
-rw-r--r--fs/sysfs/group.c12
-rw-r--r--fs/xfs/libxfs/xfs_ag.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c289
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h24
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c8
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c3
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c32
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c3
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c22
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c8
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c10
-rw-r--r--fs/xfs/libxfs/xfs_sb.c56
-rw-r--r--fs/xfs/xfs_extent_busy.c36
-rw-r--r--fs/xfs/xfs_extent_busy.h6
-rw-r--r--fs/xfs/xfs_extfree_item.c75
-rw-r--r--fs/xfs/xfs_fsmap.c255
-rw-r--r--fs/xfs/xfs_log.c47
-rw-r--r--fs/xfs/xfs_notify_failure.c9
-rw-r--r--fs/xfs/xfs_reflink.c3
-rw-r--r--fs/xfs/xfs_trace.h25
-rw-r--r--fs/xfs/xfs_trans_ail.c2
-rw-r--r--include/acpi/acpi_drivers.h2
-rw-r--r--include/asm-generic/export.h83
-rw-r--r--include/asm-generic/page.h12
-rw-r--r--include/asm-generic/vmlinux.lds.h3
-rw-r--r--include/dt-bindings/clock/qcom,sm8350-videocc.h35
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8996-cbf.h12
-rw-r--r--include/dt-bindings/leds/leds-lp55xx.h10
-rw-r--r--include/dt-bindings/mfd/stm32f7-rcc.h1
-rw-r--r--include/dt-bindings/mux/ti-serdes.h62
-rw-r--r--include/dt-bindings/reset/qcom,sm8350-videocc.h18
-rw-r--r--include/kvm/arm_pmu.h8
-rw-r--r--include/kvm/iodev.h6
-rw-r--r--include/linux/acpi.h23
-rw-r--r--include/linux/amba/bus.h2
-rw-r--r--include/linux/arm_ffa.h8
-rw-r--r--include/linux/bitmap.h8
-rw-r--r--include/linux/blk-mq.h8
-rw-r--r--include/linux/bootmem_info.h2
-rw-r--r--include/linux/cleanup.h171
-rw-r--r--include/linux/compat.h82
-rw-r--r--include/linux/compiler-clang.h9
-rw-r--r--include/linux/compiler_attributes.h6
-rw-r--r--include/linux/coresight.h127
-rw-r--r--include/linux/dax.h13
-rw-r--r--include/linux/device.h129
-rw-r--r--include/linux/dma/edma.h7
-rw-r--r--include/linux/dsa/sja1105.h4
-rw-r--r--include/linux/export-internal.h49
-rw-r--r--include/linux/export.h130
-rw-r--r--include/linux/f2fs_fs.h1
-rw-r--r--include/linux/file.h6
-rw-r--r--include/linux/firewire.h82
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h2
-rw-r--r--include/linux/hsi/ssi_protocol.h1
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/i8254.h21
-rw-r--r--include/linux/iio/common/st_sensors.h1
-rw-r--r--include/linux/iio/iio.h3
-rw-r--r--include/linux/iio/trigger.h1
-rw-r--r--include/linux/interconnect-clk.h22
-rw-r--r--include/linux/interconnect.h8
-rw-r--r--include/linux/irqflags.h7
-rw-r--r--include/linux/kdb.h2
-rw-r--r--include/linux/kgdb.h1
-rw-r--r--include/linux/kvm_host.h9
-rw-r--r--include/linux/leds.h43
-rw-r--r--include/linux/lockd/bind.h2
-rw-r--r--include/linux/mfd/axp20x.h84
-rw-r--r--include/linux/mfd/intel-m10-bmc.h43
-rw-r--r--include/linux/mfd/max5970.h (renamed from include/linux/mfd/max597x.h)16
-rw-r--r--include/linux/mfd/max77541.h91
-rw-r--r--include/linux/mfd/rt5033-private.h64
-rw-r--r--include/linux/mfd/rt5033.h24
-rw-r--r--include/linux/mfd/stpmic1.h12
-rw-r--r--include/linux/mm.h10
-rw-r--r--include/linux/mod_devicetable.h13
-rw-r--r--include/linux/mutex.h4
-rw-r--r--include/linux/nfs_fs_sb.h6
-rw-r--r--include/linux/nfs_xdr.h3
-rw-r--r--include/linux/nodemask.h2
-rw-r--r--include/linux/parport.h2
-rw-r--r--include/linux/pds/pds_adminq.h247
-rw-r--r--include/linux/pds/pds_common.h21
-rw-r--r--include/linux/percpu.h4
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/phy/phy.h2
-rw-r--r--include/linux/platform_data/leds-lp55xx.h3
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h2
-rw-r--r--include/linux/pm.h10
-rw-r--r--include/linux/preempt.h5
-rw-r--r--include/linux/property.h12
-rw-r--r--include/linux/rcupdate.h3
-rw-r--r--include/linux/rcuwait.h23
-rw-r--r--include/linux/rwsem.h8
-rw-r--r--include/linux/sched/task.h2
-rw-r--r--include/linux/serial_8250.h45
-rw-r--r--include/linux/serial_core.h7
-rw-r--r--include/linux/sh_intc.h6
-rw-r--r--include/linux/slab.h3
-rw-r--r--include/linux/soundwire/sdw.h6
-rw-r--r--include/linux/soundwire/sdw_intel.h94
-rw-r--r--include/linux/spinlock.h31
-rw-r--r--include/linux/srcu.h5
-rw-r--r--include/linux/sunrpc/auth.h2
-rw-r--r--include/linux/sunrpc/clnt.h13
-rw-r--r--include/linux/sunrpc/xprt.h18
-rw-r--r--include/linux/sunrpc/xprtsock.h3
-rw-r--r--include/linux/syscalls.h140
-rw-r--r--include/linux/thunderbolt.h18
-rw-r--r--include/linux/uacce.h4
-rw-r--r--include/linux/ulpi/driver.h2
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/typec_mux.h11
-rw-r--r--include/linux/virtio.h5
-rw-r--r--include/linux/virtio_pci_modern.h6
-rw-r--r--include/media/dvbdev.h6
-rw-r--r--include/media/jpeg.h20
-rw-r--r--include/media/media-entity.h6
-rw-r--r--include/media/v4l2-common.h2
-rw-r--r--include/media/v4l2-ctrls.h8
-rw-r--r--include/media/v4l2-mem2mem.h18
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/mgmt.h1
-rw-r--r--include/rdma/uverbs_ioctl.h6
-rw-r--r--include/soc/mscc/ocelot.h10
-rw-r--r--include/trace/events/f2fs.h24
-rw-r--r--include/trace/events/fib.h2
-rw-r--r--include/trace/events/fib6.h2
-rw-r--r--include/trace/events/net.h3
-rw-r--r--include/trace/events/sunrpc.h96
-rw-r--r--include/uapi/asm-generic/bitsperlong.h13
-rw-r--r--include/uapi/asm-generic/unistd.h129
-rw-r--r--include/uapi/linux/counter.h6
-rw-r--r--include/uapi/linux/dvb/frontend.h8
-rw-r--r--include/uapi/linux/dvb/version.h2
-rw-r--r--include/uapi/linux/firewire-cdev.h178
-rw-r--r--include/uapi/linux/io_uring.h2
-rw-r--r--include/uapi/linux/kvm.h6
-rw-r--r--include/uapi/linux/media.h28
-rw-r--r--include/uapi/linux/tps6594_pfsm.h37
-rw-r--r--include/uapi/linux/usb/ch9.h11
-rw-r--r--include/uapi/linux/v4l2-controls.h727
-rw-r--r--include/uapi/linux/vhost.h31
-rw-r--r--include/uapi/linux/vhost_types.h16
-rw-r--r--include/uapi/linux/videodev2.h17
-rw-r--r--include/uapi/linux/wireless.h2
-rw-r--r--include/video/sticore.h42
-rw-r--r--init/Makefile1
-rw-r--r--io_uring/io_uring.c49
-rw-r--r--io_uring/net.c8
-rw-r--r--kernel/bpf/btf.c6
-rw-r--r--kernel/debug/kdb/kdb_io.c16
-rw-r--r--kernel/debug/kdb/kdb_keyboard.c2
-rw-r--r--kernel/debug/kdb/kdb_private.h1
-rw-r--r--kernel/events/core.c1
-rw-r--r--kernel/fork.c1
-rwxr-xr-xkernel/gen_kheaders.sh9
-rw-r--r--kernel/module/internal.h12
-rw-r--r--kernel/module/main.c39
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/sys_ni.c110
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_boot.c8
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/bitmap.c2
-rw-r--r--lib/cpumask.c2
-rw-r--r--lib/dhry_1.c11
-rw-r--r--lib/fortify_kunit.c14
-rw-r--r--lib/scatterlist.c2
-rw-r--r--lib/test_bitmap.c18
-rw-r--r--lib/test_firmware.c12
-rw-r--r--mm/gup.c51
-rw-r--r--mm/hugetlb.c7
-rw-r--r--mm/kasan/generic.c3
-rw-r--r--mm/kasan/kasan.h12
-rw-r--r--mm/memory.c7
-rw-r--r--mm/mmap.c120
-rw-r--r--mm/mremap.c28
-rw-r--r--mm/nommu.c7
-rw-r--r--mm/page-writeback.c8
-rw-r--r--mm/slab.h16
-rw-r--r--net/bluetooth/hci_conn.c49
-rw-r--r--net/bluetooth/hci_event.c67
-rw-r--r--net/bluetooth/hci_sync.c30
-rw-r--r--net/bluetooth/hci_sysfs.c14
-rw-r--r--net/bluetooth/iso.c4
-rw-r--r--net/bluetooth/l2cap_core.c5
-rw-r--r--net/bluetooth/l2cap_sock.c2
-rw-r--r--net/bridge/br_if.c5
-rw-r--r--net/dsa/tag_sja1105.c90
-rw-r--r--net/ipv4/tcp_input.c12
-rw-r--r--net/mac80211/led.c2
-rw-r--r--net/mac80211/led.h8
-rw-r--r--net/mptcp/protocol.c7
-rw-r--r--net/netfilter/xt_LED.c3
-rw-r--r--net/sched/act_ipt.c70
-rw-r--r--net/sched/act_pedit.c1
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/sunrpc/Makefile2
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/sunrpc/auth_tls.c175
-rw-r--r--net/sunrpc/clnt.c22
-rw-r--r--net/sunrpc/rpcb_clnt.c39
-rw-r--r--net/sunrpc/sysfs.c1
-rw-r--r--net/sunrpc/sysfs.h7
-rw-r--r--net/sunrpc/xprtsock.c434
-rw-r--r--net/xdp/xsk.c5
-rw-r--r--samples/Kconfig7
-rw-r--r--samples/Makefile1
-rw-r--r--samples/pfsm/.gitignore2
-rw-r--r--samples/pfsm/Makefile4
-rw-r--r--samples/pfsm/pfsm-wakeup.c125
-rw-r--r--samples/pktgen/functions.sh13
-rwxr-xr-xsamples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh4
-rwxr-xr-xsamples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh4
-rwxr-xr-xsamples/pktgen/pktgen_sample01_simple.sh4
-rwxr-xr-xsamples/pktgen/pktgen_sample02_multiqueue.sh3
-rwxr-xr-xsamples/pktgen/pktgen_sample03_burst_single_flow.sh4
-rwxr-xr-xsamples/pktgen/pktgen_sample04_many_flows.sh4
-rwxr-xr-xsamples/pktgen/pktgen_sample05_flow_per_thread.sh4
-rwxr-xr-xsamples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh3
-rw-r--r--scripts/Makefile.build31
-rw-r--r--scripts/Makefile.clang3
-rw-r--r--scripts/Makefile.clean4
-rw-r--r--scripts/Makefile.compiler4
-rw-r--r--scripts/Makefile.lib3
-rw-r--r--scripts/Makefile.modfinal2
-rw-r--r--scripts/Makefile.modpost8
-rw-r--r--scripts/Makefile.vmlinux1
-rwxr-xr-xscripts/adjust_autoksyms.sh73
-rw-r--r--scripts/basic/fixdep.c3
-rwxr-xr-xscripts/check-local-export70
-rwxr-xr-xscripts/checkpatch.pl2
-rwxr-xr-xscripts/gen_autoksyms.sh62
-rwxr-xr-xscripts/gen_ksymdeps.sh30
-rw-r--r--scripts/kallsyms.c63
-rwxr-xr-xscripts/kconfig/streamline_config.pl2
-rwxr-xr-xscripts/kernel-doc31
-rwxr-xr-xscripts/min-tool-version.sh4
-rwxr-xr-xscripts/mksysmap10
-rw-r--r--scripts/mod/modpost.c785
-rw-r--r--scripts/mod/modpost.h6
-rwxr-xr-xscripts/package/builddeb14
-rwxr-xr-xscripts/remove-stale-files4
-rwxr-xr-xscripts/tags.sh11
-rw-r--r--security/apparmor/crypto.c10
-rw-r--r--security/apparmor/file.c2
-rw-r--r--security/apparmor/include/lib.h6
-rw-r--r--security/apparmor/lsm.c8
-rw-r--r--security/apparmor/policy.c20
-rw-r--r--security/apparmor/policy_compat.c20
-rw-r--r--security/apparmor/policy_unpack.c102
-rw-r--r--security/apparmor/policy_unpack_test.c13
-rw-r--r--security/apparmor/secid.c3
-rw-r--r--sound/core/jack.c15
-rw-r--r--sound/core/pcm_memory.c44
-rw-r--r--sound/core/seq/seq_ump_convert.c2
-rw-r--r--sound/core/ump_convert.c2
-rw-r--r--sound/firewire/fireface/ff.c2
-rw-r--r--sound/firewire/oxfw/oxfw.c2
-rw-r--r--sound/hda/hdac_regmap.c7
-rw-r--r--sound/pci/hda/patch_realtek.c46
-rw-r--r--sound/soc/sof/intel/hda.c33
-rw-r--r--sound/soc/sof/intel/shim.h1
-rw-r--r--tools/arch/arm64/include/uapi/asm/bitsperlong.h24
-rw-r--r--tools/arch/hexagon/include/uapi/asm/bitsperlong.h27
-rw-r--r--tools/arch/loongarch/include/uapi/asm/bitsperlong.h9
-rw-r--r--tools/arch/microblaze/include/uapi/asm/bitsperlong.h2
-rw-r--r--tools/arch/riscv/include/uapi/asm/bitsperlong.h14
-rw-r--r--tools/counter/.gitignore2
-rw-r--r--tools/counter/Makefile1
-rw-r--r--tools/include/uapi/asm-generic/bitsperlong.h14
-rw-r--r--tools/include/uapi/asm-generic/unistd.h129
-rw-r--r--tools/include/uapi/asm/bitsperlong.h6
-rw-r--r--tools/perf/Documentation/perf-config.txt2
-rw-r--r--tools/perf/Documentation/perf-lock.txt8
-rw-r--r--tools/perf/Makefile.config17
-rw-r--r--tools/perf/Makefile.perf45
-rw-r--r--tools/perf/arch/x86/util/evsel.c20
-rw-r--r--tools/perf/builtin-daemon.c2
-rw-r--r--tools/perf/builtin-kallsyms.c1
-rw-r--r--tools/perf/builtin-lock.c385
-rw-r--r--tools/perf/builtin-report.c1
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/cache.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/frontend.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/pipeline.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/frontend.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv17
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json811
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json143
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json410
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/memory.json142
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/other.json57
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json1121
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-cache.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json126
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json257
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/cache.json894
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/floating-point.json105
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/frontend.json377
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/memory.json394
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json113
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/other.json242
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json801
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json1571
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/uncore-interconnect.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/uncore-other.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json165
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json308
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/frontend.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json8
-rw-r--r--tools/perf/pmu-events/metric.py8
-rw-r--r--tools/perf/tests/builtin-test.c1
-rw-r--r--tools/perf/tests/expr.c4
-rw-r--r--tools/perf/tests/make4
-rw-r--r--tools/perf/tests/parse-events.c100
-rw-r--r--tools/perf/tests/pe-file-parsing.c3
-rwxr-xr-xtools/perf/tests/shell/lock_contention.sh36
-rw-r--r--tools/perf/util/bpf_skel/.gitignore1
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c2
-rw-r--r--tools/perf/util/bpf_skel/vmlinux/vmlinux.h (renamed from tools/perf/util/bpf_skel/vmlinux.h)10
-rw-r--r--tools/perf/util/dso.c7
-rw-r--r--tools/perf/util/dso.h3
-rw-r--r--tools/perf/util/event.c7
-rw-r--r--tools/perf/util/evsel.c30
-rw-r--r--tools/perf/util/evsel.h1
-rw-r--r--tools/perf/util/expr.c21
-rw-r--r--tools/perf/util/expr.h1
-rw-r--r--tools/perf/util/expr.l1
-rw-r--r--tools/perf/util/expr.y8
-rw-r--r--tools/perf/util/llvm-utils.c4
-rw-r--r--tools/perf/util/map.c14
-rw-r--r--tools/perf/util/map.h12
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c186
-rw-r--r--tools/perf/util/pmu.c27
-rw-r--r--tools/perf/util/pmu.h1
-rw-r--r--tools/perf/util/pmus.c7
-rw-r--r--tools/perf/util/probe-event.c16
-rw-r--r--tools/perf/util/python.c8
-rw-r--r--tools/perf/util/symbol.c136
-rw-r--r--tools/perf/util/symbol.h12
-rw-r--r--tools/perf/util/symbol_conf.h1
-rw-r--r--tools/perf/util/symbol_fprintf.c10
-rw-r--r--tools/perf/util/trace-event-info.c12
-rw-r--r--tools/perf/util/trace-event.h6
-rw-r--r--tools/perf/util/unwind-libdw.c8
-rw-r--r--tools/testing/cxl/Kbuild5
-rw-r--r--tools/testing/cxl/test/cxl.c36
-rw-r--r--tools/testing/cxl/test/mem.c367
-rw-r--r--tools/testing/cxl/test/mock.c59
-rw-r--r--tools/testing/cxl/test/mock.h3
-rw-r--r--tools/testing/nvdimm/test/nfit.c6
-rw-r--r--tools/testing/nvdimm/test/nfit_test.h29
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/kvm/Makefile19
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c32
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c96
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_base.h1
-rw-r--r--tools/testing/selftests/kvm/include/memstress.h8
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c17
-rw-r--r--tools/testing/selftests/kvm/lib/memstress.c75
-rw-r--r--tools/testing/selftests/kvm/lib/userfaultfd_util.c4
-rw-r--r--tools/testing/selftests/kvm/s390x/cmma_test.c700
-rw-r--r--tools/testing/selftests/kvm/x86_64/cpuid_test.c21
-rw-r--r--tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c259
-rw-r--r--tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c22
-rw-r--r--tools/testing/selftests/net/config1
-rw-r--r--tools/testing/selftests/net/mptcp/config1
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh3
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh29
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c10
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh2
-rw-r--r--tools/testing/selftests/riscv/vector/.gitignore1
-rw-r--r--tools/testing/selftests/riscv/vector/Makefile6
-rw-r--r--tools/testing/selftests/riscv/vector/v_initval_nolibc.c68
-rw-r--r--tools/testing/selftests/tty/.gitignore2
-rw-r--r--tools/testing/selftests/tty/Makefile5
-rw-r--r--tools/testing/selftests/tty/tty_tstamp_update.c88
-rwxr-xr-xtools/testing/selftests/wireguard/netns.sh30
-rw-r--r--tools/tracing/rtla/src/osnoise.c65
-rw-r--r--tools/tracing/rtla/src/osnoise.h5
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c90
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c83
-rw-r--r--tools/tracing/rtla/src/timerlat_aa.c35
-rw-r--r--tools/tracing/rtla/src/timerlat_aa.h5
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c262
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c229
-rw-r--r--tools/tracing/rtla/src/timerlat_u.c224
-rw-r--r--tools/tracing/rtla/src/timerlat_u.h18
-rw-r--r--tools/tracing/rtla/src/utils.c324
-rw-r--r--tools/tracing/rtla/src/utils.h7
-rw-r--r--tools/usb/usbip/configure.ac4
-rw-r--r--tools/usb/usbip/src/usbip_attach.c2
-rw-r--r--tools/usb/usbip/src/usbip_detach.c2
-rw-r--r--tools/usb/usbip/src/usbip_port.c2
-rw-r--r--tools/virtio/Makefile13
-rw-r--r--usr/Makefile1
-rwxr-xr-xusr/gen_initramfs.sh16
-rw-r--r--virt/kvm/coalesced_mmio.c9
-rw-r--r--virt/kvm/eventfd.c8
-rw-r--r--virt/kvm/kvm_main.c51
3030 files changed, 103334 insertions, 27196 deletions
diff --git a/.gitignore b/.gitignore
index 7f86e0837909..9fd4c9533b3d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,7 +16,6 @@
*.bin
*.bz2
*.c.[012]*.*
-*.cover
*.dt.yaml
*.dtb
*.dtbo
@@ -34,7 +33,6 @@
*.lz4
*.lzma
*.lzo
-*.mbx
*.mod
*.mod.c
*.o
@@ -51,7 +49,6 @@
*.symversions
*.tab.[ch]
*.tar
-*.usyms
*.xz
*.zst
Module.symvers
@@ -112,7 +109,6 @@ modules.order
#
/include/config/
/include/generated/
-/include/ksym/
/arch/*/include/generated/
# stgit generated dirs
diff --git a/.mailmap b/.mailmap
index 4a9d87472ba8..1bce47a7f2ce 100644
--- a/.mailmap
+++ b/.mailmap
@@ -5,7 +5,8 @@
# same person appearing not to be so or badly displayed. Also allows for
# old email addresses to map to new email addresses.
#
-# For format details, see "MAPPING AUTHORS" in "man git-shortlog".
+# For format details, see "man gitmailmap" or "MAPPING AUTHORS" in
+# "man git-shortlog" on older systems.
#
# Please keep this list dictionary sorted.
#
@@ -177,6 +178,9 @@ Gustavo Padovan <padovan@profusion.mobi>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
+Heiko Stuebner <heiko@sntech.de> <heiko.stuebner@bqreaders.com>
+Heiko Stuebner <heiko@sntech.de> <heiko.stuebner@theobroma-systems.com>
+Heiko Stuebner <heiko@sntech.de> <heiko.stuebner@vrull.eu>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
Henrik Rydberg <rydberg@bitmath.org>
@@ -275,6 +279,10 @@ Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
+Lee Jones <lee@kernel.org> <joneslee@google.com>
+Lee Jones <lee@kernel.org> <lee.jones@canonical.com>
+Lee Jones <lee@kernel.org> <lee.jones@linaro.org>
+Lee Jones <lee@kernel.org> <lee@ubuntu.com>
Leonard Crestez <leonard.crestez@nxp.com> Leonard Crestez <cdleonard@gmail.com>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonard Göhrs <l.goehrs@pengutronix.de>
@@ -301,6 +309,7 @@ Marek Behún <kabel@kernel.org> <marek.behun@nic.cz>
Marek Behún <kabel@kernel.org> Marek Behun <marek.behun@nic.cz>
Mark Brown <broonie@sirena.org.uk>
Mark Starovoytov <mstarovo@pm.me> <mstarovoitov@marvell.com>
+Markus Schneider-Pargmann <msp@baylibre.com> <mpa@pengutronix.de>
Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
diff --git a/Documentation/ABI/obsolete/o2cb b/Documentation/ABI/obsolete/o2cb
index fe7e45e17bc7..8f39b596731d 100644
--- a/Documentation/ABI/obsolete/o2cb
+++ b/Documentation/ABI/obsolete/o2cb
@@ -1,11 +1,11 @@
What: /sys/o2cb
Date: Dec 2005
KernelVersion: 2.6.16
-Contact: ocfs2-devel@oss.oracle.com
+Contact: ocfs2-devel@lists.linux.dev
Description: Ocfs2-tools looks at 'interface-revision' for versioning
information. Each logmask/ file controls a set of debug prints
and can be written into with the strings "allow", "deny", or
"off". Reading the file returns the current state.
Was renamed to /sys/fs/u2cb/
Users: ocfs2-tools. It's sufficient to mail proposed changes to
- ocfs2-devel@oss.oracle.com.
+ ocfs2-devel@lists.linux.dev.
diff --git a/Documentation/ABI/removed/o2cb b/Documentation/ABI/removed/o2cb
index 20c91adca6d4..61cff238fbe8 100644
--- a/Documentation/ABI/removed/o2cb
+++ b/Documentation/ABI/removed/o2cb
@@ -1,10 +1,10 @@
What: /sys/o2cb symlink
Date: May 2011
KernelVersion: 3.0
-Contact: ocfs2-devel@oss.oracle.com
+Contact: ocfs2-devel@lists.linux.dev
Description: This is a symlink: /sys/o2cb to /sys/fs/o2cb. The symlink is
removed when new versions of ocfs2-tools which know to look
in /sys/fs/o2cb are sufficiently prevalent. Don't code new
software to look here, it should try /sys/fs/o2cb instead.
Users: ocfs2-tools. It's sufficient to mail proposed changes to
- ocfs2-devel@oss.oracle.com.
+ ocfs2-devel@lists.linux.dev.
diff --git a/Documentation/ABI/stable/o2cb b/Documentation/ABI/stable/o2cb
index b62a967f01a0..3a83b5c54e93 100644
--- a/Documentation/ABI/stable/o2cb
+++ b/Documentation/ABI/stable/o2cb
@@ -1,10 +1,10 @@
What: /sys/fs/o2cb/
Date: Dec 2005
KernelVersion: 2.6.16
-Contact: ocfs2-devel@oss.oracle.com
+Contact: ocfs2-devel@lists.linux.dev
Description: Ocfs2-tools looks at 'interface-revision' for versioning
information. Each logmask/ file controls a set of debug prints
and can be written into with the strings "allow", "deny", or
"off". Reading the file returns the current state.
Users: ocfs2-tools. It's sufficient to mail proposed changes to
- ocfs2-devel@oss.oracle.com.
+ ocfs2-devel@lists.linux.dev.
diff --git a/Documentation/ABI/testing/sysfs-bus-counter b/Documentation/ABI/testing/sysfs-bus-counter
index 1417c4272c6c..dc3b3a5c876b 100644
--- a/Documentation/ABI/testing/sysfs-bus-counter
+++ b/Documentation/ABI/testing/sysfs-bus-counter
@@ -90,6 +90,60 @@ Description:
counter does not freeze at the boundary points, but
counts continuously throughout.
+ interrupt on terminal count:
+ The output signal is initially low, and will remain low
+ until the counter reaches zero. The output signal then
+ goes high and remains high until a new preset value is
+ set.
+
+ hardware retriggerable one-shot:
+ The output signal is initially high. The output signal
+ will go low by a trigger input signal, and will remain
+ low until the counter reaches zero. The output will then
+ go high and remain high until the next trigger. A
+ trigger results in loading the counter to the preset
+ value and setting the output signal low, thus starting
+ the one-shot pulse.
+
+ rate generator:
+ The output signal is initially high. When the counter
+ has decremented to 1, the output signal goes low for one
+ clock pulse. The output signal then goes high again, the
+ counter is reloaded to the preset value, and the process
+ repeats in a periodic manner as such.
+
+ square wave mode:
+ The output signal is initially high.
+
+ If the initial count is even, the counter is decremented
+ by two on succeeding clock pulses. When the count
+ expires, the output signal changes value and the
+ counter is reloaded to the preset value. The process
+ repeats in a periodic manner as such.
+
+ If the initial count is odd, the initial count minus one
+ (an even number) is loaded and then is decremented by
+ two on succeeding clock pulses. One clock pulse after
+ the count expires, the output signal goes low and the
+ counter is reloaded to the preset value minus one.
+ Succeeding clock pulses decrement the count by two. When
+ the count expires, the output goes high again and the
+ counter is reloaded to the preset value minus one. The
+ process repeats in a periodic manner as such.
+
+ software triggered strobe:
+ The output signal is initially high. When the count
+ expires, the output will go low for one clock pulse and
+ then go high again. The counting sequence is "triggered"
+ by setting the preset value.
+
+ hardware triggered strobe:
+ The output signal is initially high. Counting is started
+ by a trigger input signal. When the count expires, the
+ output signal will go low for one clock pulse and then
+ go high again. A trigger results in loading the counter
+ to the preset value.
+
What: /sys/bus/counter/devices/counterX/countY/count_mode_available
What: /sys/bus/counter/devices/counterX/countY/error_noise_available
What: /sys/bus/counter/devices/counterX/countY/function_available
diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
index 48ac0d911801..6350dd82b9a9 100644
--- a/Documentation/ABI/testing/sysfs-bus-cxl
+++ b/Documentation/ABI/testing/sysfs-bus-cxl
@@ -58,6 +58,54 @@ Description:
affinity for this device.
+What: /sys/bus/cxl/devices/memX/security/state
+Date: June, 2023
+KernelVersion: v6.5
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) Reading this file will display the CXL security state for
+ that device. Such states can be: 'disabled'; 'sanitize', when
+ a sanitization is currently underway; or those available only
+ for persistent memory: 'locked', 'unlocked' or 'frozen'. This
+ sysfs entry is select/poll capable from userspace to notify
+ upon completion of a sanitize operation.
+
+
+What: /sys/bus/cxl/devices/memX/security/sanitize
+Date: June, 2023
+KernelVersion: v6.5
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (WO) Write a boolean 'true' string value to this attribute to
+ sanitize the device to securely re-purpose or decommission it.
+ This is done by ensuring that all user data and meta-data,
+ whether it resides in persistent capacity, volatile capacity,
+ or the LSA, is made permanently unavailable by whatever means
+ is appropriate for the media type. This functionality requires
+ that the device not be actively decoding any HPA ranges.
+
+
+What: /sys/bus/cxl/devices/memX/security/erase
+Date: June, 2023
+KernelVersion: v6.5
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (WO) Write a boolean 'true' string value to this attribute to
+ secure erase user data by changing the media encryption keys for
+ all user data areas of the device.
+
+
+What: /sys/bus/cxl/devices/memX/firmware/
+Date: April, 2023
+KernelVersion: v6.5
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RW) Firmware uploader mechanism. The different files under
+ this directory can be used to upload and activate new
+ firmware for CXL devices. The interfaces under this are
+ documented in sysfs-class-firmware.
+
+
What: /sys/bus/cxl/devices/*/devtype
Date: June, 2021
KernelVersion: v5.14
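
The security/state attribute added above is described as select/poll capable. A minimal illustrative C sketch of that pattern follows; the "mem0" device name is a placeholder, and the POLLPRI | POLLERR events plus the re-read from offset 0 follow the usual sysfs notification convention rather than anything stated in the attribute description.

/*
 * Illustrative sketch: wait for a CXL sanitize to finish by polling
 * /sys/bus/cxl/devices/memX/security/state. "mem0" is a placeholder.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/sys/bus/cxl/devices/mem0/security/state", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLPRI | POLLERR;

	for (;;) {
		/* Re-read the attribute from offset 0 to arm the next notification. */
		lseek(pfd.fd, 0, SEEK_SET);
		n = read(pfd.fd, buf, sizeof(buf) - 1);
		if (n < 0)
			break;
		buf[n] = '\0';
		printf("security state: %s", buf);	/* sysfs value ends in '\n' */
		if (strncmp(buf, "sanitize", 8) != 0)
			break;		/* no sanitize in flight (or it completed) */

		/* Block until the attribute changes, e.g. sanitize completes. */
		if (poll(&pfd, 1, -1) < 0)
			break;
	}
	close(pfd.fd);
	return 0;
}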
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index cb172db41b34..be663258b9b7 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -292,6 +292,16 @@ Description:
which is marked with early_stop has failed to initialize, it will ignore
all future connections until this attribute is clear.
+What: /sys/bus/usb/devices/.../<hub_interface>/port<X>/state
+Date: June 2023
+Contact: Roy Luo <royluo@google.com>
+Description:
+ Indicates current state of the USB device attached to the port.
+ Valid states are: 'not-attached', 'attached', 'powered',
+ 'reconnecting', 'unauthenticated', 'default', 'addressed',
+ 'configured', and 'suspended'. This file supports poll() to
+ monitor the state change from user space.
+
What: /sys/bus/usb/devices/.../power/usb2_lpm_l1_timeout
Date: May 2013
Contact: Mathias Nyman <mathias.nyman@linux.intel.com>
diff --git a/Documentation/ABI/testing/sysfs-class-led-driver-aw200xx b/Documentation/ABI/testing/sysfs-class-led-driver-aw200xx
new file mode 100644
index 000000000000..6d4449cf9d71
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-driver-aw200xx
@@ -0,0 +1,5 @@
+What: /sys/class/leds/<led>/dim
+Date: May 2023
+Description: 64-level DIM current. If you write a negative value or
+ "auto", the dim will be calculated according to the
+ brightness.
diff --git a/Documentation/ABI/testing/sysfs-class-net-qmi b/Documentation/ABI/testing/sysfs-class-net-qmi
index 47e6b9732337..b028f5bc86db 100644
--- a/Documentation/ABI/testing/sysfs-class-net-qmi
+++ b/Documentation/ABI/testing/sysfs-class-net-qmi
@@ -62,7 +62,7 @@ Description:
What: /sys/class/net/<iface>/qmi/pass_through
Date: January 2021
KernelVersion: 5.12
-Contact: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Contact: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
Description:
Boolean. Default: 'N'
diff --git a/Documentation/ABI/testing/sysfs-devices-hisi_ptt b/Documentation/ABI/testing/sysfs-devices-hisi_ptt
index 82de6d710266..d7e206b4901c 100644
--- a/Documentation/ABI/testing/sysfs-devices-hisi_ptt
+++ b/Documentation/ABI/testing/sysfs-devices-hisi_ptt
@@ -59,3 +59,55 @@ Description: (RW) Control the allocated buffer watermark of outbound packets.
The available tune data is [0, 1, 2]. Writing a negative value
will return an error, and out of range values will be converted
to 2. The value indicates a probable level of the event.
+
+What: /sys/devices/hisi_ptt<sicl_id>_<core_id>/root_port_filters
+Date: May 2023
+KernelVersion: 6.5
+Contact: Yicong Yang <yangyicong@hisilicon.com>
+Description: This directory contains the files providing the PCIe Root Port filters
+ information used for PTT trace. Each file is named after the supported
+ Root Port device name <domain>:<bus>:<device>.<function>.
+
+ See the description of the "filter" in Documentation/trace/hisi-ptt.rst
+ for more information.
+
+What: /sys/devices/hisi_ptt<sicl_id>_<core_id>/root_port_filters/multiselect
+Date: May 2023
+KernelVersion: 6.5
+Contact: Yicong Yang <yangyicong@hisilicon.com>
+Description: (Read) Indicates if this kind of filter can be selected at the same
+ time as other filters, or must be used on its own. 1 indicates
+ the former case and 0 indicates the latter.
+
+What: /sys/devices/hisi_ptt<sicl_id>_<core_id>/root_port_filters/<bdf>
+Date: May 2023
+KernelVersion: 6.5
+Contact: Yicong Yang <yangyicong@hisilicon.com>
+Description: (Read) Indicates the filter value of this Root Port filter, which
+ can be used to control which TLP headers the PTT trace captures.
+
+What: /sys/devices/hisi_ptt<sicl_id>_<core_id>/requester_filters
+Date: May 2023
+KernelVersion: 6.5
+Contact: Yicong Yang <yangyicong@hisilicon.com>
+Description: This directory contains the files providing the PCIe Requester filters
+ information used for PTT trace. Each file is named after the supported
+ Endpoint device name <domain>:<bus>:<device>.<function>.
+
+ See the description of the "filter" in Documentation/trace/hisi-ptt.rst
+ for more information.
+
+What: /sys/devices/hisi_ptt<sicl_id>_<core_id>/requester_filters/multiselect
+Date: May 2023
+KernelVersion: 6.5
+Contact: Yicong Yang <yangyicong@hisilicon.com>
+Description: (Read) Indicates if this kind of filter can be selected at the same
+ time as other filters, or must be used on its own. 1 indicates
+ the former case and 0 indicates the latter.
+
+What: /sys/devices/hisi_ptt<sicl_id>_<core_id>/requester_filters/<bdf>
+Date: May 2023
+KernelVersion: 6.5
+Contact: Yicong Yang <yangyicong@hisilicon.com>
+Description: (Read) Indicates the filter value of this Requester filter, which
+ can be used to control which TLP headers the PTT trace captures.
diff --git a/Documentation/ABI/testing/sysfs-driver-eud b/Documentation/ABI/testing/sysfs-driver-eud
index 83f3872182a4..2bab0db2d2f0 100644
--- a/Documentation/ABI/testing/sysfs-driver-eud
+++ b/Documentation/ABI/testing/sysfs-driver-eud
@@ -1,4 +1,4 @@
-What: /sys/bus/platform/drivers/eud/.../enable
+What: /sys/bus/platform/drivers/qcom_eud/.../enable
Date: February 2022
Contact: Souradeep Chowdhury <quic_schowdhu@quicinc.com>
Description:
diff --git a/Documentation/ABI/testing/sysfs-ocfs2 b/Documentation/ABI/testing/sysfs-ocfs2
index b7cc516a8a8a..494d7c1ac710 100644
--- a/Documentation/ABI/testing/sysfs-ocfs2
+++ b/Documentation/ABI/testing/sysfs-ocfs2
@@ -1,13 +1,13 @@
What: /sys/fs/ocfs2/
Date: April 2008
-Contact: ocfs2-devel@oss.oracle.com
+Contact: ocfs2-devel@lists.linux.dev
Description:
The /sys/fs/ocfs2 directory contains knobs used by the
ocfs2-tools to interact with the filesystem.
What: /sys/fs/ocfs2/max_locking_protocol
Date: April 2008
-Contact: ocfs2-devel@oss.oracle.com
+Contact: ocfs2-devel@lists.linux.dev
Description:
The /sys/fs/ocfs2/max_locking_protocol file displays version
of ocfs2 locking supported by the filesystem. This version
@@ -28,7 +28,7 @@ Description:
What: /sys/fs/ocfs2/loaded_cluster_plugins
Date: April 2008
-Contact: ocfs2-devel@oss.oracle.com
+Contact: ocfs2-devel@lists.linux.dev
Description:
The /sys/fs/ocfs2/loaded_cluster_plugins file describes
the available plugins to support ocfs2 cluster operation.
@@ -48,7 +48,7 @@ Description:
What: /sys/fs/ocfs2/active_cluster_plugin
Date: April 2008
-Contact: ocfs2-devel@oss.oracle.com
+Contact: ocfs2-devel@lists.linux.dev
Description:
The /sys/fs/ocfs2/active_cluster_plugin displays which
cluster plugin is currently in use by the filesystem.
@@ -65,7 +65,7 @@ Description:
What: /sys/fs/ocfs2/cluster_stack
Date: April 2008
-Contact: ocfs2-devel@oss.oracle.com
+Contact: ocfs2-devel@lists.linux.dev
Description:
The /sys/fs/ocfs2/cluster_stack file contains the name
of current ocfs2 cluster stack. This value is set by
@@ -86,4 +86,4 @@ Description:
stack return an error.
Users:
- ocfs2-tools <ocfs2-tools-devel@oss.oracle.com>
+ ocfs2-tools <ocfs2-tools-devel@lists.linux.dev>
diff --git a/Documentation/admin-guide/acpi/ssdt-overlays.rst b/Documentation/admin-guide/acpi/ssdt-overlays.rst
index b5fbf54dca19..5ea9f4a3b76e 100644
--- a/Documentation/admin-guide/acpi/ssdt-overlays.rst
+++ b/Documentation/admin-guide/acpi/ssdt-overlays.rst
@@ -103,7 +103,7 @@ allows a persistent, OS independent way of storing the user defined SSDTs. There
is also work underway to implement EFI support for loading user defined SSDTs
and using this method will make it easier to convert to the EFI loading
mechanism when that will arrive. To enable it, the
-CONFIG_EFI_CUSTOM_SSDT_OVERLAYS shoyld be chosen to y.
+CONFIG_EFI_CUSTOM_SSDT_OVERLAYS should be set to y.
In order to load SSDTs from an EFI variable the ``"efivar_ssdt=..."`` kernel
command line parameter can be used (the name has a limitation of 16 characters).
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 85fb0fa5d091..a1457995fd41 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4064,7 +4064,7 @@
extra details on the taint flags that users can pick
to compose the bitmask to assign to panic_on_taint.
- panic_on_warn panic() instead of WARN(). Useful to cause kdump
+ panic_on_warn=1 panic() instead of WARN(). Useful to cause kdump
on a WARN().
parkbd.port= [HW] Parallel port number the keyboard adapter is
diff --git a/Documentation/admin-guide/media/rkisp1.rst b/Documentation/admin-guide/media/rkisp1.rst
index ccf418713623..6f14d9561fa5 100644
--- a/Documentation/admin-guide/media/rkisp1.rst
+++ b/Documentation/admin-guide/media/rkisp1.rst
@@ -10,8 +10,8 @@ Introduction
============
This file documents the driver for the Rockchip ISP1 that is part of RK3288
-and RK3399 SoCs. The driver is located under drivers/staging/media/rkisp1
-and uses the Media-Controller API.
+and RK3399 SoCs. The driver is located under drivers/media/platform/rockchip/
+rkisp1 and uses the Media-Controller API.
Revisions
=========
diff --git a/Documentation/admin-guide/perf/cxl.rst b/Documentation/admin-guide/perf/cxl.rst
new file mode 100644
index 000000000000..9233ea0d0b10
--- /dev/null
+++ b/Documentation/admin-guide/perf/cxl.rst
@@ -0,0 +1,68 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================
+CXL Performance Monitoring Unit (CPMU)
+======================================
+
+The CXL rev 3.0 specification provides a definition of CXL Performance
+Monitoring Unit in section 13.2: Performance Monitoring.
+
+CXL components (e.g. Root Port, Switch Upstream Port, End Point) may have
+any number of CPMU instances. CPMU capabilities are fully discoverable from
+the devices. The specification provides event definitions for all CXL protocol
+message types and a set of additional events for things commonly counted on
+CXL devices (e.g. DRAM events).
+
+CPMU driver
+===========
+
+The CPMU driver registers a perf PMU with the name pmu_mem<X>.<Y> on the CXL bus
+representing the Yth CPMU for memX.
+
+ /sys/bus/cxl/devices/pmu_mem<X>.<Y>
+
+The associated PMU is registered as
+
+ /sys/bus/event_source/devices/cxl_pmu_mem<X>.<Y>
+
+In common with other CXL bus devices, the id has no specific meaning and the
+relationship to a specific CXL device should be established via the device parent
+of the device on the CXL bus.
+
+The PMU driver describes the available events and filter options in sysfs.
+
+The "format" directory describes all formats of the config (event vendor id,
+group id and mask) config1 (threshold, filter enables) and config2 (filter
+parameters) fields of the perf_event_attr structure. The "events" directory
+describes all documented events show in perf list.
+
+The events shown in perf list are the most fine-grained events, with a single
+bit of the event mask set. More general events may be enabled by setting
+multiple mask bits in config. For example, all Device to Host Read Requests
+may be captured on a single counter by setting the bits for all of
+
+* d2h_req_rdcurr
+* d2h_req_rdown
+* d2h_req_rdshared
+* d2h_req_rdany
+* d2h_req_rdownnodata
+
+Example of usage::
+
+ $# perf list
+ cxl_pmu_mem0.0/clock_ticks/ [Kernel PMU event]
+ cxl_pmu_mem0.0/d2h_req_rdshared/ [Kernel PMU event]
+ cxl_pmu_mem0.0/h2d_req_snpcur/ [Kernel PMU event]
+ cxl_pmu_mem0.0/h2d_req_snpdata/ [Kernel PMU event]
+ cxl_pmu_mem0.0/h2d_req_snpinv/ [Kernel PMU event]
+ -----------------------------------------------------------
+
+ $# perf stat -a -e cxl_pmu_mem0.0/clock_ticks/ -e cxl_pmu_mem0.0/d2h_req_rdshared/
+
+Vendor specific events may also be available and if so can be used via
+
+ $# perf stat -a -e cxl_pmu_mem0.0/vid=VID,gid=GID,mask=MASK/
+
+The driver does not support sampling, so "perf record" is unsupported.
+It only supports system-wide counting, so attaching to a task is
+unsupported.
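
As a rough illustration of the system-wide counting model described above, the C sketch below opens a single CPMU event with perf_event_open(). It is only a sketch under stated assumptions: the cxl_pmu_mem0.0 instance is hypothetical, and the numeric type and config values are placeholders that would in practice be read from that PMU's type, format and events entries under /sys/bus/event_source/devices/.

/*
 * Illustrative sketch: count one CPMU event system-wide on CPU 0.
 * The PMU "type" id and event "config" encoding must be taken from
 * /sys/bus/event_source/devices/cxl_pmu_mem0.0/{type,format,events};
 * the numeric values below are placeholders, not real encodings.
 * Needs CAP_PERFMON (or root) because counting is system-wide.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = 42;		/* placeholder: read .../cxl_pmu_mem0.0/type */
	attr.config = 0x1;	/* placeholder: encoding from format/events */

	/* pid == -1 plus a CPU number selects system-wide counting. */
	fd = perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}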
diff --git a/Documentation/admin-guide/perf/index.rst b/Documentation/admin-guide/perf/index.rst
index 9de64a40adab..f60be04e4e33 100644
--- a/Documentation/admin-guide/perf/index.rst
+++ b/Documentation/admin-guide/perf/index.rst
@@ -21,3 +21,4 @@ Performance monitor support
alibaba_pmu
nvidia-pmu
meson-ddr-pmu
+ cxl
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index f093a9d8bc5c..496cdca5cb99 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -52,6 +52,9 @@ stable kernels.
| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
+| Ampere | AmpereOne | AC03_CPU_38 | AMPERE_ERRATUM_AC03_CPU_38 |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2064142 | ARM64_ERRATUM_2064142 |
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-cti.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-cti.yaml
index 0c5b875cb654..d6c84b6e7fe6 100644
--- a/Documentation/devicetree/bindings/arm/arm,coresight-cti.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-cti.yaml
@@ -287,7 +287,7 @@ examples:
arm,trig-in-sigs = <0 1>;
arm,trig-in-types = <PE_DBGTRIGGER
PE_PMUIRQ>;
- arm,trig-out-sigs=<0 1 2 >;
+ arm,trig-out-sigs = <0 1 2 >;
arm,trig-out-types = <PE_EDBGREQ
PE_DBGRESTART
PE_CTIIRQ>;
@@ -309,24 +309,24 @@ examples:
trig-conns@0 {
reg = <0>;
- arm,trig-in-sigs=<0>;
- arm,trig-in-types=<GEN_INTREQ>;
- arm,trig-out-sigs=<0>;
- arm,trig-out-types=<GEN_HALTREQ>;
+ arm,trig-in-sigs = <0>;
+ arm,trig-in-types = <GEN_INTREQ>;
+ arm,trig-out-sigs = <0>;
+ arm,trig-out-types = <GEN_HALTREQ>;
arm,trig-conn-name = "sys_profiler";
};
trig-conns@1 {
reg = <1>;
- arm,trig-out-sigs=<2 3>;
- arm,trig-out-types=<GEN_HALTREQ GEN_RESTARTREQ>;
+ arm,trig-out-sigs = <2 3>;
+ arm,trig-out-types = <GEN_HALTREQ GEN_RESTARTREQ>;
arm,trig-conn-name = "watchdog";
};
trig-conns@2 {
reg = <2>;
- arm,trig-in-sigs=<1 6>;
- arm,trig-in-types=<GEN_HALTREQ GEN_RESTARTREQ>;
+ arm,trig-in-sigs = <1 6>;
+ arm,trig-in-types = <GEN_HALTREQ GEN_RESTARTREQ>;
arm,trig-conn-name = "g_counter";
};
};
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml
new file mode 100644
index 000000000000..cb78cfa56702
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/arm,coresight-dummy-sink.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Coresight Dummy sink component
+
+description: |
+ CoreSight components are compliant with the ARM CoreSight architecture
+ specification and can be connected in various topologies to suit a particular
+ SoCs tracing needs. These trace components can generally be classified as
+ sinks, links and sources. Trace data produced by one or more sources flows
+ through the intermediate links connecting the source to the currently selected
+ sink.
+
+ The Coresight dummy sink component is for the specific coresight sink devices
+ the kernel doesn't have permission to access or configure, e.g., CoreSight EUD on
+ Qualcomm platforms. It is a mini-USB hub implemented to support the USB-based
+ debug and trace capabilities. For this device, a dummy driver is needed to
+ register it as a Coresight sink device on the kernel side, so that a path can be
+ created in the driver. The trace flow is then transferred to the EUD via the
+ coresight link of the AP processor. It provides a Coresight API for operations on
+ dummy sink devices, such as enabling and disabling them. It also provides
+ the Coresight dummy sink paths for debugging.
+
+ The primary use case of the coresight dummy sink is to build a path on the kernel
+ side for the dummy sink component.
+
+maintainers:
+ - Mike Leach <mike.leach@linaro.org>
+ - Suzuki K Poulose <suzuki.poulose@arm.com>
+ - James Clark <james.clark@arm.com>
+ - Mao Jinlong <quic_jinlmao@quicinc.com>
+ - Hao Zhang <quic_hazha@quicinc.com>
+
+properties:
+ compatible:
+ enum:
+ - arm,coresight-dummy-sink
+
+ in-ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port:
+ description: Input connection from the Coresight Trace bus to
+ the dummy sink, such as the Embedded USB debugger (EUD).
+
+ $ref: /schemas/graph.yaml#/properties/port
+
+required:
+ - compatible
+ - in-ports
+
+additionalProperties: false
+
+examples:
+ # Minimum dummy sink definition. Dummy sink connect to coresight replicator.
+ - |
+ sink {
+ compatible = "arm,coresight-dummy-sink";
+
+ in-ports {
+ port {
+ eud_in_replicator_swao: endpoint {
+ remote-endpoint = <&replicator_swao_out_eud>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
new file mode 100644
index 000000000000..5fedaed49a1f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/arm,coresight-dummy-source.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Coresight Dummy source component
+
+description: |
+ CoreSight components are compliant with the ARM CoreSight architecture
+ specification and can be connected in various topologies to suit a particular
+ SoCs tracing needs. These trace components can generally be classified as
+ sinks, links and sources. Trace data produced by one or more sources flows
+ through the intermediate links connecting the source to the currently selected
+ sink.
+
+ The Coresight dummy source component is for the specific coresight source
+ devices the kernel doesn't have permission to access or configure. On some SoCs,
+ there are Coresight source trace components on a sub-processor which
+ are connected to the AP processor via a debug bus. For these devices, a dummy driver
+ is needed to register them as Coresight source devices, so that paths can be
+ created in the driver. It provides a Coresight API for operations on dummy
+ source devices, such as enabling and disabling them. It also provides the
+ Coresight dummy source paths for debugging.
+
+ The primary use case of the coresight dummy source is to build a path on the
+ kernel side for the dummy source component.
+
+maintainers:
+ - Mike Leach <mike.leach@linaro.org>
+ - Suzuki K Poulose <suzuki.poulose@arm.com>
+ - James Clark <james.clark@arm.com>
+ - Mao Jinlong <quic_jinlmao@quicinc.com>
+ - Hao Zhang <quic_hazha@quicinc.com>
+
+properties:
+ compatible:
+ enum:
+ - arm,coresight-dummy-source
+
+ out-ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port:
+ description: Output connection from the source to Coresight
+ Trace bus.
+ $ref: /schemas/graph.yaml#/properties/port
+
+required:
+ - compatible
+ - out-ports
+
+additionalProperties: false
+
+examples:
+ # Minimum dummy source definition. Dummy source connect to coresight funnel.
+ - |
+ source {
+ compatible = "arm,coresight-dummy-source";
+
+ out-ports {
+ port {
+ dummy_riscv_out_funnel_swao: endpoint {
+ remote-endpoint = <&funnel_swao_in_dummy_riscv>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index ab1b352344ae..67a66bf74895 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -52,100 +52,6 @@ Example:
reg = <0xe3804000 0x1000>;
};
-SHDWC Shutdown Controller
-
-required properties:
-- compatible: Should be "atmel,<chip>-shdwc".
- <chip> can be "at91sam9260", "at91sam9rl" or "at91sam9x5".
-- reg: Should contain registers location and length
-- clocks: phandle to input clock.
-
-optional properties:
-- atmel,wakeup-mode: String, operation mode of the wakeup mode.
- Supported values are: "none", "high", "low", "any".
-- atmel,wakeup-counter: Counter on Wake-up 0 (between 0x0 and 0xf).
-
-optional at91sam9260 properties:
-- atmel,wakeup-rtt-timer: boolean to enable Real-time Timer Wake-up.
-
-optional at91sam9rl properties:
-- atmel,wakeup-rtc-timer: boolean to enable Real-time Clock Wake-up.
-- atmel,wakeup-rtt-timer: boolean to enable Real-time Timer Wake-up.
-
-optional at91sam9x5 properties:
-- atmel,wakeup-rtc-timer: boolean to enable Real-time Clock Wake-up.
-
-Example:
-
- shdwc@fffffd10 {
- compatible = "atmel,at91sam9260-shdwc";
- reg = <0xfffffd10 0x10>;
- clocks = <&clk32k>;
- };
-
-SHDWC SAMA5D2-Compatible Shutdown Controller
-
-1) shdwc node
-
-required properties:
-- compatible: should be "atmel,sama5d2-shdwc", "microchip,sam9x60-shdwc" or
- "microchip,sama7g5-shdwc"
-- reg: should contain registers location and length
-- clocks: phandle to input clock.
-- #address-cells: should be one. The cell is the wake-up input index.
-- #size-cells: should be zero.
-
-optional properties:
-
-- debounce-delay-us: minimum wake-up inputs debouncer period in
- microseconds. It's usually a board-related property.
-- atmel,wakeup-rtc-timer: boolean to enable Real-Time Clock wake-up.
-
-optional microchip,sam9x60-shdwc or microchip,sama7g5-shdwc properties:
-- atmel,wakeup-rtt-timer: boolean to enable Real-time Timer Wake-up.
-
-The node contains child nodes for each wake-up input that the platform uses.
-
-2) input nodes
-
-Wake-up input nodes are usually described in the "board" part of the Device
-Tree. Note also that input 0 is linked to the wake-up pin and is frequently
-used.
-
-Required properties:
-- reg: should contain the wake-up input index [0 - 15].
-
-Optional properties:
-- atmel,wakeup-active-high: boolean, the corresponding wake-up input described
- by the child, forces the wake-up of the core power supply on a high level.
- The default is to be active low.
-
-Example:
-
-On the SoC side:
- shdwc@f8048010 {
- compatible = "atmel,sama5d2-shdwc";
- reg = <0xf8048010 0x10>;
- clocks = <&clk32k>;
- #address-cells = <1>;
- #size-cells = <0>;
- atmel,wakeup-rtc-timer;
- };
-
-On the board side:
- shdwc@f8048010 {
- debounce-delay-us = <976>;
-
- input@0 {
- reg = <0>;
- };
-
- input@1 {
- reg = <1>;
- atmel,wakeup-active-high;
- };
- };
-
Special Function Registers (SFR)
Special Function Registers (SFR) manage specific aspects of the integrated
diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
index 91b96065f7df..86b59de7707e 100644
--- a/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
+++ b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
@@ -96,8 +96,8 @@ examples:
compatible = "ti,k2g-sci";
ti,system-reboot-controller;
mbox-names = "rx", "tx";
- mboxes= <&msgmgr 5 2>,
- <&msgmgr 0 0>;
+ mboxes = <&msgmgr 5 2>,
+ <&msgmgr 0 0>;
reg-names = "debug_messages";
reg = <0x02921800 0x800>;
};
@@ -107,8 +107,8 @@ examples:
compatible = "ti,k2g-sci";
ti,host-id = <12>;
mbox-names = "rx", "tx";
- mboxes= <&secure_proxy_main 11>,
- <&secure_proxy_main 13>;
+ mboxes = <&secure_proxy_main 11>,
+ <&secure_proxy_main 13>;
reg-names = "debug_messages";
reg = <0x44083000 0x1000>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
index 659669bf224b..9436266828af 100644
--- a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
@@ -19,6 +19,7 @@ properties:
- qcom,ipq5332-a53pll
- qcom,ipq6018-a53pll
- qcom,ipq8074-a53pll
+ - qcom,ipq9574-a73pll
- qcom,msm8916-a53pll
- qcom,msm8939-a53pll
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8953.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8953.yaml
new file mode 100644
index 000000000000..fe9fd4cb185f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8953.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-msm8953.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller on MSM8953
+
+maintainers:
+ - Adam Skladowski <a_skl39@protonmail.com>
+ - Sireesh Kodali <sireeshkodali@protonmail.com>
+
+description: |
+ Qualcomm global clock control module provides the clocks, resets and power
+ domains on MSM8953.
+
+ See also: include/dt-bindings/clock/qcom,gcc-msm8953.h
+
+properties:
+ compatible:
+ const: qcom,gcc-msm8953
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Sleep clock source
+ - description: Byte clock from DSI PHY0
+ - description: Pixel clock from DSI PHY0
+ - description: Byte clock from DSI PHY1
+ - description: Pixel clock from DSI PHY1
+
+ clock-names:
+ items:
+ - const: xo
+ - const: sleep
+ - const: dsi0pll
+ - const: dsi0pllbyte
+ - const: dsi1pll
+ - const: dsi1pllbyte
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+
+ clock-controller@1800000 {
+ compatible = "qcom,gcc-msm8953";
+ reg = <0x01800000 0x80000>;
+ clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+ <&sleep_clk>,
+ <&dsi0_phy 1>,
+ <&dsi0_phy 0>,
+ <&dsi1_phy 1>,
+ <&dsi1_phy 0>;
+ clock-names = "xo",
+ "sleep",
+ "dsi0pll",
+ "dsi0pllbyte",
+ "dsi1pll",
+ "dsi1pllbyte";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml
index ae01e7749534..ba969e7a57bf 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml
@@ -30,7 +30,6 @@ properties:
enum:
- qcom,gcc-ipq6018
- qcom,gcc-mdm9607
- - qcom,gcc-msm8953
- qcom,gcc-mdm9615
required:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
index 06dce0c6b7d0..8bf9b6f49550 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
@@ -32,6 +32,10 @@ properties:
- const: bi_tcxo_ao
- const: sleep_clk
+ power-domains:
+ items:
+ - description: CX domain
+
required:
- compatible
- clocks
@@ -45,6 +49,8 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
clock-controller@100000 {
compatible = "qcom,gcc-sc7180";
reg = <0x00100000 0x1f0000>;
@@ -52,6 +58,7 @@ examples:
<&rpmhcc RPMH_CXO_CLK_A>,
<&sleep_clk>;
clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk";
+ power-domains = <&rpmhpd SC7180_CX>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml
index 947b47168cec..ff0b18bbb0fc 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml
@@ -43,6 +43,10 @@ properties:
- const: ufs_phy_tx_symbol_0_clk
- const: usb3_phy_wrapper_gcc_usb30_pipe_clk
+ power-domains:
+ items:
+ - description: CX domain
+
required:
- compatible
- clocks
@@ -56,6 +60,8 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
clock-controller@100000 {
compatible = "qcom,gcc-sc7280";
reg = <0x00100000 0x1f0000>;
@@ -71,6 +77,7 @@ examples:
"pcie_1_pipe_clk", "ufs_phy_rx_symbol_0_clk",
"ufs_phy_rx_symbol_1_clk", "ufs_phy_tx_symbol_0_clk",
"usb3_phy_wrapper_gcc_usb30_pipe_clk";
+ power-domains = <&rpmhpd SC7280_CX>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
index b752542ee20c..ead6665b9a45 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
@@ -23,11 +23,13 @@ properties:
clocks:
items:
- description: Board XO source
+ - description: Board active XO source
- description: Sleep clock source
clock-names:
items:
- const: bi_tcxo
+ - const: bi_tcxo_ao
- const: sleep_clk
required:
@@ -47,8 +49,9 @@ examples:
compatible = "qcom,gcc-sm8250";
reg = <0x00100000 0x1f0000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
<&sleep_clk>;
- clock-names = "bi_tcxo", "sleep_clk";
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk";
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml
index 1e3dc9deded9..a00216b3b15a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml
@@ -50,6 +50,9 @@ properties:
- const: gcc_gpu_gpll0_clk_src
- const: gcc_gpu_gpll0_div_clk_src
+ power-domains:
+ maxItems: 1
+
'#clock-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
index afc68eb9d7cc..944a0ea79cd6 100644
--- a/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
@@ -7,6 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Global Clock & Reset Controller on IPQ9574
maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
- Anusha Rao <quic_anusha@quicinc.com>
description: |
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
index acf0c923c24f..422f5776a771 100644
--- a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
@@ -31,11 +31,11 @@ properties:
- qcom,mmcc-sdm660
clocks:
- minItems: 8
+ minItems: 7
maxItems: 13
clock-names:
- minItems: 8
+ minItems: 7
maxItems: 13
'#clock-cells':
@@ -104,6 +104,34 @@ allOf:
compatible:
contains:
enum:
+ - qcom,mmcc-msm8226
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Board XO source
+ - description: MMSS GPLL0 voted clock
+ - description: GPLL0 voted clock
+ - description: GPLL1 voted clock
+ - description: GFX3D clock source
+ - description: DSI phy instance 0 dsi clock
+ - description: DSI phy instance 0 byte clock
+
+ clock-names:
+ items:
+ - const: xo
+ - const: mmss_gpll0_vote
+ - const: gpll0_vote
+ - const: gpll1_vote
+ - const: gfx3d_clk_src
+ - const: dsi0pll
+ - const: dsi0pllbyte
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,mmcc-msm8974
then:
properties:
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
index d5a250b7c2af..267cf8c26823 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
@@ -27,6 +27,7 @@ properties:
- qcom,sdm845-rpmh-clk
- qcom,sdx55-rpmh-clk
- qcom,sdx65-rpmh-clk
+ - qcom,sdx75-rpmh-clk
- qcom,sm6350-rpmh-clk
- qcom,sm8150-rpmh-clk
- qcom,sm8250-rpmh-clk
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm6375-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm6375-gpucc.yaml
index b480ead5bd69..cf4cad76f6c9 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm6375-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm6375-gpucc.yaml
@@ -27,9 +27,21 @@ properties:
- description: GPLL0 div branch source
- description: SNoC DVM GFX source
+ power-domains:
+ description:
+ A phandle and PM domain specifier for the VDD_GX power rail
+ maxItems: 1
+
+ required-opps:
+ description:
+ A phandle to an OPP node describing required VDD_GX performance point.
+ maxItems: 1
+
required:
- compatible
- clocks
+ - power-domains
+ - required-opps
allOf:
- $ref: qcom,gcc.yaml#
@@ -40,6 +52,7 @@ examples:
- |
#include <dt-bindings/clock/qcom,sm6375-gcc.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
soc {
#address-cells = <2>;
@@ -52,6 +65,8 @@ examples:
<&gcc GCC_GPU_GPLL0_CLK_SRC>,
<&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>,
<&gcc GCC_GPU_SNOC_DVM_GFX_CLK>;
+ power-domains = <&rpmpd SM6375_VDDGX>;
+ required-opps = <&rpmpd_opp_low_svs>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8350-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8350-videocc.yaml
new file mode 100644
index 000000000000..23505c8c3dbd
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8350-videocc.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sm8350-videocc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SM8350 Video Clock & Reset Controller
+
+maintainers:
+ - Konrad Dybcio <konrad.dybcio@linaro.org>
+
+description: |
+ Qualcomm video clock control module provides the clocks, resets and power
+ domains on Qualcomm SoCs.
+
+ See also::
+ include/dt-bindings/clock/qcom,videocc-sm8350.h
+ include/dt-bindings/reset/qcom,videocc-sm8350.h
+
+properties:
+ compatible:
+ const: qcom,sm8350-videocc
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Board active XO source
+ - description: Board sleep clock
+
+ power-domains:
+ description:
+ A phandle and PM domain specifier for the MMCX power domain.
+ maxItems: 1
+
+ required-opps:
+ description:
+ A phandle to an OPP node describing required MMCX performance point.
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - power-domains
+ - required-opps
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ clock-controller@abf0000 {
+ compatible = "qcom,sm8350-videocc";
+ reg = <0x0abf0000 0x10000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>;
+ power-domains = <&rpmhpd SM8350_MMCX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
index fe1fda77b835..f1c6dd50f184 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
@@ -17,7 +17,9 @@ description: |
properties:
compatible:
- const: qcom,sm8450-videocc
+ enum:
+ - qcom,sm8450-videocc
+ - qcom,sm8550-videocc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
index 6f5e7904181f..7e1bb992ce90 100644
--- a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
@@ -28,6 +28,7 @@ select:
- qcom,apq8064
- qcom,apq8096
- qcom,ipq8064
+ - qcom,ipq8074
- qcom,msm8939
- qcom,msm8960
- qcom,msm8974
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml
index 5fc4106110ad..d65926b4f054 100644
--- a/Documentation/devicetree/bindings/display/msm/gmu.yaml
+++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml
@@ -243,7 +243,7 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
gmu: gmu@506a000 {
- compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
+ compatible = "qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
reg = <0x506a000 0x30000>,
<0xb280000 0x10000>,
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml
index 1cdc91b3439f..200fbf1c74a0 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa0.yaml
@@ -74,7 +74,7 @@ examples:
vdd3-supply = <&vcclcd_reg>;
vci-supply = <&vlcd_reg>;
reset-gpios = <&gpy4 5 0>;
- power-on-delay= <50>;
+ power-on-delay = <50>;
reset-delay = <100>;
init-delay = <100>;
panel-width-mm = <58>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
index 6f43d885c9b3..df61cb5f5c54 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
@@ -121,11 +121,11 @@ examples:
#size-cells = <0>;
vopb_out_edp: endpoint@0 {
reg = <0>;
- remote-endpoint=<&edp_in_vopb>;
+ remote-endpoint = <&edp_in_vopb>;
};
vopb_out_hdmi: endpoint@1 {
reg = <1>;
- remote-endpoint=<&hdmi_in_vopb>;
+ remote-endpoint = <&hdmi_in_vopb>;
};
};
};
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vi.yaml b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vi.yaml
index a42bf33d1e7d..2181855a0920 100644
--- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vi.yaml
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vi.yaml
@@ -73,6 +73,18 @@ properties:
avdd-dsi-csi-supply:
description: DSI/CSI power supply. Must supply 1.2 V.
+ vip:
+ $ref: /schemas/display/tegra/nvidia,tegra20-vip.yaml
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Input from the VIP (parallel input capture) module
+
patternProperties:
"^csi@[0-9a-f]+$":
type: object
@@ -108,6 +120,22 @@ examples:
#include <dt-bindings/clock/tegra20-car.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ camera@48 {
+ compatible = "aptina,mt9v111";
+ reg = <0x48>;
+ clocks = <&camera_clk>;
+
+ port {
+ mt9v111_out: endpoint {
+ remote-endpoint = <&vi_vip_in>;
+ };
+ };
+ };
+ };
+
vi@54080000 {
compatible = "nvidia,tegra20-vi";
reg = <0x54080000 0x00040000>;
@@ -115,6 +143,37 @@ examples:
clocks = <&tegra_car TEGRA20_CLK_VI>;
resets = <&tegra_car 100>;
reset-names = "vi";
+
+ vip {
+ compatible = "nvidia,tegra20-vip";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ vi_vip_in: endpoint {
+ remote-endpoint = <&mt9v111_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ vi_vip_out: endpoint {
+ remote-endpoint = <&vi_in>;
+ };
+ };
+ };
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ vi_in: endpoint {
+ remote-endpoint = <&vi_vip_out>;
+ };
+ };
+ };
};
- |
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vip.yaml b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vip.yaml
new file mode 100644
index 000000000000..14294edb8d8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vip.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/tegra/nvidia,tegra20-vip.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra VIP (parallel video capture) controller
+
+maintainers:
+ - Luca Ceresoli <luca.ceresoli@bootlin.com>
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra20-vip
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Port receiving the video stream from the sensor
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Port sending the video stream to the VI
+
+ required:
+ - port@0
+ - port@1
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - ports
+
+# see nvidia,tegra20-vi.yaml for an example
diff --git a/Documentation/devicetree/bindings/dma/stericsson,dma40.yaml b/Documentation/devicetree/bindings/dma/stericsson,dma40.yaml
index 64845347f44d..1e5752b19a49 100644
--- a/Documentation/devicetree/bindings/dma/stericsson,dma40.yaml
+++ b/Documentation/devicetree/bindings/dma/stericsson,dma40.yaml
@@ -112,14 +112,23 @@ properties:
- const: stericsson,dma40
reg:
- items:
- - description: DMA40 memory base
- - description: LCPA memory base
+ oneOf:
+ - items:
+ - description: DMA40 memory base
+ - items:
+ - description: DMA40 memory base
+ - description: LCPA memory base, deprecated, use eSRAM pool instead
+ deprecated: true
+
reg-names:
- items:
- - const: base
- - const: lcpa
+ oneOf:
+ - items:
+ - const: base
+ - items:
+ - const: base
+ - const: lcpa
+ deprecated: true
interrupts:
maxItems: 1
@@ -127,6 +136,15 @@ properties:
clocks:
maxItems: 1
+ sram:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: A phandle array with inner size 1 (no arg cells).
+ First phandle is the LCPA (Logical Channel Parameter Address) memory.
+ Second phandle is the LCLA (Logical Channel Link base Address) memory.
+ maxItems: 2
+ items:
+ maxItems: 1
+
memcpy-channels:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: Array of u32 elements indicating which channels on the DMA
@@ -138,6 +156,7 @@ required:
- reg
- interrupts
- clocks
+ - sram
- memcpy-channels
additionalProperties: false
@@ -149,8 +168,9 @@ examples:
#include <dt-bindings/mfd/dbx500-prcmu.h>
dma-controller@801c0000 {
compatible = "stericsson,db8500-dma40", "stericsson,dma40";
- reg = <0x801c0000 0x1000>, <0x40010000 0x800>;
- reg-names = "base", "lcpa";
+ reg = <0x801c0000 0x1000>;
+ reg-names = "base";
+ sram = <&lcpa>, <&lcla>;
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <3>;
memcpy-channels = <56 57 58 59 60>;
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
index beecfe7a1732..4ca300a42a99 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
@@ -33,6 +33,7 @@ properties:
enum:
- ti,am62a-dmss-bcdma-csirx
- ti,am64-dmss-bcdma
+ - ti,j721s2-dmss-bcdma-csi
reg:
minItems: 3
@@ -151,7 +152,12 @@ allOf:
required:
- power-domains
- else:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,am64-dmss-bcdma
+ then:
properties:
reg:
minItems: 5
@@ -168,6 +174,28 @@ allOf:
- ti,sci-rm-range-bchan
- ti,sci-rm-range-tchan
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,j721s2-dmss-bcdma-csi
+ then:
+ properties:
+ ti,sci-rm-range-bchan: false
+
+ reg:
+ maxItems: 4
+
+ reg-names:
+ items:
+ - const: gcfg
+ - const: rchanrt
+ - const: tchanrt
+ - const: ringrt
+
+ required:
+ - ti,sci-rm-range-tchan
+
unevaluatedProperties: false
examples:
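For the new ti,j721s2-dmss-bcdma-csi variant, the register layout above implies a node shaped roughly as follows; this is a partial, illustrative sketch with made-up addresses, and the TI-SCI and MSI properties the full binding requires are omitted:

  dma-controller@4e230000 {
    compatible = "ti,j721s2-dmss-bcdma-csi";
    reg = <0x0 0x4e230000 0x0 0x100>,
          <0x0 0x4e600000 0x0 0x8000>,
          <0x0 0x4e610000 0x0 0x8000>,
          <0x0 0x4e620000 0x0 0x8000>;
    reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt";
    /* ti,sci handle, msi-parent and channel ranges omitted for brevity */
  };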
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml
index d6cbd95ec26d..2128f4645c98 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml
+++ b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml
@@ -41,6 +41,9 @@ properties:
clock-names:
const: axi_clk
+ power-domains:
+ maxItems: 1
+
required:
- "#dma-cells"
- compatible
@@ -48,12 +51,14 @@ required:
- interrupts
- clocks
- clock-names
+ - power-domains
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/xlnx-zynqmp-power.h>
dma: dma-controller@fd4c0000 {
compatible = "xlnx,zynqmp-dpdma";
@@ -63,6 +68,7 @@ examples:
clocks = <&dpdma_clk>;
clock-names = "axi_clk";
#dma-cells = <1>;
+ power-domains = <&zynqmp_firmware PD_DP>;
};
...
diff --git a/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml b/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml
index 6a9c96f0352a..2c8cf6aab19a 100644
--- a/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml
+++ b/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml
@@ -27,10 +27,14 @@ properties:
interrupt-names:
minItems: 1
- items:
- - const: usb_id
- - const: usb_vbus
-
+ anyOf:
+ - items:
+ - const: usb_id
+ - const: usb_vbus
+ - items:
+ - const: usb_id
+ - items:
+ - const: usb_vbus
required:
- compatible
- reg
@@ -49,7 +53,7 @@ examples:
interrupt-controller;
#interrupt-cells = <4>;
- usb_id: misc@900 {
+ usb_id: usb-detect@900 {
compatible = "qcom,pm8941-misc";
reg = <0x900>;
interrupts = <0x0 0x9 0 IRQ_TYPE_EDGE_BOTH>;
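With the relaxed interrupt-names above, a VBUS-only configuration could look like the sketch below (the interrupt cells and the node label are illustrative, not taken from the binding):

  usb_vbus: usb-detect@900 {
    compatible = "qcom,pm8941-misc";
    reg = <0x900>;
    interrupts = <0x0 0x9 1 IRQ_TYPE_EDGE_BOTH>;
    interrupt-names = "usb_vbus";
  };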
diff --git a/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml b/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
index efdf59abb2e1..351b202d0e10 100644
--- a/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
+++ b/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
@@ -23,7 +23,7 @@ properties:
headphone detect mode to HPDETL, ARIZONA_ACCDET_MODE_HPR/2 sets it
to HPDETR. If this node is not included or if the value is unknown,
then headphone detection mode is set to HPDETL.
- $ref: "/schemas/types.yaml#/definitions/uint32"
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 2
@@ -51,7 +51,7 @@ properties:
description:
Additional software microphone detection debounce specified in
milliseconds.
- $ref: "/schemas/types.yaml#/definitions/uint32"
+ $ref: /schemas/types.yaml#/definitions/uint32
wlf,micd-pol-gpio:
description:
@@ -63,7 +63,7 @@ properties:
description:
Time allowed for MICBIAS to startup prior to performing microphone
detection, specified as per the ARIZONA_MICD_TIME_XXX defines.
- $ref: "/schemas/types.yaml#/definitions/uint32"
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 12
@@ -71,7 +71,7 @@ properties:
description:
Delay between successive microphone detection measurements, specified
as per the ARIZONA_MICD_TIME_XXX defines.
- $ref: "/schemas/types.yaml#/definitions/uint32"
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 12
@@ -79,7 +79,7 @@ properties:
description:
Microphone detection hardware debounces specified as the number of
measurements to take.
- $ref: "/schemas/types.yaml#/definitions/uint32"
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [2, 4]
wlf,micd-timeout-ms:
@@ -97,7 +97,7 @@ properties:
CTIA / OMTP headsets), the field can be of variable length but
should always be a multiple of 3 cells long, each three cell group
represents one polarity configuration.
- $ref: "/schemas/types.yaml#/definitions/uint32-matrix"
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
items:
items:
- description:
@@ -119,7 +119,7 @@ properties:
description:
Settings for the general purpose switch, set as one of the
ARIZONA_GPSW_XXX defines.
- $ref: "/schemas/types.yaml#/definitions/uint32"
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 3
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index d521d516088b..16def2985ab4 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -47,6 +47,9 @@ properties:
avdd-supply:
description: AVdd voltage supply
+ vref-supply:
+ description: VRef voltage supply
+
adi,rejection-60-Hz-enable:
description: |
This bit enables a notch at 60 Hz when the first notch of the sinc
@@ -89,6 +92,7 @@ required:
- interrupts
- dvdd-supply
- avdd-supply
+ - vref-supply
- spi-cpol
- spi-cpha
@@ -115,6 +119,7 @@ examples:
interrupt-parent = <&gpio>;
dvdd-supply = <&dvdd>;
avdd-supply = <&avdd>;
+ vref-supply = <&vref>;
adi,refin2-pins-enable;
adi,rejection-60-Hz-enable;
diff --git a/Documentation/devicetree/bindings/iio/adc/mediatek,mt2701-auxadc.yaml b/Documentation/devicetree/bindings/iio/adc/mediatek,mt2701-auxadc.yaml
index 7f79a06e76f5..6168b44ea72c 100644
--- a/Documentation/devicetree/bindings/iio/adc/mediatek,mt2701-auxadc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/mediatek,mt2701-auxadc.yaml
@@ -26,6 +26,7 @@ properties:
- mediatek,mt2712-auxadc
- mediatek,mt6765-auxadc
- mediatek,mt7622-auxadc
+ - mediatek,mt7986-auxadc
- mediatek,mt8173-auxadc
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml
index bd6e0d6f6e0c..ad7d6fc49de5 100644
--- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml
@@ -54,7 +54,7 @@ required:
- '#io-channel-cells'
patternProperties:
- "^.*@[0-9a-f]+$":
+ "^channel@[0-9a-f]+$":
type: object
additionalProperties: false
description: |
@@ -101,7 +101,7 @@ patternProperties:
oneOf:
- items:
- const: 1
- - enum: [ 1, 3, 4, 6, 20, 8, 10 ]
+ - enum: [ 1, 3, 4, 6, 20, 8, 10, 16 ]
- items:
- const: 10
- const: 81
@@ -148,7 +148,7 @@ allOf:
then:
patternProperties:
- "^.*@[0-9a-f]+$":
+ "^channel@[0-9a-f]+$":
properties:
qcom,decimation:
enum: [ 512, 1024, 2048, 4096 ]
@@ -171,7 +171,7 @@ allOf:
then:
patternProperties:
- "^.*@[0-9a-f]+$":
+ "^channel@[0-9a-f]+$":
properties:
qcom,decimation:
enum: [ 256, 512, 1024 ]
@@ -194,7 +194,7 @@ allOf:
then:
patternProperties:
- "^.*@[0-9a-f]+$":
+ "^channel@[0-9a-f]+$":
properties:
qcom,decimation:
enum: [ 250, 420, 840 ]
@@ -217,7 +217,7 @@ allOf:
then:
patternProperties:
- "^.*@[0-9a-f]+$":
+ "^channel@[0-9a-f]+$":
properties:
qcom,decimation:
enum: [ 85, 340, 1360 ]
@@ -249,7 +249,7 @@ examples:
#io-channel-cells = <1>;
/* Channel node */
- adc-chan@39 {
+ channel@39 {
reg = <0x39>;
qcom,decimation = <512>;
qcom,ratiometric;
@@ -258,19 +258,19 @@ examples:
qcom,pre-scaling = <1 3>;
};
- adc-chan@9 {
+ channel@9 {
reg = <0x9>;
};
- adc-chan@a {
+ channel@a {
reg = <0xa>;
};
- adc-chan@e {
+ channel@e {
reg = <0xe>;
};
- adc-chan@f {
+ channel@f {
reg = <0xf>;
};
};
@@ -292,16 +292,18 @@ examples:
#io-channel-cells = <1>;
/* Other properties are omitted */
- xo-therm@44 {
+ channel@44 {
reg = <PMK8350_ADC7_AMUX_THM1_100K_PU>;
qcom,ratiometric;
qcom,hw-settle-time = <200>;
+ label = "xo_therm";
};
- conn-therm@47 {
+ channel@47 {
reg = <PM8350_ADC7_AMUX_THM4_100K_PU(1)>;
qcom,ratiometric;
qcom,hw-settle-time = <200>;
+ label = "conn_therm";
};
};
};
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
index da50b529c157..aa24b841393c 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
@@ -15,6 +15,7 @@ properties:
- const: rockchip,saradc
- const: rockchip,rk3066-tsadc
- const: rockchip,rk3399-saradc
+ - const: rockchip,rk3588-saradc
- items:
- enum:
- rockchip,px30-saradc
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml b/Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml
index 9b072b057f16..a60b1e100ee4 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml
@@ -35,7 +35,7 @@ unevaluatedProperties: false
examples:
- |
spi {
- #address-cells= <1>;
+ #address-cells = <1>;
#size-cells = <0>;
adc@0 {
diff --git a/Documentation/devicetree/bindings/iio/afe/voltage-divider.yaml b/Documentation/devicetree/bindings/iio/afe/voltage-divider.yaml
index df2589f214e1..dddf97b50549 100644
--- a/Documentation/devicetree/bindings/iio/afe/voltage-divider.yaml
+++ b/Documentation/devicetree/bindings/iio/afe/voltage-divider.yaml
@@ -13,7 +13,7 @@ description: |
When an io-channel measures the midpoint of a voltage divider, the
interesting voltage is often the voltage over the full resistance
of the divider. This binding describes the voltage divider in such
- a curcuit.
+ a circuit.
Vin ----.
|
diff --git a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
index ec64d7877fe5..1db6952ddca5 100644
--- a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
@@ -31,6 +31,9 @@ properties:
- invensense,mpu9250
- invensense,mpu9255
- items:
+ - const: invensense,icm20600
+ - const: invensense,icm20602
+ - items:
- const: invensense,icm20608d
- const: invensense,icm20608
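The new pair lets a board describe an ICM-20600 that falls back to the ICM-20602 programming model; a minimal sketch (0x68 is the usual default I2C address, shown only for illustration):

  imu@68 {
    compatible = "invensense,icm20600", "invensense,icm20602";
    reg = <0x68>;
  };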
diff --git a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
index b39f5217d8ff..ee8724ad33ab 100644
--- a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
@@ -98,6 +98,7 @@ required:
- reg
allOf:
+ - $ref: /schemas/iio/iio.yaml#
- $ref: /schemas/spi/spi-peripheral-props.yaml#
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bu27008.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bu27008.yaml
new file mode 100644
index 000000000000..4f66fd47b016
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/rohm,bu27008.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/rohm,bu27008.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BU27008 color sensor
+
+maintainers:
+ - Matti Vaittinen <mazziesaccount@gmail.com>
+
+description:
+ The ROHM BU27008 is a sensor with 5 photodiodes (red, green, blue, clear
+ and IR) and four configurable channels. Red and green are always
+ available; two of the remaining three (blue, clear, IR) can be selected
+ for simultaneous measurement. A typical application is adjusting the LCD
+ backlight of TVs, mobile phones and tablet PCs.
+
+properties:
+ compatible:
+ const: rohm,bu27008
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@38 {
+ compatible = "rohm,bu27008";
+ reg = <0x38>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/iio/light/ti,opt4001.yaml b/Documentation/devicetree/bindings/iio/light/ti,opt4001.yaml
new file mode 100644
index 000000000000..12b0c7ed5d72
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/ti,opt4001.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/ti,opt4001.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments OPT4001 Ambient Light Sensor
+
+maintainers:
+ - Stefan Windfeldt-Prytz <stefan.windfeldt-prytz@axis.com>
+
+description:
+ Ambient light sensor with an I2C interface.
+ The last part of the compatible string selects the package used:
+ picostar is a 4-pin SMT package and sot-5x3 is an 8-pin SOT package.
+ https://www.ti.com/lit/gpn/opt4001
+
+properties:
+ compatible:
+ enum:
+ - ti,opt4001-picostar
+ - ti,opt4001-sot-5x3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply:
+ description: Regulator that provides power to the sensor
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,opt4001-sot-5x3
+ then:
+ properties:
+ interrupts:
+ maxItems: 1
+ else:
+ properties:
+ interrupts: false
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@44 {
+ compatible = "ti,opt4001-sot-5x3";
+ reg = <0x44>;
+ vdd-supply = <&vdd_reg>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/potentiometer/renesas,x9250.yaml b/Documentation/devicetree/bindings/iio/potentiometer/renesas,x9250.yaml
new file mode 100644
index 000000000000..ab5c09c00ff4
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/potentiometer/renesas,x9250.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/potentiometer/renesas,x9250.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas X9250 quad potentiometers
+
+maintainers:
+ - Herve Codina <herve.codina@bootlin.com>
+
+description:
+ The Renesas X9250 integrates four digitally controlled potentiometers.
+ Each potentiometer has a total resistance of 100 kOhm on the X9250T and
+ 50 kOhm on the X9250U.
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml
+
+properties:
+ compatible:
+ enum:
+ - renesas,x9250t
+ - renesas,x9250u
+
+ reg:
+ maxItems: 1
+
+ vcc-supply:
+ description:
+ Regulator for the VCC power supply.
+
+ avp-supply:
+ description:
+ Regulator for the analog V+ power supply.
+
+ avn-supply:
+ description:
+ Regulator for the analog V- power supply.
+
+ '#io-channel-cells':
+ const: 1
+
+ spi-max-frequency:
+ maximum: 2000000
+
+ wp-gpios:
+ maxItems: 1
+ description:
+ GPIO connected to the write-protect pin.
+
+required:
+ - compatible
+ - reg
+ - vcc-supply
+ - avp-supply
+ - avn-supply
+ - '#io-channel-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ potentiometer@0 {
+ compatible = "renesas,x9250t";
+ reg = <0>;
+ vcc-supply = <&vcc_regulator>;
+ avp-supply = <&avp_regulator>;
+ avn-supply = <&avp_regulator>;
+ wp-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+ spi-max-frequency = <2000000>;
+ #io-channel-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml b/Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml
new file mode 100644
index 000000000000..c0a923febf13
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/pressure/honeywell,mprls0025pa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Honeywell mprls0025pa pressure sensor
+
+maintainers:
+ - Andreas Klinger <ak@it-klinger.de>
+
+description: |
+ Honeywell pressure sensor of model mprls0025pa.
+
+ This sensor has an I2C and SPI interface. Only the I2C interface is
+ implemented.
+
+ There are many models with different pressure ranges available. The vendor
+ calls them the "mpr series". All of them share an identical programming
+ model and differ only in pressure range, unit and transfer function.
+
+ To support the different models, one needs to specify the pressure range
+ as well as the transfer function. The pressure range needs to be converted
+ from its native unit to pascal.
+
+ The transfer function defines the range of numerical values delivered by
+ the sensor: the minimum value corresponds to the minimum pressure and the
+ maximum value to the maximum pressure, with a linear relation inside the
+ range.
+
+ Specifications about the devices can be found at:
+ https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/
+ products/sensors/pressure-sensors/board-mount-pressure-sensors/
+ micropressure-mpr-series/documents/
+ sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+
+properties:
+ compatible:
+ const: honeywell,mprls0025pa
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ Optional GPIO for resetting the device.
+ If not present, the device is not reset during probe.
+ maxItems: 1
+
+ honeywell,pmin-pascal:
+ description:
+ Minimum pressure value the sensor can measure in pascal.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ honeywell,pmax-pascal:
+ description:
+ Maximum pressure value the sensor can measure in pascal.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ honeywell,transfer-function:
+ description: |
+ Transfer function which defines the range of valid values delivered by the
+ sensor.
+ 1 - A, 10% to 90% of 2^24 (1677722 .. 15099494)
+ 2 - B, 2.5% to 22.5% of 2^24 (419430 .. 3774874)
+ 3 - C, 20% to 80% of 2^24 (3355443 .. 13421773)
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ vdd-supply:
+ description: provide VDD power to the sensor.
+
+required:
+ - compatible
+ - reg
+ - honeywell,pmin-pascal
+ - honeywell,pmax-pascal
+ - honeywell,transfer-function
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pressure@18 {
+ compatible = "honeywell,mprls0025pa";
+ reg = <0x18>;
+ reset-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
+ honeywell,pmin-pascal = <0>;
+ honeywell,pmax-pascal = <172369>;
+ honeywell,transfer-function = <1>;
+ vdd-supply = <&vcc_3v3>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
index 1ff3afca9149..e450821a741d 100644
--- a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
+++ b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
@@ -84,6 +84,7 @@ properties:
- st,lps35hw
- description: IMUs
enum:
+ - st,lsm303d-imu
- st,lsm9ds0-imu
- description: Deprecated bindings
enum:
diff --git a/Documentation/devicetree/bindings/iio/temperature/melexis,mlx90614.yaml b/Documentation/devicetree/bindings/iio/temperature/melexis,mlx90614.yaml
index d6965a0c1cf3..654d31f65d36 100644
--- a/Documentation/devicetree/bindings/iio/temperature/melexis,mlx90614.yaml
+++ b/Documentation/devicetree/bindings/iio/temperature/melexis,mlx90614.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/iio/temperature/melexis,mlx90614.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Melexis MLX90614 contactless IR temperature sensor
+title: Melexis MLX90614/MLX90615 contactless IR temperature sensor
maintainers:
- Peter Meerwald <pmeerw@pmeerw.net>
@@ -15,7 +15,9 @@ description: |
properties:
compatible:
- const: melexis,mlx90614
+ enum:
+ - melexis,mlx90614
+ - melexis,mlx90615
reg:
maxItems: 1
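A node using the new MLX90615 compatible might look like the following sketch (0x5b is assumed here as the device address purely for illustration):

  temperature-sensor@5b {
    compatible = "melexis,mlx90615";
    reg = <0x5b>;
  };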
diff --git a/Documentation/devicetree/bindings/iio/temperature/ti,tmp006.yaml b/Documentation/devicetree/bindings/iio/temperature/ti,tmp006.yaml
new file mode 100644
index 000000000000..d43002b9bfdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/temperature/ti,tmp006.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/temperature/ti,tmp006.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI TMP006 IR thermopile sensor
+
+maintainers:
+ - Peter Meerwald <pmeerw@pmeerw.net>
+
+description: |
+ TI TMP006 - Infrared Thermopile Sensor in Chip-Scale Package.
+ https://cdn.sparkfun.com/datasheets/Sensors/Temp/tmp006.pdf
+
+properties:
+ compatible:
+ const: ti,tmp006
+
+ reg:
+ maxItems: 1
+
+ vdd-supply:
+ description: provide VDD power to the sensor.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ temperature-sensor@40 {
+ compatible = "ti,tmp006";
+ reg = <0x40>;
+ vdd-supply = <&ldo4_reg>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml b/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
index f7a5e31c506e..fc21fe3e7b37 100644
--- a/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
+++ b/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
@@ -51,7 +51,7 @@ properties:
type: object
fsl,ddrc:
- $ref: "/schemas/types.yaml#/definitions/phandle"
+ $ref: /schemas/types.yaml#/definitions/phandle
description:
Phandle to DDR Controller.
diff --git a/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml b/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml
new file mode 100644
index 000000000000..feb5febaf361
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/awinic,aw200xx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AWINIC AW200XX LED
+
+maintainers:
+ - Martin Kurbanov <mmkurbanov@sberdevices.ru>
+
+description: |
+ This controller is present on AW20036/AW20054/AW20072.
+ It is a 3x12/6x9/6x12 LED matrix driver programmed via an I2C
+ interface, supporting up to 36/54/72 LEDs or 12/18/24 RGBs, with
+ 3 pattern controllers for auto breathing or group dimming control.
+
+ For more product information please see the links below:
+ aw20036 - https://www.awinic.com/en/productDetail/AW20036QNR#tech-docs
+ aw20054 - https://www.awinic.com/en/productDetail/AW20054QNR#tech-docs
+ aw20072 - https://www.awinic.com/en/productDetail/AW20072QNR#tech-docs
+
+properties:
+ compatible:
+ enum:
+ - awinic,aw20036
+ - awinic,aw20054
+ - awinic,aw20072
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ awinic,display-rows:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ LED matrix size (number of rows)
+
+patternProperties:
+ "^led@[0-9a-f]$":
+ type: object
+ $ref: common.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ description:
+ LED number
+ maxItems: 1
+
+ led-max-microamp:
+ default: 9780
+ description: |
+ Note that a driver will take the minimum of all LED limits
+ since the chip has a single global setting.
+ The maximum output current of each LED is calculated by the
+ following formula:
+ IMAXled = 160000 * (592 / 600.5) * (1 / display-rows)
+ And the minimum output current formula:
+ IMINled = 3300 * (592 / 600.5) * (1 / display-rows)
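+ For example, with awinic,display-rows = <3> the formulas above give
+ IMAXled = 160000 * (592 / 600.5) / 3, roughly 52578, and
+ IMINled = 3300 * (592 / 600.5) / 3, roughly 1084.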
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - awinic,display-rows
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: awinic,aw20036
+ then:
+ properties:
+ awinic,display-rows:
+ enum: [1, 2, 3]
+ else:
+ properties:
+ awinic,display-rows:
+ enum: [1, 2, 3, 4, 5, 6, 7]
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@3a {
+ compatible = "awinic,aw20036";
+ reg = <0x3a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ awinic,display-rows = <3>;
+
+ led@0 {
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RED>;
+ led-max-microamp = <9780>;
+ };
+
+ led@1 {
+ reg = <0x1>;
+ color = <LED_COLOR_ID_GREEN>;
+ led-max-microamp = <9780>;
+ };
+
+ led@2 {
+ reg = <0x2>;
+ color = <LED_COLOR_ID_BLUE>;
+ led-max-microamp = <9780>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/backlight/kinetic,ktz8866.yaml b/Documentation/devicetree/bindings/leds/backlight/kinetic,ktz8866.yaml
index e1191453c2f0..c914e1276982 100644
--- a/Documentation/devicetree/bindings/leds/backlight/kinetic,ktz8866.yaml
+++ b/Documentation/devicetree/bindings/leds/backlight/kinetic,ktz8866.yaml
@@ -21,6 +21,9 @@ properties:
compatible:
const: kinetic,ktz8866
+ reg:
+ maxItems: 1
+
vddpos-supply:
description: positive boost supply regulator.
@@ -33,6 +36,7 @@ properties:
current-num-sinks:
description: number of the LED current sinks' channels.
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [1, 2, 3, 4, 5, 6]
kinetic,current-ramp-delay-ms:
@@ -53,6 +57,7 @@ properties:
required:
- compatible
+ - reg
- vddpos-supply
- vddneg-supply
- enable-gpios
@@ -63,14 +68,19 @@ examples:
- |
#include <dt-bindings/gpio/gpio.h>
- backlight {
- compatible = "kinetic,ktz8866";
-
- vddpos-supply = <&bl_vddpos_5p5>;
- vddneg-supply = <&bl_vddneg_5p5>;
- enable-gpios = <&tlmm 139 GPIO_ACTIVE_HIGH>;
- current-num-sinks = <5>;
- kinetic,current-ramp-delay-ms = <128>;
- kinetic,led-enable-ramp-delay-ms = <1>;
- kinetic,enable-lcd-bias;
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ backlight@11 {
+ compatible = "kinetic,ktz8866";
+ reg = <0x11>;
+ vddpos-supply = <&bl_vddpos_5p5>;
+ vddneg-supply = <&bl_vddneg_5p5>;
+ enable-gpios = <&tlmm 139 GPIO_ACTIVE_HIGH>;
+ current-num-sinks = <5>;
+ kinetic,current-ramp-delay-ms = <128>;
+ kinetic,led-enable-ramp-delay-ms = <1>;
+ kinetic,enable-lcd-bias;
+ };
};
diff --git a/Documentation/devicetree/bindings/leds/backlight/lp855x-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/lp855x-backlight.yaml
new file mode 100644
index 000000000000..9416e1bfab92
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/backlight/lp855x-backlight.yaml
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/backlight/lp855x-backlight.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments LP855X backlight controllers
+
+maintainers:
+ - Artur Weber <aweber.kernel@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - ti,lp8550
+ - ti,lp8551
+ - ti,lp8552
+ - ti,lp8553
+ - ti,lp8555
+ - ti,lp8556
+ - ti,lp8557
+
+ reg:
+ maxItems: 1
+
+ dev-ctrl:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description:
+ Value of device control register. This is a device-specific value.
+
+ bl-name:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Backlight device name.
+
+ init-brt:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description: Initial value of backlight brightness.
+
+ power-supply:
+ description: Regulator which controls the 3V rail.
+
+ enable-supply:
+ description: Regulator which controls the EN/VDDIO input.
+
+ pwms:
+ maxItems: 1
+ description: |
+ PWM channel to use for controlling the backlight; setting this
+ enables the PWM-based backlight control mode.
+
+ pwm-names: true
+
+ pwm-period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ PWM period value. Deprecated; set the period value in the pwms
+ property instead.
+ deprecated: true
+
+patternProperties:
+ "^rom-[0-9a-f]{2}h$":
+ type: object
+ description: Nodes containing the values of configuration registers.
+ additionalProperties: false
+ properties:
+ rom-addr:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description: Register address of ROM area to be updated.
+
+ rom-val:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description: Value to write to the ROM register.
+
+required:
+ - compatible
+ - reg
+ - dev-ctrl
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ backlight@2c {
+ compatible = "ti,lp8555";
+ reg = <0x2c>;
+
+ dev-ctrl = /bits/ 8 <0x00>;
+
+ pwms = <&pwm 0 10000>;
+ pwm-names = "lp8555";
+
+ /* 4V OV, 4 output LED0 string enabled */
+ rom-14h {
+ rom-addr = /bits/ 8 <0x14>;
+ rom-val = /bits/ 8 <0xcf>;
+ };
+
+ /* Heavy smoothing, 24ms ramp time step */
+ rom-15h {
+ rom-addr = /bits/ 8 <0x15>;
+ rom-val = /bits/ 8 <0xc7>;
+ };
+
+ /* 4 output LED1 string enabled */
+ rom-19h {
+ rom-addr = /bits/ 8 <0x19>;
+ rom-val = /bits/ 8 <0x0f>;
+ };
+ };
+ };
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ backlight@2c {
+ compatible = "ti,lp8556";
+ reg = <0x2c>;
+
+ bl-name = "lcd-bl";
+ dev-ctrl = /bits/ 8 <0x85>;
+ init-brt = /bits/ 8 <0x10>;
+ };
+ };
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ backlight@2c {
+ compatible = "ti,lp8557";
+ reg = <0x2c>;
+ enable-supply = <&backlight_vddio>;
+ power-supply = <&backlight_vdd>;
+
+ dev-ctrl = /bits/ 8 <0x41>;
+ init-brt = /bits/ 8 <0x0a>;
+
+ /* 4V OV, 4 output LED string enabled */
+ rom-14h {
+ rom-addr = /bits/ 8 <0x14>;
+ rom-val = /bits/ 8 <0xcf>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/leds/backlight/lp855x.txt b/Documentation/devicetree/bindings/leds/backlight/lp855x.txt
deleted file mode 100644
index 88f56641fc28..000000000000
--- a/Documentation/devicetree/bindings/leds/backlight/lp855x.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-lp855x bindings
-
-Required properties:
- - compatible: "ti,lp8550", "ti,lp8551", "ti,lp8552", "ti,lp8553",
- "ti,lp8555", "ti,lp8556", "ti,lp8557"
- - reg: I2C slave address (u8)
- - dev-ctrl: Value of DEVICE CONTROL register (u8). It depends on the device.
-
-Optional properties:
- - bl-name: Backlight device name (string)
- - init-brt: Initial value of backlight brightness (u8)
- - pwm-period: PWM period value. Set only PWM input mode used (u32)
- - rom-addr: Register address of ROM area to be updated (u8)
- - rom-val: Register value to be updated (u8)
- - power-supply: Regulator which controls the 3V rail
- - enable-supply: Regulator which controls the EN/VDDIO input
-
-Example:
-
- /* LP8555 */
- backlight@2c {
- compatible = "ti,lp8555";
- reg = <0x2c>;
-
- dev-ctrl = /bits/ 8 <0x00>;
- pwm-period = <10000>;
-
- /* 4V OV, 4 output LED0 string enabled */
- rom_14h {
- rom-addr = /bits/ 8 <0x14>;
- rom-val = /bits/ 8 <0xcf>;
- };
-
- /* Heavy smoothing, 24ms ramp time step */
- rom_15h {
- rom-addr = /bits/ 8 <0x15>;
- rom-val = /bits/ 8 <0xc7>;
- };
-
- /* 4 output LED1 string enabled */
- rom_19h {
- rom-addr = /bits/ 8 <0x19>;
- rom-val = /bits/ 8 <0x0f>;
- };
- };
-
- /* LP8556 */
- backlight@2c {
- compatible = "ti,lp8556";
- reg = <0x2c>;
-
- bl-name = "lcd-bl";
- dev-ctrl = /bits/ 8 <0x85>;
- init-brt = /bits/ 8 <0x10>;
- };
-
- /* LP8557 */
- backlight@2c {
- compatible = "ti,lp8557";
- reg = <0x2c>;
- enable-supply = <&backlight_vddio>;
- power-supply = <&backlight_vdd>;
-
- dev-ctrl = /bits/ 8 <0x41>;
- init-brt = /bits/ 8 <0x0a>;
-
- /* 4V OV, 4 output LED string enabled */
- rom_14h {
- rom-addr = /bits/ 8 <0x14>;
- rom-val = /bits/ 8 <0xcf>;
- };
- };
diff --git a/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml
index 5ec47a8c6568..535690288990 100644
--- a/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml
+++ b/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml
@@ -68,7 +68,6 @@ dependencies:
required:
- compatible
- pwms
- - power-supply
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index 11aedf1650a1..58b492d00246 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -105,8 +105,6 @@ properties:
- audio-mute
# LED indicates bluetooth power state
- bluetooth-power
- # LED indicates activity of all CPUs
- - cpu
# LED indicates camera flash state
- flash
# LED indicated keyboard capslock
diff --git a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
index 31840e33dcf5..e850a8894758 100644
--- a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
@@ -34,7 +34,7 @@ required:
- color
allOf:
- - $ref: "common.yaml#"
+ - $ref: common.yaml#
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml b/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml
index ae607911f1db..058be1fedbc8 100644
--- a/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml
@@ -66,6 +66,14 @@ properties:
'#size-cells':
const: 0
+ ti,charge-pump-mode:
+ description:
+ Set the operating mode of the internal charge pump as defined in
+ <dt-bindings/leds/leds-lp55xx.h>.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 3 # auto
+ maximum: 3
+
patternProperties:
'^multi-led@[0-8]$':
type: object
@@ -152,6 +160,7 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/leds/common.h>
+ #include <dt-bindings/leds/leds-lp55xx.h>
i2c {
#address-cells = <1>;
@@ -164,6 +173,7 @@ examples:
reg = <0x32>;
clock-mode = /bits/ 8 <2>;
pwr-sel = /bits/ 8 <3>; /* D1~9 connected to VOUT */
+ ti,charge-pump-mode = <LP55XX_CP_BYPASS>;
led@0 {
reg = <0>;
diff --git a/Documentation/devicetree/bindings/leds/leds-mt6323.txt b/Documentation/devicetree/bindings/leds/leds-mt6323.txt
index 73353692efa1..052dccb8f2ce 100644
--- a/Documentation/devicetree/bindings/leds/leds-mt6323.txt
+++ b/Documentation/devicetree/bindings/leds/leds-mt6323.txt
@@ -12,7 +12,10 @@ For MediaTek PMIC wrapper bindings see:
Documentation/devicetree/bindings/soc/mediatek/mediatek,pwrap.yaml
Required properties:
-- compatible : Must be "mediatek,mt6323-led"
+- compatible : Must be one of
+ - "mediatek,mt6323-led"
+ - "mediatek,mt6331-led"
+ - "mediatek,mt6332-led"
- address-cells : Must be 1
- size-cells : Must be 0
diff --git a/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml b/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml
index 6295c91f43e8..e6f1999cb22f 100644
--- a/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml
@@ -16,18 +16,24 @@ description: >
properties:
compatible:
- enum:
- - qcom,pm660l-lpg
- - qcom,pm8150b-lpg
- - qcom,pm8150l-lpg
- - qcom,pm8350c-pwm
- - qcom,pm8916-pwm
- - qcom,pm8941-lpg
- - qcom,pm8994-lpg
- - qcom,pmc8180c-lpg
- - qcom,pmi8994-lpg
- - qcom,pmi8998-lpg
- - qcom,pmk8550-pwm
+ oneOf:
+ - enum:
+ - qcom,pm660l-lpg
+ - qcom,pm8150b-lpg
+ - qcom,pm8150l-lpg
+ - qcom,pm8350c-pwm
+ - qcom,pm8916-pwm
+ - qcom,pm8941-lpg
+ - qcom,pm8994-lpg
+ - qcom,pmc8180c-lpg
+ - qcom,pmi632-lpg
+ - qcom,pmi8994-lpg
+ - qcom,pmi8998-lpg
+ - qcom,pmk8550-pwm
+ - items:
+ - enum:
+ - qcom,pm8550-pwm
+ - const: qcom,pm8350c-pwm
"#pwm-cells":
const: 2
diff --git a/Documentation/devicetree/bindings/leds/leds-sgm3140.yaml b/Documentation/devicetree/bindings/leds/leds-sgm3140.yaml
index 4d2ffe5fcfc7..37d2a93780ab 100644
--- a/Documentation/devicetree/bindings/leds/leds-sgm3140.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-sgm3140.yaml
@@ -20,6 +20,7 @@ properties:
compatible:
enum:
- ocs,ocp8110
+ - richtek,rt5033-led
- sgmicro,sgm3140
enable-gpios:
diff --git a/Documentation/devicetree/bindings/leds/qcom,spmi-flash-led.yaml b/Documentation/devicetree/bindings/leds/qcom,spmi-flash-led.yaml
index ffacf703d9f9..a8736fd5a539 100644
--- a/Documentation/devicetree/bindings/leds/qcom,spmi-flash-led.yaml
+++ b/Documentation/devicetree/bindings/leds/qcom,spmi-flash-led.yaml
@@ -26,6 +26,8 @@ properties:
- qcom,pm8150c-flash-led
- qcom,pm8150l-flash-led
- qcom,pm8350c-flash-led
+ - qcom,pm8550-flash-led
+ - qcom,pmi8998-flash-led
- const: qcom,spmi-flash-led
reg:
diff --git a/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
index 64b0be9cf70b..58f0d94c6d71 100644
--- a/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
+++ b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
@@ -32,7 +32,7 @@ patternProperties:
properties:
rohm,led-compatible:
description: LED identification string
- $ref: "/schemas/types.yaml#/definitions/string"
+ $ref: /schemas/types.yaml#/definitions/string
enum:
- bd71828-ambled
- bd71828-grnled
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt
deleted file mode 100644
index b48d7d30012c..000000000000
--- a/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Broadcom BCM2835 VideoCore mailbox IPC
-
-Required properties:
-
-- compatible: Should be "brcm,bcm2835-mbox"
-- reg: Specifies base physical address and size of the registers
-- interrupts: The interrupt number
- See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
-- #mbox-cells: Specifies the number of cells needed to encode a mailbox
- channel. The value shall be 0, since there is only one
- mailbox channel implemented by the device.
-
-Example:
-
-mailbox: mailbox@7e00b880 {
- compatible = "brcm,bcm2835-mbox";
- reg = <0x7e00b880 0x40>;
- interrupts = <0 1>;
- #mbox-cells = <0>;
-};
-
-firmware: firmware {
- compatible = "raspberrypi,firmware";
- mboxes = <&mailbox>;
- #power-domain-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.yaml b/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.yaml
new file mode 100644
index 000000000000..9588817f4511
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/brcm,bcm2835-mbox.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM2835 VideoCore mailbox IPC
+
+maintainers:
+ - Stefan Wahren <stefan.wahren@i2se.com>
+
+properties:
+ compatible:
+ const: brcm,bcm2835-mbox
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#mbox-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#mbox-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ mailbox@7e00b880 {
+ compatible = "brcm,bcm2835-mbox";
+ reg = <0x7e00b880 0x40>;
+ interrupts = <0 1>;
+ #mbox-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml
index a3e87516d637..2d14fc948999 100644
--- a/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml
+++ b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml
@@ -66,6 +66,7 @@ properties:
oneOf:
- const: nvidia,tegra186-hsp
- const: nvidia,tegra194-hsp
+ - const: nvidia,tegra264-hsp
- items:
- const: nvidia,tegra234-hsp
- const: nvidia,tegra194-hsp
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
index 32d7bbc98cac..d2e25ff6db7f 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -18,6 +18,7 @@ properties:
oneOf:
- items:
- enum:
+ - qcom,ipq5018-apcs-apps-global
- qcom,ipq5332-apcs-apps-global
- qcom,ipq8074-apcs-apps-global
- qcom,ipq9574-apcs-apps-global
diff --git a/Documentation/devicetree/bindings/media/i2c/maxim,max96712.yaml b/Documentation/devicetree/bindings/media/i2c/maxim,max96712.yaml
index 444f24838d3d..6c72e77b927c 100644
--- a/Documentation/devicetree/bindings/media/i2c/maxim,max96712.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/maxim,max96712.yaml
@@ -65,9 +65,14 @@ properties:
properties:
data-lanes: true
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
required:
- data-lanes
+ - bus-type
required:
- port@4
@@ -82,6 +87,7 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/media/video-interfaces.h>
i2c@e6508000 {
#address-cells = <1>;
@@ -101,6 +107,7 @@ examples:
port@4 {
reg = <4>;
max96712_out0: endpoint {
+ bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
clock-lanes = <0>;
data-lanes = <1 2 3 4>;
remote-endpoint = <&csi40_in>;
diff --git a/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml b/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml
index eb1499912c58..9cc0a968a401 100644
--- a/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml
@@ -155,7 +155,7 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8916.h>
- camss: camss@1b00000 {
+ camss: camss@1b0ac00 {
compatible = "qcom,msm8916-camss";
clocks = <&gcc GCC_CAMSS_TOP_AHB_CLK>,
diff --git a/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml b/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml
index 8a10aa1cafc5..5cb0e337ea6e 100644
--- a/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml
@@ -221,7 +221,7 @@ examples:
#include <dt-bindings/clock/qcom,gcc-msm8996.h>
#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
- camss: camss@a00000 {
+ camss: camss@a34000 {
compatible = "qcom,msm8996-camss";
clocks = <&mmcc CAMSS_TOP_AHB_CLK>,
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml
index 0a109e126064..584106e275f6 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml
@@ -227,7 +227,7 @@ examples:
#include <dt-bindings/clock/qcom,gcc-sdm660.h>
#include <dt-bindings/clock/qcom,mmcc-sdm660.h>
- camss: camss@ca00000 {
+ camss: camss@ca00020 {
compatible = "qcom,sdm660-camss";
clocks = <&mmcc CAMSS_AHB_CLK>,
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml
index 1530ad0d80bd..ec4380a0a03f 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml
@@ -219,7 +219,7 @@ examples:
#address-cells = <2>;
#size-cells = <2>;
- camss: camss@a00000 {
+ camss: camss@acb3000 {
compatible = "qcom,sdm845-camss";
clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
diff --git a/Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml b/Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml
index 7dde7967c886..1e72b8808d24 100644
--- a/Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,rzg2l-cru.yaml
@@ -137,7 +137,7 @@ examples:
cru_parallel_in: endpoint@0 {
reg = <0>;
- remote-endpoint= <&ov5642>;
+ remote-endpoint = <&ov5642>;
hsync-active = <1>;
vsync-active = <1>;
};
@@ -150,7 +150,7 @@ examples:
cru_csi_in: endpoint@0 {
reg = <0>;
- remote-endpoint= <&csi_cru_in>;
+ remote-endpoint = <&csi_cru_in>;
};
};
};
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
index 91e8f368fb52..324703bfb1bd 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
@@ -303,11 +303,11 @@ examples:
vin0csi20: endpoint@0 {
reg = <0>;
- remote-endpoint= <&csi20vin0>;
+ remote-endpoint = <&csi20vin0>;
};
vin0csi40: endpoint@2 {
reg = <2>;
- remote-endpoint= <&csi40vin0>;
+ remote-endpoint = <&csi40vin0>;
};
};
};
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
index ee622a8ee1cc..772ec3283bc6 100644
--- a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
@@ -24,6 +24,7 @@ properties:
- rockchip,rk3399-vpu
- rockchip,px30-vpu
- rockchip,rk3568-vpu
+ - rockchip,rk3588-av1-vpu
- items:
- const: rockchip,rk3188-vpu
- const: rockchip,rk3066-vpu
diff --git a/Documentation/devicetree/bindings/mfd/adi,max77541.yaml b/Documentation/devicetree/bindings/mfd/adi,max77541.yaml
new file mode 100644
index 000000000000..c7895b2c38c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/adi,max77541.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/adi,max77541.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MAX77540/MAX77541 PMIC from ADI
+
+maintainers:
+ - Okan Sahin <okan.sahin@analog.com>
+
+description: |
+ MAX77540 is a Power Management IC with 2 buck regulators.
+
+ MAX77541 is a Power Management IC with 2 buck regulators and 1 ADC.
+
+properties:
+ compatible:
+ enum:
+ - adi,max77540
+ - adi,max77541
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ regulators:
+ $ref: /schemas/regulator/adi,max77541-regulator.yaml#
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@69 {
+ compatible = "adi,max77541";
+ reg = <0x69>;
+ interrupt-parent = <&gpio>;
+ interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+
+ regulators {
+ buck1 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <5200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ buck2 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <5200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml b/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
index acb9c54942d9..dc379f3ebf24 100644
--- a/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
+++ b/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
@@ -122,12 +122,6 @@ patternProperties:
compatible:
const: gw,gsc-fan
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
reg:
description: The fan controller base address
maxItems: 1
@@ -135,8 +129,6 @@ patternProperties:
required:
- compatible
- reg
- - "#address-cells"
- - "#size-cells"
required:
- compatible
@@ -194,8 +186,6 @@ examples:
};
fan-controller@2c {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "gw,gsc-fan";
reg = <0x2c>;
};
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
index 36de335a33aa..8b9a2008a354 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
@@ -71,6 +71,7 @@ properties:
- qcom,pm8998
- qcom,pma8084
- qcom,pmd9635
+ - qcom,pmi632
- qcom,pmi8950
- qcom,pmi8962
- qcom,pmi8994
@@ -133,6 +134,7 @@ patternProperties:
oneOf:
- $ref: /schemas/power/supply/qcom,pm8941-charger.yaml#
- $ref: /schemas/power/supply/qcom,pm8941-coincell.yaml#
+ - $ref: /schemas/power/supply/qcom,pmi8998-charger.yaml#
"gpio@[0-9a-f]+$":
type: object
@@ -146,6 +148,10 @@ patternProperties:
type: object
$ref: /schemas/nvmem/qcom,spmi-sdam.yaml#
+ "phy@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/phy/qcom,snps-eusb2-repeater.yaml#
+
"pon@[0-9a-f]+$":
type: object
$ref: /schemas/power/reset/qcom,pon.yaml#
diff --git a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
index fe790af7b4fb..5ad9d5deaaf8 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
@@ -34,6 +34,7 @@ properties:
- qcom,tcsr-ipq5332
- qcom,tcsr-ipq6018
- qcom,tcsr-ipq8064
+ - qcom,tcsr-ipq8074
- qcom,tcsr-ipq9574
- qcom,tcsr-mdm9615
- qcom,tcsr-msm8226
diff --git a/Documentation/devicetree/bindings/mfd/richtek,rt5033.yaml b/Documentation/devicetree/bindings/mfd/richtek,rt5033.yaml
new file mode 100644
index 000000000000..386b1a50158a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/richtek,rt5033.yaml
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/richtek,rt5033.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT5033 Power Management Integrated Circuit
+
+maintainers:
+ - Jakob Hauser <jahau@rocketmail.com>
+
+description:
+ RT5033 is a multifunction device which includes battery charger, fuel gauge,
+ flash LED current source, LDO and synchronous Buck converter for portable
+ applications. It is interfaced to the host controller using an I2C interface.
+ The battery fuel gauge uses a separate I2C bus.
+
+properties:
+ compatible:
+ const: richtek,rt5033
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ regulators:
+ description:
+ The regulators of RT5033 have to be instantiated under a sub-node named
+ "regulators". For SAFE_LDO voltage there is only one value of 4.9 V. LDO
+ voltage ranges from 1.2 V to 3.0 V in 0.1 V steps. BUCK voltage ranges
+ from 1.0 V to 3.0 V in 0.1 V steps.
+ type: object
+ patternProperties:
+ "^(SAFE_LDO|LDO|BUCK)$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+ additionalProperties: false
+
+ charger:
+ type: object
+ $ref: /schemas/power/supply/richtek,rt5033-charger.yaml#
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ battery: battery {
+ compatible = "simple-battery";
+ precharge-current-microamp = <450000>;
+ constant-charge-current-max-microamp = <1000000>;
+ charge-term-current-microamp = <150000>;
+ precharge-upper-limit-microvolt = <3500000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+ };
+
+ extcon {
+ usb_con: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ };
+ };
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ fuel-gauge@35 {
+ compatible = "richtek,rt5033-battery";
+ reg = <0x35>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <121 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&fg_alert_default>;
+
+ power-supplies = <&rt5033_charger>;
+ };
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ pmic@34 {
+ compatible = "richtek,rt5033";
+ reg = <0x34>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <62 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_default>;
+
+ regulators {
+ safe_ldo_reg: SAFE_LDO {
+ regulator-name = "SAFE_LDO";
+ regulator-min-microvolt = <4900000>;
+ regulator-max-microvolt = <4900000>;
+ regulator-always-on;
+ };
+ ldo_reg: LDO {
+ regulator-name = "LDO";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ buck_reg: BUCK {
+ regulator-name = "BUCK";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ };
+
+ rt5033_charger: charger {
+ compatible = "richtek,rt5033-charger";
+ monitored-battery = <&battery>;
+ richtek,usb-connector = <&usb_con>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml b/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
index 10c7b408f33a..aea0b7d57d04 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
@@ -153,29 +153,18 @@ dependencies:
additionalProperties: false
allOf:
- - if:
+ - not:
required:
- s5m8767,pmic-buck2-uses-gpio-dvs
- then:
- properties:
- s5m8767,pmic-buck3-uses-gpio-dvs: false
- s5m8767,pmic-buck4-uses-gpio-dvs: false
-
- - if:
- required:
- s5m8767,pmic-buck3-uses-gpio-dvs
- then:
- properties:
- s5m8767,pmic-buck2-uses-gpio-dvs: false
- s5m8767,pmic-buck4-uses-gpio-dvs: false
-
- - if:
+ - not:
required:
+ - s5m8767,pmic-buck2-uses-gpio-dvs
+ - s5m8767,pmic-buck4-uses-gpio-dvs
+ - not:
+ required:
+ - s5m8767,pmic-buck3-uses-gpio-dvs
- s5m8767,pmic-buck4-uses-gpio-dvs
- then:
- properties:
- s5m8767,pmic-buck2-uses-gpio-dvs: false
- s5m8767,pmic-buck3-uses-gpio-dvs: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
index 9573e4af949e..97c61097f9e2 100644
--- a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
@@ -184,7 +184,7 @@ properties:
additionalProperties: false
patternProperties:
- "^(buck[1-4]|ldo[1-6]|boost|pwr_sw[1-2])-supply$":
+ "^(buck[1-4]|ldo[1-6]|vref_ddr|boost|pwr_sw[1-2])-supply$":
description: STPMIC1 voltage regulators supplies
"^(buck[1-4]|ldo[1-6]|boost|vref_ddr|pwr_sw[1-2])$":
diff --git a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
index 0c98d913747b..e6289fbe6907 100644
--- a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
+++ b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
@@ -101,7 +101,7 @@ examples:
};
clock-controller@4140 {
- compatible = "ti,am654-ehrpwm-tbclk", "syscon";
+ compatible = "ti,am654-ehrpwm-tbclk";
reg = <0x4140 0x18>;
#clock-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/mfd/ti,tps6594.yaml b/Documentation/devicetree/bindings/mfd/ti,tps6594.yaml
new file mode 100644
index 000000000000..9d43376bebed
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/ti,tps6594.yaml
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/ti,tps6594.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI TPS6594 Power Management Integrated Circuit
+
+maintainers:
+ - Julien Panis <jpanis@baylibre.com>
+
+description:
+ TPS6594 is a Power Management IC which provides regulators and other
+ features like GPIOs, RTC, watchdog, ESMs (Error Signal Monitor), and
+ PFSM (Pre-configurable Finite State Machine) managing the state of the device.
+ TPS6594 is the super-set device while TPS6593 and LP8764 are derivatives.
+
+properties:
+ compatible:
+ enum:
+ - ti,lp8764-q1
+ - ti,tps6593-q1
+ - ti,tps6594-q1
+
+ reg:
+ description: I2C slave address or SPI chip select number.
+ maxItems: 1
+
+ ti,primary-pmic:
+ type: boolean
+ description: |
+ Identify the primary PMIC on SPMI bus.
+ A multi-PMIC synchronization scheme is implemented in the PMIC device
+ to synchronize the power state changes with other PMIC devices. This is
+ accomplished through a SPMI bus: the primary PMIC is the controller
+ device on the SPMI bus, and the secondary PMICs are the target devices
+ on the SPMI bus.
+
+ system-power-controller: true
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+ description: |
+ The first cell is the pin number, the second cell is used to specify flags.
+ See ../gpio/gpio.txt for more information.
+
+ interrupts:
+ maxItems: 1
+
+ regulators:
+ type: object
+ description: List of regulators provided by this controller.
+
+ patternProperties:
+ "^buck([1-5]|12|34|123|1234)$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+
+ unevaluatedProperties: false
+
+ "^ldo[1-4]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+
+ unevaluatedProperties: false
+
+ allOf:
+ - if:
+ required:
+ - buck12
+ then:
+ properties:
+ buck123: false
+ buck1234: false
+ - if:
+ required:
+ - buck123
+ then:
+ properties:
+ buck34: false
+ - if:
+ required:
+ - buck1234
+ then:
+ properties:
+ buck34: false
+
+ additionalProperties: false
+
+patternProperties:
+ "^buck([1-5]|12|34|123|1234)-supply$":
+ description: Input supply phandle for each buck.
+
+ "^ldo[1-4]-supply$":
+ description: Input supply phandle for each ldo.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tps6593: pmic@48 {
+ compatible = "ti,tps6593-q1";
+ reg = <0x48>;
+ ti,primary-pmic;
+ system-power-controller;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq_pins_default>;
+ interrupt-parent = <&mcu_gpio0>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+
+ buck123-supply = <&vcc_3v3_sys>;
+ buck4-supply = <&vcc_3v3_sys>;
+ buck5-supply = <&vcc_3v3_sys>;
+ ldo1-supply = <&vcc_3v3_sys>;
+ ldo2-supply = <&vcc_3v3_sys>;
+ ldo3-supply = <&buck5>;
+ ldo4-supply = <&vcc_3v3_sys>;
+
+ regulators {
+ buck123: buck123 {
+ regulator-name = "vcc_core";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck4: buck4 {
+ regulator-name = "vcc_1v1";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5: buck5 {
+ regulator-name = "vcc_1v8_sys";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: ldo1 {
+ regulator-name = "vddshv5_sdio";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2: ldo2 {
+ regulator-name = "vpp_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3: ldo3 {
+ regulator-name = "vcc_0v85";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4: ldo4 {
+ regulator-name = "vdda_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
index f7f0f2c0421a..9ad55746133b 100644
--- a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
+++ b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
@@ -90,6 +90,7 @@ properties:
oneOf:
- enum:
- x-powers,axp152
+ - x-powers,axp192
- x-powers,axp202
- x-powers,axp209
- x-powers,axp221
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml b/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
index f8c976898a95..18f6733408b4 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
@@ -164,7 +164,7 @@ examples:
reg = <0 0xf80000>;
};
firmware@f80000 {
- label ="firmware";
+ label = "firmware";
reg = <0xf80000 0x80000>;
read-only;
};
diff --git a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
index 08d74ca0769c..5aa320c8af5a 100644
--- a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
@@ -156,7 +156,7 @@ examples:
reg = <0x1101c000 0x1300>;
interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "macirq";
- phy-mode ="rgmii-rxid";
+ phy-mode = "rgmii-rxid";
mac-address = [00 55 7b b5 7d f7];
clock-names = "axi",
"apb",
diff --git a/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml b/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml
index 36def7128fca..13412af7f046 100644
--- a/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml
+++ b/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml
@@ -36,14 +36,29 @@ properties:
et0macaddr:
type: object
description: First Ethernet interface's MAC address
+ properties:
+ "#nvmem-cell-cells":
+ description: The first argument is a MAC address offset.
+ const: 1
+ additionalProperties: false
et1macaddr:
type: object
description: Second Ethernet interface's MAC address
+ properties:
+ "#nvmem-cell-cells":
+ description: The first argument is a MAC address offset.
+ const: 1
+ additionalProperties: false
et2macaddr:
type: object
description: Third Ethernet interface's MAC address
+ properties:
+ "#nvmem-cell-cells":
+ description: The first argument is a MAC address offset.
+ const: 1
+ additionalProperties: false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
index 9876243ff1e8..99e60d713dac 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/nvmem/imx-ocotp.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Freescale i.MX6 On-Chip OTP Controller (OCOTP)
+title: Freescale i.MX On-Chip OTP Controller (OCOTP)
maintainers:
- Anson Huang <Anson.Huang@nxp.com>
@@ -12,7 +12,7 @@ maintainers:
description: |
This binding represents the on-chip eFuse OTP controller found on
i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL,
- i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN and i.MX8MP SoCs.
+ i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN, i.MX8MP and i.MX93 SoCs.
allOf:
- $ref: nvmem.yaml#
@@ -32,6 +32,7 @@ properties:
- fsl,imx7ulp-ocotp
- fsl,imx8mq-ocotp
- fsl,imx8mm-ocotp
+ - fsl,imx93-ocotp
- const: syscon
- items:
- enum:
@@ -46,12 +47,6 @@ properties:
reg:
maxItems: 1
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 1
-
clocks:
maxItems: 1
@@ -61,21 +56,6 @@ required:
- compatible
- reg
-patternProperties:
- "^.*@[0-9a-f]+$":
- type: object
-
- properties:
- reg:
- maxItems: 1
- description:
- Offset and size in bytes within the storage device.
-
- required:
- - reg
-
- additionalProperties: false
-
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml b/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml
new file mode 100644
index 000000000000..e698098450e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/layouts/fixed-cell.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Fixed offset & size NVMEM cell
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+properties:
+ reg:
+ maxItems: 1
+
+ bits:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - minimum: 0
+ maximum: 7
+ description:
+ Offset in bit within the address range specified by reg.
+ - minimum: 1
+ description:
+ Size in bit within the address range specified by reg.
+
+required:
+ - reg
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/nvmem/layouts/fixed-layout.yaml b/Documentation/devicetree/bindings/nvmem/layouts/fixed-layout.yaml
new file mode 100644
index 000000000000..c271537d0714
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/layouts/fixed-layout.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/layouts/fixed-layout.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVMEM layout for fixed NVMEM cells
+
+description:
+ Many NVMEM devices have a hardcoded cells layout (the offset and size of the
+ defined NVMEM content don't change).
+
+ This binding allows defining such NVMEM layout with its cells. It can be used
+ on top of any NVMEM device.
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+ compatible:
+ const: fixed-layout
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+patternProperties:
+ "@[a-f0-9]+$":
+ type: object
+ $ref: fixed-cell.yaml
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ calibration@4000 {
+ reg = <0x4000 0x100>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml
index 8512ee538c4c..3b40f7880774 100644
--- a/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml
+++ b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml
@@ -18,16 +18,13 @@ description: |
perform their parsing. The nvmem-layout container is here to describe these.
oneOf:
+ - $ref: fixed-layout.yaml
- $ref: kontron,sl28-vpd.yaml
- $ref: onie,tlv-layout.yaml
properties:
compatible: true
- '#address-cells': false
-
- '#size-cells': false
-
required:
- compatible
diff --git a/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml b/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml
index d16d42fb98b6..7ec2988b597e 100644
--- a/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml
+++ b/Documentation/devicetree/bindings/nvmem/mediatek,efuse.yaml
@@ -27,6 +27,7 @@ properties:
- enum:
- mediatek,mt7622-efuse
- mediatek,mt7623-efuse
+ - mediatek,mt7986-efuse
- mediatek,mt8173-efuse
- mediatek,mt8183-efuse
- mediatek,mt8186-efuse
diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
index 8938eec22b52..a9b822aeaa7e 100644
--- a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
@@ -18,12 +18,6 @@ properties:
- fsl,imx23-ocotp
- fsl,imx28-ocotp
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 1
-
reg:
maxItems: 1
@@ -35,7 +29,7 @@ required:
- reg
- clocks
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
index 75bb93dda9df..980244100690 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -49,23 +49,8 @@ properties:
patternProperties:
"@[0-9a-f]+(,[0-7])?$":
type: object
-
- properties:
- reg:
- maxItems: 1
- description:
- Offset and size in bytes within the storage device.
-
- bits:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- items:
- - minimum: 0
- maximum: 7
- description:
- Offset in bit within the address range specified by reg.
- - minimum: 1
- description:
- Size in bit within the address range specified by reg.
+ $ref: layouts/fixed-cell.yaml
+ deprecated: true
additionalProperties: true
@@ -83,24 +68,30 @@ examples:
/* ... */
- /* Data cells */
- tsens_calibration: calib@404 {
- reg = <0x404 0x10>;
- };
-
- tsens_calibration_bckp: calib_bckp@504 {
- reg = <0x504 0x11>;
- bits = <6 128>;
- };
-
- pvs_version: pvs-version@6 {
- reg = <0x6 0x2>;
- bits = <7 2>;
- };
-
- speed_bin: speed-bin@c{
- reg = <0xc 0x1>;
- bits = <2 3>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* Data cells */
+ tsens_calibration: calib@404 {
+ reg = <0x404 0x10>;
+ };
+
+ tsens_calibration_bckp: calib_bckp@504 {
+ reg = <0x504 0x11>;
+ bits = <6 128>;
+ };
+
+ pvs_version: pvs-version@6 {
+ reg = <0x6 0x2>;
+ bits = <7 2>;
+ };
+
+ speed_bin: speed-bin@c{
+ reg = <0xc 0x1>;
+ bits = <2 3>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
index 076566ef9cc8..6cd4682a167d 100644
--- a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
+++ b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
@@ -67,12 +67,6 @@ properties:
power-domains:
maxItems: 1
- # Needed if any child nodes are present.
- "#address-cells":
- const: 1
- "#size-cells":
- const: 1
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml b/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml
index dce0c7d84ce7..cd980def97b8 100644
--- a/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml
+++ b/Documentation/devicetree/bindings/nvmem/qcom,spmi-sdam.yaml
@@ -25,12 +25,6 @@ properties:
reg:
maxItems: 1
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 1
-
ranges: true
required:
diff --git a/Documentation/devicetree/bindings/nvmem/rmem.yaml b/Documentation/devicetree/bindings/nvmem/rmem.yaml
index 38a39c9b8c1c..1ec0d09bcafa 100644
--- a/Documentation/devicetree/bindings/nvmem/rmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/rmem.yaml
@@ -17,6 +17,7 @@ properties:
items:
- enum:
- raspberrypi,bootloader-config
+ - raspberrypi,bootloader-public-key
- const: nvmem-rmem
reg:
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml b/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml
new file mode 100644
index 000000000000..9c6eff788928
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/rockchip,otp.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/rockchip,otp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip internal OTP (One Time Programmable) memory
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ enum:
+ - rockchip,px30-otp
+ - rockchip,rk3308-otp
+ - rockchip,rk3588-otp
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 3
+ maxItems: 4
+
+ clock-names:
+ minItems: 3
+ items:
+ - const: otp
+ - const: apb_pclk
+ - const: phy
+ - const: arb
+
+ resets:
+ minItems: 1
+ maxItems: 3
+
+ reset-names:
+ minItems: 1
+ maxItems: 3
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+
+allOf:
+ - $ref: nvmem.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,px30-otp
+ - rockchip,rk3308-otp
+ then:
+ properties:
+ clocks:
+ maxItems: 3
+ resets:
+ maxItems: 1
+ reset-names:
+ items:
+ - const: phy
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3588-otp
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ resets:
+ minItems: 3
+ reset-names:
+ items:
+ - const: otp
+ - const: apb
+ - const: arb
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/px30-cru.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ otp: efuse@ff290000 {
+ compatible = "rockchip,px30-otp";
+ reg = <0x0 0xff290000 0x0 0x4000>;
+ clocks = <&cru SCLK_OTP_USR>, <&cru PCLK_OTP_NS>,
+ <&cru PCLK_OTP_PHY>;
+ clock-names = "otp", "apb_pclk", "phy";
+ resets = <&cru SRST_OTP_PHY>;
+ reset-names = "phy";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpu_id: id@7 {
+ reg = <0x07 0x10>;
+ };
+
+ cpu_leakage: cpu-leakage@17 {
+ reg = <0x17 0x1>;
+ };
+
+ performance: performance@1e {
+ reg = <0x1e 0x1>;
+ bits = <4 3>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-otp.txt b/Documentation/devicetree/bindings/nvmem/rockchip-otp.txt
deleted file mode 100644
index 40f649f7c2e5..000000000000
--- a/Documentation/devicetree/bindings/nvmem/rockchip-otp.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Rockchip internal OTP (One Time Programmable) memory device tree bindings
-
-Required properties:
-- compatible: Should be one of the following.
- - "rockchip,px30-otp" - for PX30 SoCs.
- - "rockchip,rk3308-otp" - for RK3308 SoCs.
-- reg: Should contain the registers location and size
-- clocks: Must contain an entry for each entry in clock-names.
-- clock-names: Should be "otp", "apb_pclk" and "phy".
-- resets: Must contain an entry for each entry in reset-names.
- See ../../reset/reset.txt for details.
-- reset-names: Should be "phy".
-
-See nvmem.txt for more information.
-
-Example:
- otp: otp@ff290000 {
- compatible = "rockchip,px30-otp";
- reg = <0x0 0xff290000 0x0 0x4000>;
- #address-cells = <1>;
- #size-cells = <1>;
- clocks = <&cru SCLK_OTP_USR>, <&cru PCLK_OTP_NS>,
- <&cru PCLK_OTP_PHY>;
- clock-names = "otp", "apb_pclk", "phy";
- };
diff --git a/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml b/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml
index b8bca0599c45..efccc5aacbe0 100644
--- a/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml
+++ b/Documentation/devicetree/bindings/nvmem/socionext,uniphier-efuse.yaml
@@ -14,9 +14,6 @@ allOf:
- $ref: nvmem.yaml#
properties:
- "#address-cells": true
- "#size-cells": true
-
compatible:
const: socionext,uniphier-efuse
diff --git a/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
index 8877c2283e9e..da3f1de7d281 100644
--- a/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
@@ -28,12 +28,6 @@ properties:
clocks:
maxItems: 1
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 1
-
thermal-calibration:
type: object
description: thermal calibration values
diff --git a/Documentation/devicetree/bindings/perf/amlogic,g12-ddr-pmu.yaml b/Documentation/devicetree/bindings/perf/amlogic,g12-ddr-pmu.yaml
index 50f46a6898b1..4adab0149108 100644
--- a/Documentation/devicetree/bindings/perf/amlogic,g12-ddr-pmu.yaml
+++ b/Documentation/devicetree/bindings/perf/amlogic,g12-ddr-pmu.yaml
@@ -42,8 +42,8 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
pmu {
- #address-cells=<2>;
- #size-cells=<2>;
+ #address-cells = <2>;
+ #size-cells = <2>;
pmu@ff638000 {
compatible = "amlogic,g12a-ddr-pmu";
diff --git a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml
index 43a4b880534c..580fbe37b37f 100644
--- a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml
@@ -115,8 +115,8 @@ allOf:
compatible:
contains:
enum:
- - const: brcm,bcm4908-usb-phy
- - const: brcm,brcmstb-usb-phy
+ - brcm,bcm4908-usb-phy
+ - brcm,brcmstb-usb-phy
then:
properties:
reg:
diff --git a/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt b/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt
deleted file mode 100644
index 3dc8b3d2ffbb..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-BROADCOM KONA USB2 PHY
-
-Required properties:
- - compatible: brcm,kona-usb2-phy
- - reg: offset and length of the PHY registers
- - #phy-cells: must be 0
-Refer to phy/phy-bindings.txt for the generic PHY binding properties
-
-Example:
-
- usbphy: usb-phy@3f130000 {
- compatible = "brcm,kona-usb2-phy";
- reg = <0x3f130000 0x28>;
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.yaml
new file mode 100644
index 000000000000..d7faeb81f7a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/brcm,kona-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Kona family USB 2.0 PHY
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ const: brcm,kona-usb2-phy
+
+ reg:
+ maxItems: 1
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@3f130000 {
+ compatible = "brcm,kona-usb2-phy";
+ reg = <0x3f130000 0x28>;
+ #phy-cells = <0>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/phy/cdns,salvo-phy.yaml b/Documentation/devicetree/bindings/phy/cdns,salvo-phy.yaml
index c9e65a2facd5..c7281a7c8244 100644
--- a/Documentation/devicetree/bindings/phy/cdns,salvo-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/cdns,salvo-phy.yaml
@@ -31,6 +31,12 @@ properties:
"#phy-cells":
const: 0
+ cdns,usb2-disconnect-threshold-microvolt:
+ description: The microvolt threshold value used for detecting a USB
+ disconnection event.
+ enum: [575, 610, 645]
+ default: 575
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
index e6f9f5540cc3..dc3a3f709fea 100644
--- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
@@ -35,6 +35,53 @@ properties:
description:
A phandle to the regulator for USB VBUS.
+ fsl,phy-tx-vref-tune-percent:
+ description:
+ Tunes the HS DC level relative to the nominal level
+ minimum: 94
+ maximum: 124
+
+ fsl,phy-tx-rise-tune-percent:
+ description:
+ Adjusts the rise/fall time duration of the HS waveform relative to
+ its nominal value
+ minimum: 97
+ maximum: 103
+
+ fsl,phy-tx-preemp-amp-tune-microamp:
+ description:
+ Adjust amount of current sourced to DPn and DMn after a J-to-K
+ or K-to-J transition. Default is 0 (disabled).
+ minimum: 0
+ maximum: 1800
+
+ fsl,phy-tx-vboost-level-microvolt:
+ description:
+ Adjust the boosted transmit launch pk-pk differential amplitude
+ minimum: 880
+ maximum: 1120
+
+ fsl,phy-comp-dis-tune-percent:
+ description:
+ Adjust the voltage level used to detect a disconnect event at the host
+ relative to the nominal value
+ minimum: 91
+ maximum: 115
+
+ fsl,phy-pcs-tx-deemph-3p5db-attenuation-db:
+ description:
+ Adjust TX de-emphasis attenuation in dB at nominal
+ 3.5dB point as per USB specification
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 36
+
+ fsl,phy-pcs-tx-swing-full-percent:
+ description:
+ Scaling of the voltage defined by fsl,phy-tx-vboost-level-microvolt
+ minimum: 0
+ maximum: 100
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml
new file mode 100644
index 000000000000..f4b1ca2fb562
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/fsl,mxs-usbphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale MXS USB Phy Device
+
+maintainers:
+ - Xu Yang <xu.yang_2@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,imx23-usbphy
+ - fsl,imx7ulp-usbphy
+ - fsl,vf610-usbphy
+ - items:
+ - enum:
+ - fsl,imx28-usbphy
+ - fsl,imx6ul-usbphy
+ - fsl,imx6sl-usbphy
+ - fsl,imx6sx-usbphy
+ - fsl,imx6q-usbphy
+ - const: fsl,imx23-usbphy
+ - items:
+ - const: fsl,imx6sll-usbphy
+ - const: fsl,imx6ul-usbphy
+ - const: fsl,imx23-usbphy
+ - items:
+ - enum:
+ - fsl,imx8dxl-usbphy
+ - fsl,imx8qm-usbphy
+ - fsl,imx8ulp-usbphy
+ - const: fsl,imx7ulp-usbphy
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#phy-cells':
+ const: 0
+
+ power-domains:
+ maxItems: 1
+
+ fsl,anatop:
+ description:
+ phandle for the anatop register; it is only used for the imx6 SoC series.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ phy-3p0-supply:
+ description:
+ One of the USB PHY's power supplies. Can be used to keep good signal
+ quality.
+
+ fsl,tx-cal-45-dn-ohms:
+ description:
+ Resistance (in ohms) of switchable high-speed trimming resistor
+ connected in parallel with the 45 ohm resistor that terminates
+ the DN output signal.
+ minimum: 35
+ maximum: 54
+ default: 45
+
+ fsl,tx-cal-45-dp-ohms:
+ description:
+ Resistance (in ohms) of switchable high-speed trimming resistor
+ connected in parallel with the 45 ohm resistor that terminates
+ the DP output signal.
+ minimum: 35
+ maximum: 54
+ default: 45
+
+ fsl,tx-d-cal:
+ description:
+ Current trimming value (as a percentage) of the 17.78 mA TX
+ reference current.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 79
+ maximum: 119
+ default: 100
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,imx6q-usbphy
+ - fsl,imx6sl-usbphy
+ - fsl,imx6sx-usbphy
+ - fsl,imx6sll-usbphy
+ - fsl,vf610-usbphy
+ - items:
+ - const: fsl,imx6ul-usbphy
+ - const: fsl,imx23-usbphy
+ then:
+ required:
+ - fsl,anatop
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx6qdl-clock.h>
+
+ usbphy1: usb-phy@20c9000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020c9000 0x1000>;
+ clocks = <&clks IMX6QDL_CLK_USBPHY1>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,anatop = <&anatop>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/phy/intel,combo-phy.yaml b/Documentation/devicetree/bindings/phy/intel,combo-phy.yaml
index 5d54b0a0e873..7dd6a4d94b48 100644
--- a/Documentation/devicetree/bindings/phy/intel,combo-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/intel,combo-phy.yaml
@@ -15,7 +15,7 @@ description: |
properties:
$nodename:
- pattern: "combophy(@.*|-[0-9a-f])*$"
+ pattern: "combophy(@.*|-([0-9]|[1-9][0-9]+))?$"
compatible:
items:
diff --git a/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
index a9f78344efdb..a63b20dfa4a5 100644
--- a/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
@@ -87,7 +87,7 @@ examples:
clocks = <&clk26m>;
clock-output-names = "mipi_tx0_pll";
drive-strength-microamp = <4000>;
- nvmem-cells= <&mipi_tx_calibration>;
+ nvmem-cells = <&mipi_tx_calibration>;
nvmem-cell-names = "calibration-data";
#clock-cells = <0>;
#phy-cells = <0>;
diff --git a/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml b/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml
index 786cfd71cb7e..3c28ec50f097 100644
--- a/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml
@@ -32,15 +32,6 @@ properties:
clock-names:
const: phy_ref
- assigned-clocks:
- maxItems: 1
-
- assigned-clock-parents:
- maxItems: 1
-
- assigned-clock-rates:
- maxItems: 1
-
"#phy-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt b/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt
deleted file mode 100644
index 70c813b0755f..000000000000
--- a/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-* Freescale MXS USB Phy Device
-
-Required properties:
-- compatible: should contain:
- * "fsl,imx23-usbphy" for imx23 and imx28
- * "fsl,imx6q-usbphy" for imx6dq and imx6dl
- * "fsl,imx6sl-usbphy" for imx6sl
- * "fsl,vf610-usbphy" for Vybrid vf610
- * "fsl,imx6sx-usbphy" for imx6sx
- * "fsl,imx7ulp-usbphy" for imx7ulp
- * "fsl,imx8dxl-usbphy" for imx8dxl
- "fsl,imx23-usbphy" is still a fallback for other strings
-- reg: Should contain registers location and length
-- interrupts: Should contain phy interrupt
-- fsl,anatop: phandle for anatop register, it is only for imx6 SoC series
-
-Optional properties:
-- fsl,tx-cal-45-dn-ohms: Integer [35-54]. Resistance (in ohms) of switchable
- high-speed trimming resistor connected in parallel with the 45 ohm resistor
- that terminates the DN output signal. Default: 45
-- fsl,tx-cal-45-dp-ohms: Integer [35-54]. Resistance (in ohms) of switchable
- high-speed trimming resistor connected in parallel with the 45 ohm resistor
- that terminates the DP output signal. Default: 45
-- fsl,tx-d-cal: Integer [79-119]. Current trimming value (as a percentage) of
- the 17.78mA TX reference current. Default: 100
-
-Example:
-usbphy1: usb-phy@20c9000 {
- compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
- reg = <0x020c9000 0x1000>;
- interrupts = <0 44 0x04>;
- fsl,anatop = <&anatop>;
-};
diff --git a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
index c4f8e6ffa5c3..6566353f1a02 100644
--- a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
@@ -43,6 +43,9 @@ properties:
"#phy-cells":
const: 0
+ power-domains:
+ maxItems: 1
+
vdda-phy-supply: true
vdda-pll-supply: true
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
index 62045dcfb20c..3d42ee3901a1 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
@@ -203,6 +203,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sc8180x-qmp-pcie-phy
- qcom,sm8250-qmp-gen3x2-pcie-phy
- qcom,sm8250-qmp-modem-pcie-phy
- qcom,sm8450-qmp-gen4x2-pcie-phy
@@ -224,7 +225,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,sc8180x-qmp-pcie-phy
- qcom,sdm845-qmp-pcie-phy
- qcom,sdx55-qmp-pcie-phy
- qcom,sm8250-qmp-gen3x1-pcie-phy
diff --git a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml
index 80a5348dbfde..881ba543fd46 100644
--- a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml
@@ -160,6 +160,7 @@ allOf:
contains:
enum:
- qcom,msm8998-qmp-ufs-phy
+ - qcom,sc8180x-qmp-ufs-phy
- qcom,sdm845-qmp-ufs-phy
- qcom,sm6350-qmp-ufs-phy
- qcom,sm8150-qmp-ufs-phy
@@ -183,23 +184,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,sc8180x-qmp-ufs-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX
- - description: RX
- - description: PCS
- - description: PCS_MISC
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,msm8996-qmp-ufs-phy
- qcom,sm6115-qmp-ufs-phy
then:
diff --git a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml
index e81a38281f8c..4c96dab5b9e3 100644
--- a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml
@@ -23,14 +23,12 @@ properties:
- qcom,ipq8074-qmp-usb3-phy
- qcom,msm8996-qmp-usb3-phy
- qcom,msm8998-qmp-usb3-phy
- - qcom,qcm2290-qmp-usb3-phy
- qcom,sc7180-qmp-usb3-phy
- qcom,sc8180x-qmp-usb3-phy
- qcom,sdm845-qmp-usb3-phy
- qcom,sdm845-qmp-usb3-uni-phy
- qcom,sdx55-qmp-usb3-uni-phy
- qcom,sdx65-qmp-usb3-uni-phy
- - qcom,sm6115-qmp-usb3-phy
- qcom,sm8150-qmp-usb3-phy
- qcom,sm8150-qmp-usb3-uni-phy
- qcom,sm8250-qmp-usb3-phy
@@ -253,29 +251,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,qcm2290-qmp-usb3-phy
- - qcom,sm6115-qmp-usb3-phy
- then:
- properties:
- clocks:
- maxItems: 3
- clock-names:
- items:
- - const: cfg_ahb
- - const: ref
- - const: com_aux
- resets:
- maxItems: 2
- reset-names:
- items:
- - const: phy_phy
- - const: phy
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,sdm845-qmp-usb3-phy
- qcom,sm8150-qmp-usb3-phy
- qcom,sm8350-qmp-usb3-phy
@@ -318,12 +293,10 @@ allOf:
enum:
- qcom,ipq6018-qmp-usb3-phy
- qcom,ipq8074-qmp-usb3-phy
- - qcom,qcm2290-qmp-usb3-phy
- qcom,sc7180-qmp-usb3-phy
- qcom,sc8180x-qmp-usb3-phy
- qcom,sdx55-qmp-usb3-uni-phy
- qcom,sdx65-qmp-usb3-uni-phy
- - qcom,sm6115-qmp-usb3-phy
- qcom,sm8150-qmp-usb3-uni-phy
- qcom,sm8250-qmp-usb3-phy
then:
diff --git a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
index 543c1a2811a5..95eecbaef05c 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
@@ -18,13 +18,14 @@ properties:
oneOf:
- items:
- enum:
+ - qcom,ipq6018-qusb2-phy
- qcom,ipq8074-qusb2-phy
+ - qcom,ipq9574-qusb2-phy
- qcom,msm8953-qusb2-phy
- qcom,msm8996-qusb2-phy
- qcom,msm8998-qusb2-phy
- qcom,qcm2290-qusb2-phy
- qcom,sdm660-qusb2-phy
- - qcom,ipq6018-qusb2-phy
- qcom,sm4250-qusb2-phy
- qcom,sm6115-qusb2-phy
- items:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sa8775p-dwmac-sgmii-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sa8775p-dwmac-sgmii-phy.yaml
new file mode 100644
index 000000000000..b9107759b2a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,sa8775p-dwmac-sgmii-phy.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/qcom,sa8775p-dwmac-sgmii-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SerDes/SGMII ethernet PHY controller
+
+maintainers:
+ - Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+
+description:
+ The SerDes PHY sits between the MAC and the external PHY and provides
+ separate Rx and Tx lines.
+
+properties:
+ compatible:
+ const: qcom,sa8775p-dwmac-sgmii-phy
+
+ reg:
+ items:
+ - description: serdes
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: sgmi_ref
+
+ phy-supply:
+ description:
+ Phandle to a regulator that provides power to the PHY.
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,sa8775p-gcc.h>
+ serdes_phy: phy@8901000 {
+ compatible = "qcom,sa8775p-dwmac-sgmii-phy";
+ reg = <0x08901000 0xe10>;
+ clocks = <&gcc GCC_SGMI_CLKREF_EN>;
+ clock-names = "sgmi_ref";
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml
index 0ef2c9b9d466..d30734338888 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml
@@ -61,6 +61,10 @@ properties:
power-domains:
maxItems: 1
+ orientation-switch:
+ description: Flag the port as possible handler of orientation switching
+ type: boolean
+
resets:
items:
- description: reset of phy block.
@@ -251,6 +255,8 @@ examples:
vdda-phy-supply = <&vdda_usb2_ss_1p2>;
vdda-pll-supply = <&vdda_usb2_ss_core>;
+ orientation-switch;
+
usb3-phy@200 {
reg = <0x200 0x128>,
<0x400 0x200>,
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
index 94c0fab065a8..a1897a7606df 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
@@ -78,9 +78,9 @@ allOf:
then:
properties:
clocks:
- maxItems: 3
+ minItems: 3
clock-names:
- maxItems: 3
+ minItems: 3
else:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
index 16fce1038285..f99fbbcd68fb 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
@@ -16,7 +16,11 @@ description:
properties:
compatible:
enum:
+ - qcom,ipq9574-qmp-usb3-phy
+ - qcom,qcm2290-qmp-usb3-phy
+ - qcom,sa8775p-qmp-usb3-uni-phy
- qcom,sc8280xp-qmp-usb3-uni-phy
+ - qcom,sm6115-qmp-usb3-phy
reg:
maxItems: 1
@@ -25,11 +29,7 @@ properties:
maxItems: 4
clock-names:
- items:
- - const: aux
- - const: ref
- - const: com_aux
- - const: pipe
+ maxItems: 4
power-domains:
maxItems: 1
@@ -60,7 +60,6 @@ required:
- reg
- clocks
- clock-names
- - power-domains
- resets
- reset-names
- vdda-phy-supply
@@ -69,6 +68,60 @@ required:
- clock-output-names
- "#phy-cells"
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq9574-qmp-usb3-phy
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: aux
+ - const: ref
+ - const: cfg_ahb
+ - const: pipe
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,qcm2290-qmp-usb3-phy
+ - qcom,sm6115-qmp-usb3-phy
+ then:
+ properties:
+ clocks:
+ maxItems: 4
+ clock-names:
+ items:
+ - const: cfg_ahb
+ - const: ref
+ - const: com_aux
+ - const: pipe
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sa8775p-qmp-usb3-uni-phy
+ - qcom,sc8280xp-qmp-usb3-uni-phy
+ then:
+ properties:
+ clocks:
+ maxItems: 4
+ clock-names:
+ items:
+ - const: aux
+ - const: ref
+ - const: com_aux
+ - const: pipe
+ required:
+ - power-domains
+
additionalProperties: false
examples:
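With this change the clock-names ordering depends on the compatible. As a partial
sketch only (the unit address and clock specifiers below are placeholders, and the
remaining required properties such as resets, supplies, #clock-cells,
clock-output-names and #phy-cells are omitted), the new ipq9574 variant would wire
its clocks in the order given in the first if/then block above:

    usb_phy: phy@7d000 {
        compatible = "qcom,ipq9574-qmp-usb3-phy";
        reg = <0x0007d000 0xa00>;
        /* placeholder clock specifiers, order matters */
        clocks = <&gcc 1>, <&gcc 2>, <&gcc 3>, <&gcc 4>;
        clock-names = "aux", "ref", "cfg_ahb", "pipe";
    };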
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
index 3cd5fc3e8fab..ef1c02d8ac88 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
@@ -60,6 +60,26 @@ properties:
description:
See include/dt-bindings/dt-bindings/phy/phy-qcom-qmp.h
+ orientation-switch:
+ description:
+ Flag the PHY as a possible handler of USB Type-C orientation switching
+ type: boolean
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Output endpoint of the PHY
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Incoming endpoint from the USB controller
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Incoming endpoint from the DisplayPort controller
+
required:
- compatible
- reg
@@ -98,6 +118,37 @@ examples:
vdda-phy-supply = <&vreg_l9d>;
vdda-pll-supply = <&vreg_l4d>;
+ orientation-switch;
+
#clock-cells = <1>;
#phy-cells = <1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ endpoint {
+ remote-endpoint = <&typec_connector_ss>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ endpoint {
+ remote-endpoint = <&dwc3_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ endpoint {
+ remote-endpoint = <&mdss_dp_out>;
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
index aa97478dd016..f042d6af1594 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
@@ -13,7 +13,9 @@ if:
properties:
compatible:
contains:
- const: qcom,usb-hs-phy-apq8064
+ enum:
+ - qcom,usb-hs-phy-apq8064
+ - qcom,usb-hs-phy-msm8960
then:
properties:
resets:
@@ -40,6 +42,7 @@ properties:
- qcom,usb-hs-phy-apq8064
- qcom,usb-hs-phy-msm8226
- qcom,usb-hs-phy-msm8916
+ - qcom,usb-hs-phy-msm8960
- qcom,usb-hs-phy-msm8974
- const: qcom,usb-hs-phy
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
index a26524b7e7b7..0f200e3f97a9 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
@@ -20,6 +20,7 @@ properties:
- qcom,usb-snps-femto-v2-phy
- items:
- enum:
+ - qcom,sa8775p-usb-hs-phy
- qcom,sc8280xp-usb-hs-phy
- const: qcom,usb-snps-hs-5nm-phy
- items:
diff --git a/Documentation/devicetree/bindings/power/reset/atmel,at91sam9260-shdwc.yaml b/Documentation/devicetree/bindings/power/reset/atmel,at91sam9260-shdwc.yaml
new file mode 100644
index 000000000000..f559a2cfd82e
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/atmel,at91sam9260-shdwc.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/atmel,at91sam9260-shdwc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip AT91 SHDWC Shutdown Controller
+
+maintainers:
+ - Claudiu Beznea <claudiu.beznea@microchip.com>
+
+description: |
+ Microchip AT91 SHDWC shutdown controller controls the power supplies VDDIO
+ and VDDCORE and the wake-up detection on debounced input lines.
+
+properties:
+ compatible:
+ enum:
+ - atmel,at91sam9260-shdwc
+ - atmel,at91sam9rl-shdwc
+ - atmel,at91sam9x5-shdwc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ atmel,wakeup-mode:
+ description: wake-up input operation mode
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ none, high, low, any ]
+
+ atmel,wakeup-counter:
+ description: counter on wake-up 0
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+
+ atmel,wakeup-rtt-timer:
+ description: enable real-time timer wake-up
+ type: boolean
+
+ atmel,wakeup-rtc-timer:
+ description: enable real-time clock wake-up
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: atmel,at91sam9x5-shdwc
+ then:
+ properties:
+ atmel,wakeup-rtt-timer: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: atmel,at91sam9260-shdwc
+ then:
+ properties:
+ atmel,wakeup-rtc-timer: false
+
+additionalProperties: false
+
+examples:
+ - |
+ shdwc: poweroff@fffffd10 {
+ compatible = "atmel,at91sam9260-shdwc";
+ reg = <0xfffffd10 0x10>;
+ clocks = <&clk32k>;
+ };
+
+...
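When a board uses the wake-up properties defined above, the node can carry them
directly; a minimal sketch (the chosen mode and counter value are illustrative
only, picked from the enum and range documented above):

    shdwc: poweroff@fffffd10 {
        compatible = "atmel,at91sam9260-shdwc";
        reg = <0xfffffd10 0x10>;
        clocks = <&clk32k>;
        atmel,wakeup-mode = "low";      /* wake on a low level on the wake-up pin */
        atmel,wakeup-counter = <10>;    /* illustrative counter value, 0..15 */
    };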
diff --git a/Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml b/Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml
new file mode 100644
index 000000000000..8c58e12cdb60
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/atmel,sama5d2-shdwc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip AT91 SAMA5D2 SHDWC Shutdown Controller
+
+maintainers:
+ - Claudiu Beznea <claudiu.beznea@microchip.com>
+
+description: |
+ Microchip AT91 SHDWC shutdown controller controls the power supplies VDDIO
+ and VDDCORE and the wake-up detection on debounced input lines.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: microchip,sama7g5-shdwc
+ - const: syscon
+ - enum:
+ - atmel,sama5d2-shdwc
+ - microchip,sam9x60-shdwc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ debounce-delay-us:
+ description:
+ Minimum wake-up input debounce period in microseconds. It is usually a
+ board-related property.
+
+ atmel,wakeup-rtc-timer:
+ description: enable real-time clock wake-up
+ type: boolean
+
+ atmel,wakeup-rtt-timer:
+ description: enable real-time timer wake-up
+ type: boolean
+
+patternProperties:
+ "^input@([0-9]|1[0-5])$":
+ description:
+ Wake-up input nodes. These are usually described in the "board" part of
+ the Device Tree. Note also that input 0 is linked to the wake-up pin and
+ is frequently used.
+ type: object
+ properties:
+ reg:
+ description: contains the wake-up input index
+ minimum: 0
+ maximum: 15
+
+ atmel,wakeup-active-high:
+ description:
+ The corresponding wake-up input described by the child forces the
+ wake-up of the core power supply on a high level. The default is to
+ be active low.
+ type: boolean
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: atmel,sama5d2-shdwc
+ then:
+ properties:
+ atmel,wakeup-rtt-timer: false
+
+additionalProperties: false
+
+examples:
+ - |
+ shdwc: poweroff@f8048010 {
+ compatible = "atmel,sama5d2-shdwc";
+ reg = <0xf8048010 0x10>;
+ clocks = <&clk32k>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ atmel,wakeup-rtc-timer;
+ debounce-delay-us = <976>;
+
+ input@0 {
+ reg = <0>;
+ };
+
+ input@1 {
+ reg = <1>;
+ atmel,wakeup-active-high;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.txt b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.txt
deleted file mode 100644
index 752d6126d5da..000000000000
--- a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-NVMEM reboot mode driver
-
-This driver gets reboot mode magic value from reboot-mode driver
-and stores it in a NVMEM cell named "reboot-mode". Then the bootloader
-can read it and take different action according to the magic
-value stored.
-
-Required properties:
-- compatible: should be "nvmem-reboot-mode".
-- nvmem-cells: A phandle to the reboot mode provided by a nvmem device.
-- nvmem-cell-names: Should be "reboot-mode".
-
-The rest of the properties should follow the generic reboot-mode description
-found in reboot-mode.txt
-
-Example:
- reboot-mode {
- compatible = "nvmem-reboot-mode";
- nvmem-cells = <&reboot_mode>;
- nvmem-cell-names = "reboot-mode";
-
- mode-normal = <0xAAAA5501>;
- mode-bootloader = <0xBBBB5500>;
- mode-recovery = <0xCCCC5502>;
- mode-test = <0xDDDD5503>;
- };
diff --git a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml
new file mode 100644
index 000000000000..14a262bcbf7c
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/nvmem-reboot-mode.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic NVMEM reboot mode
+
+maintainers:
+ - Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+
+description:
+ This driver gets the reboot mode magic value from the reboot-mode driver
+ and stores it in the NVMEM cell named "reboot-mode". The bootloader can
+ then read it and take different action according to the value.
+
+properties:
+ compatible:
+ const: nvmem-reboot-mode
+
+ nvmem-cells:
+ description:
+ A phandle pointing to the nvmem-cells node where the vendor-specific
+ magic value representing the reboot mode is stored.
+ maxItems: 1
+
+ nvmem-cell-names:
+ items:
+ - const: reboot-mode
+
+patternProperties:
+ "^mode-.+":
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Vendor-specific mode value written to the mode register
+
+required:
+ - compatible
+ - nvmem-cells
+ - nvmem-cell-names
+
+additionalProperties: false
+
+examples:
+ - |
+ reboot-mode {
+ compatible = "nvmem-reboot-mode";
+ nvmem-cells = <&reboot_reason>;
+ nvmem-cell-names = "reboot-mode";
+ mode-recovery = <0x01>;
+ mode-bootloader = <0x02>;
+ };
+...
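The &reboot_reason phandle in the example refers to an NVMEM cell exposed by an
NVMEM provider elsewhere in the tree; a hypothetical provider-side sketch (the
compatible, offsets and sizes below are placeholders, not taken from this
binding) could look like:

    sdam@7100 {
        compatible = "qcom,spmi-sdam";          /* placeholder provider */
        reg = <0x7100>;
        #address-cells = <1>;
        #size-cells = <1>;

        reboot_reason: reboot-reason@48 {
            reg = <0x48 0x1>;                   /* placeholder offset/size */
        };
    };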
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
index d96170eecbd2..5e460128b0d1 100644
--- a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
+++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
@@ -19,6 +19,7 @@ properties:
compatible:
enum:
- qcom,pm8916-pon
+ - qcom,pm8941-pon
- qcom,pms405-pon
- qcom,pm8998-pon
- qcom,pmk8350-pon
@@ -56,7 +57,6 @@ required:
unevaluatedProperties: false
allOf:
- - $ref: reboot-mode.yaml#
- if:
properties:
compatible:
@@ -66,12 +66,30 @@ allOf:
- qcom,pms405-pon
- qcom,pm8998-pon
then:
+ allOf:
+ - $ref: reboot-mode.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+ reg-names:
+ items:
+ - const: pon
+
+ # Special case for pm8941, which doesn't store reset mode
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: qcom,pm8941-pon
+ then:
properties:
reg:
maxItems: 1
reg-names:
items:
- const: pon
+
- if:
properties:
compatible:
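For the newly added pm8941 variant, which does not store a reset mode and so does
not pull in reboot-mode.yaml, a minimal node sketch would be (the register address
is an assumption, not taken from this schema):

    pon@800 {
        compatible = "qcom,pm8941-pon";
        reg = <0x800>;
    };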
diff --git a/Documentation/devicetree/bindings/power/supply/bq256xx.yaml b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
index 82f382a7ffb3..4fe9c3705265 100644
--- a/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
@@ -68,11 +68,29 @@ properties:
Interrupt sends an active low, 256 μs pulse to host to report the charger
device status and faults.
+ ti,no-thermistor:
+ type: boolean
+ description: Indicates that no thermistor is connected to the TS pin
+
required:
- compatible
- reg
- monitored-battery
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,bq25600
+ - ti,bq25601
+ - ti,bq25600d
+ - ti,bq25601d
+ then:
+ properties:
+ ti,no-thermistor: false
+
additionalProperties: false
examples:
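A partial sketch of how ti,no-thermistor might be used on a part that still allows
it; the bq25611d compatible and the I2C address are assumptions here, since the
property is rejected on bq25600/bq25601/bq25600d/bq25601d by the if/then block
above:

    charger@6b {
        compatible = "ti,bq25611d";
        reg = <0x6b>;
        monitored-battery = <&bat>;
        ti,no-thermistor;               /* TS pin left unconnected */
    };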
diff --git a/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml b/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml
new file mode 100644
index 000000000000..277c47e048b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom,pmi8998-charger.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/qcom,pmi8998-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm PMI8998/PM660 Switch-Mode Battery Charger "2"
+
+maintainers:
+ - Caleb Connolly <caleb.connolly@linaro.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,pmi8998-charger
+ - qcom,pm660-charger
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 4
+
+ interrupt-names:
+ items:
+ - const: usb-plugin
+ - const: bat-ov
+ - const: wdog-bark
+ - const: usbin-icl-change
+
+ io-channels:
+ items:
+ - description: USB in current in uA
+ - description: USB in voltage in uV
+
+ io-channel-names:
+ items:
+ - const: usbin_i
+ - const: usbin_v
+
+ monitored-battery:
+ description: phandle to the simple-battery node
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - io-channels
+ - io-channel-names
+ - monitored-battery
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ pmic {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <4>;
+
+ charger@1000 {
+ compatible = "qcom,pmi8998-charger";
+ reg = <0x1000>;
+
+ interrupts = <0x2 0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x13 0x6 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x16 0x1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "usb-plugin", "bat-ov", "wdog-bark", "usbin-icl-change";
+
+ io-channels = <&pmi8998_rradc 3>,
+ <&pmi8998_rradc 4>;
+ io-channel-names = "usbin_i",
+ "usbin_v";
+
+ monitored-battery = <&battery>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml
index 756c16d1727d..b5d8888d03d2 100644
--- a/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml
@@ -26,7 +26,7 @@ required:
- compatible
- reg
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/power/supply/richtek,rt5033-charger.yaml b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-charger.yaml
new file mode 100644
index 000000000000..5b3edd79a523
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-charger.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/richtek,rt5033-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT5033 PMIC Battery Charger
+
+maintainers:
+ - Jakob Hauser <jahau@rocketmail.com>
+
+description:
+ The battery charger of the multifunction device RT5033 has to be instantiated
+ under a sub-node named "charger" using the following format.
+
+properties:
+ compatible:
+ const: richtek,rt5033-charger
+
+ monitored-battery:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: |
+ Phandle to the monitored battery according to battery.yaml. The battery
+ node needs to contain five parameters.
+
+ precharge-current-microamp:
+ Current of pre-charge mode. The pre-charge current levels are 350 mA
+ to 650 mA, programmed by I2C in 100 mA steps.
+
+ constant-charge-current-max-microamp:
+ Current of fast-charge mode. The fast-charge current levels are 700 mA
+ to 2000 mA, programmed by I2C in 100 mA steps.
+
+ charge-term-current-microamp:
+ End-of-charge current. Its level ranges from 150 mA to 600 mA, in
+ 50 mA steps between 150 mA and 300 mA and in 100 mA steps between
+ 300 mA and 600 mA.
+
+ precharge-upper-limit-microvolt:
+ Voltage of pre-charge mode. If the battery voltage is below the pre-charge
+ threshold voltage, the charger is in pre-charge mode with pre-charge
+ current. Its levels are 2.3 V to 3.8 V, programmed by I2C in 0.1 V steps.
+
+ constant-charge-voltage-max-microvolt:
+ Battery regulation voltage of constant voltage mode. Its levels range
+ from 3.65 V to 4.4 V, programmed by I2C in 0.025 V steps.
+
+ richtek,usb-connector:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to a USB connector according to usb-connector.yaml. The connector
+ should be a child of the extcon device.
+
+required:
+ - monitored-battery
+
+additionalProperties: false
+
+examples:
+ - |
+ charger {
+ compatible = "richtek,rt5033-charger";
+ monitored-battery = <&battery>;
+ richtek,usb-connector = <&usb_con>;
+ };
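The monitored battery referenced above is expected to carry the five parameters
just described; a minimal sketch of such a simple-battery node, with concrete
values chosen purely as illustrations inside the ranges given above:

    battery: battery {
        compatible = "simple-battery";
        precharge-current-microamp = <450000>;
        constant-charge-current-max-microamp = <1000000>;
        charge-term-current-microamp = <150000>;
        precharge-upper-limit-microvolt = <3500000>;
        constant-charge-voltage-max-microvolt = <4350000>;
    };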
diff --git a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
index 3ce648dd91bd..34b7959d6772 100644
--- a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
+++ b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
@@ -22,6 +22,7 @@ properties:
compatible:
oneOf:
- enum:
+ - x-powers,axp192-usb-power-supply
- x-powers,axp202-usb-power-supply
- x-powers,axp221-usb-power-supply
- x-powers,axp223-usb-power-supply
diff --git a/Documentation/devicetree/bindings/pwm/imx-pwm.yaml b/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
index b3da4e629341..c01dff3b7f84 100644
--- a/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
@@ -43,6 +43,7 @@ properties:
- fsl,imx8mn-pwm
- fsl,imx8mp-pwm
- fsl,imx8mq-pwm
+ - fsl,imx8qxp-pwm
- const: fsl,imx27-pwm
reg:
@@ -61,6 +62,9 @@ properties:
interrupts:
maxItems: 1
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml b/Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml
index 8e176ba7a525..0fbe8a6469eb 100644
--- a/Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/mediatek,mt2712-pwm.yaml
@@ -22,6 +22,7 @@ properties:
- mediatek,mt7623-pwm
- mediatek,mt7628-pwm
- mediatek,mt7629-pwm
+ - mediatek,mt7981-pwm
- mediatek,mt7986-pwm
- mediatek,mt8183-pwm
- mediatek,mt8365-pwm
diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
deleted file mode 100644
index f5753b3f79df..000000000000
--- a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-BCM2835 PWM controller (Raspberry Pi controller)
-
-Required properties:
-- compatible: should be "brcm,bcm2835-pwm"
-- reg: physical base address and length of the controller's registers
-- clocks: This clock defines the base clock frequency of the PWM hardware
- system, the period and the duty_cycle of the PWM signal is a multiple of
- the base period.
-- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
- the cells format.
-
-Examples:
-
-pwm@2020c000 {
- compatible = "brcm,bcm2835-pwm";
- reg = <0x2020c000 0x28>;
- clocks = <&clk_pwm>;
- #pwm-cells = <3>;
-};
-
-clocks {
- ....
- clk_pwm: pwm {
- compatible = "fixed-clock";
- reg = <3>;
- #clock-cells = <0>;
- clock-frequency = <9200000>;
- };
- ....
-};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.yaml b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.yaml
new file mode 100644
index 000000000000..15e7fd98defc
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/pwm-bcm2835.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BCM2835 PWM controller (Raspberry Pi controller)
+
+maintainers:
+ - Stefan Wahren <stefan.wahren@i2se.com>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ const: brcm,bcm2835-pwm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 3
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - "#pwm-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ pwm@2020c000 {
+ compatible = "brcm,bcm2835-pwm";
+ reg = <0x2020c000 0x28>;
+ clocks = <&clk_pwm>;
+ #pwm-cells = <3>;
+ };
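The clocks phandle in the example points at whatever clock feeds the PWM block;
on boards without a dedicated clock controller this has traditionally been
modelled as a fixed-clock, for instance (frequency carried over from the previous
text binding, otherwise a placeholder):

    clk_pwm: clock-pwm {
        compatible = "fixed-clock";
        #clock-cells = <0>;
        clock-frequency = <9200000>;
    };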
diff --git a/Documentation/devicetree/bindings/pwm/pwm.yaml b/Documentation/devicetree/bindings/pwm/pwm.yaml
index 3c01f85029e5..abd9fa873354 100644
--- a/Documentation/devicetree/bindings/pwm/pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm.yaml
@@ -13,7 +13,7 @@ select: false
properties:
$nodename:
- pattern: "^pwm(@.*|-[0-9a-f])*$"
+ pattern: "^pwm(@.*|-([0-9]|[1-9][0-9]+))?$"
"#pwm-cells":
description:
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
index 4c8097010687..6b6a302a175c 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
@@ -35,6 +35,7 @@ properties:
- renesas,pwm-r8a77980 # R-Car V3H
- renesas,pwm-r8a77990 # R-Car E3
- renesas,pwm-r8a77995 # R-Car D3
+ - renesas,pwm-r8a779a0 # R-Car V3U
- renesas,pwm-r8a779g0 # R-Car V4H
- const: renesas,pwm-rcar
diff --git a/Documentation/devicetree/bindings/regulator/adi,max77541-regulator.yaml b/Documentation/devicetree/bindings/regulator/adi,max77541-regulator.yaml
new file mode 100644
index 000000000000..9e36d5467b56
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/adi,max77541-regulator.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/adi,max77541-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Buck Converter for MAX77540/MAX77541
+
+maintainers:
+ - Okan Sahin <okan.sahin@analog.com>
+
+description: |
+ This is a part of device tree bindings for ADI MAX77540/MAX77541
+
+ The buck converter is represented as a sub-node of the PMIC node on the device tree.
+
+ The device has two buck regulators.
+ See also Documentation/devicetree/bindings/mfd/adi,max77541.yaml for
+ additional information and example.
+
+patternProperties:
+ "^buck[12]$":
+ type: object
+ $ref: regulator.yaml#
+ additionalProperties: false
+ description: |
+ Buck regulator.
+
+ properties:
+ regulator-name: true
+ regulator-always-on: true
+ regulator-boot-on: true
+ regulator-min-microvolt:
+ minimum: 300000
+ regulator-max-microvolt:
+ maximum: 5200000
+
+additionalProperties: false
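A minimal sketch of the two buck sub-nodes as they would appear under the PMIC's
regulators container; the node placement and voltage values are assumptions here,
the authoritative example lives in the mfd binding referenced above:

    regulators {
        buck1 {
            regulator-name = "buck1";
            regulator-min-microvolt = <500000>;
            regulator-max-microvolt = <1800000>;
        };

        buck2 {
            regulator-name = "buck2";
            regulator-min-microvolt = <1200000>;
            regulator-max-microvolt = <5200000>;
        };
    };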
diff --git a/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
index b1cff3adb21b..89c564dfa5db 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
@@ -14,6 +14,9 @@ description: |
regulator will be enabled in situations where the device is required to
provide power to the connected peripheral.
+allOf:
+ - $ref: regulator.yaml#
+
properties:
compatible:
enum:
@@ -25,8 +28,11 @@ properties:
required:
- compatible
+ - reg
+ - regulator-min-microamp
+ - regulator-max-microamp
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -36,6 +42,8 @@ examples:
pm8150b_vbus: usb-vbus-regulator@1100 {
compatible = "qcom,pm8150b-vbus-reg";
reg = <0x1100>;
+ regulator-min-microamp = <500000>;
+ regulator-max-microamp = <3000000>;
};
};
...
diff --git a/Documentation/devicetree/bindings/remoteproc/amlogic,meson-mx-ao-arc.yaml b/Documentation/devicetree/bindings/remoteproc/amlogic,meson-mx-ao-arc.yaml
index 3100cb870170..76e8ca44906a 100644
--- a/Documentation/devicetree/bindings/remoteproc/amlogic,meson-mx-ao-arc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/amlogic,meson-mx-ao-arc.yaml
@@ -75,7 +75,7 @@ additionalProperties: false
examples:
- |
remoteproc@1c {
- compatible= "amlogic,meson8-ao-arc", "amlogic,meson-mx-ao-arc";
+ compatible = "amlogic,meson8-ao-arc", "amlogic,meson-mx-ao-arc";
reg = <0x1c 0x8>, <0x38 0x8>;
reg-names = "remap", "cpu";
resets = <&media_cpu_reset>;
diff --git a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
index 959a56f1b6c7..370af61d8f28 100644
--- a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
@@ -25,7 +25,14 @@ properties:
maxItems: 3
resets:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: mcu_rst
+ - const: hold_boot
+ minItems: 1
st,syscfg-holdboot:
description: remote processor reset hold boot
@@ -37,6 +44,7 @@ properties:
- description: The field mask of the hold boot
st,syscfg-tz:
+ deprecated: true
description:
Reference to the system configuration which holds the RCC trust zone mode
$ref: /schemas/types.yaml#/definitions/phandle-array
@@ -135,22 +143,48 @@ required:
- compatible
- reg
- resets
- - st,syscfg-holdboot
- - st,syscfg-tz
+
+allOf:
+ - if:
+ properties:
+ reset-names:
+ not:
+ contains:
+ const: hold_boot
+ then:
+ required:
+ - st,syscfg-holdboot
+ else:
+ properties:
+ st,syscfg-holdboot: false
additionalProperties: false
examples:
- |
#include <dt-bindings/reset/stm32mp1-resets.h>
- m4_rproc: m4@10000000 {
+ m4@10000000 {
compatible = "st,stm32mp1-m4";
reg = <0x10000000 0x40000>,
<0x30000000 0x40000>,
<0x38000000 0x10000>;
resets = <&rcc MCU_R>;
+ reset-names = "mcu_rst";
+ /* Hold boot managed using system config*/
st,syscfg-holdboot = <&rcc 0x10C 0x1>;
- st,syscfg-tz = <&rcc 0x000 0x1>;
+ st,syscfg-rsc-tbl = <&tamp 0x144 0xFFFFFFFF>;
+ st,syscfg-m4-state = <&tamp 0x148 0xFFFFFFFF>;
+ };
+ - |
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ m4@10000000 {
+ compatible = "st,stm32mp1-m4";
+ reg = <0x10000000 0x40000>,
+ <0x30000000 0x40000>,
+ <0x38000000 0x10000>;
+ /* Hold boot managed using SCMI reset controller */
+ resets = <&scmi MCU_R>, <&scmi MCU_HOLD_BOOT_R>;
+ reset-names = "mcu_rst", "hold_boot";
st,syscfg-rsc-tbl = <&tamp 0x144 0xFFFFFFFF>;
st,syscfg-m4-state = <&tamp 0x148 0xFFFFFFFF>;
};
diff --git a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt
deleted file mode 100644
index ed836868dbf1..000000000000
--- a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt
+++ /dev/null
@@ -1,55 +0,0 @@
---------------------------------------------------------------------------
- = Zynq UltraScale+ MPSoC and Versal reset driver binding =
---------------------------------------------------------------------------
-The Zynq UltraScale+ MPSoC and Versal has several different resets.
-
-See Chapter 36 of the Zynq UltraScale+ MPSoC TRM (UG) for more information
-about zynqmp resets.
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required Properties:
-- compatible: "xlnx,zynqmp-reset" for Zynq UltraScale+ MPSoC platform
- "xlnx,versal-reset" for Versal platform
-- #reset-cells: Specifies the number of cells needed to encode reset
- line, should be 1
-
--------
-Example
--------
-
-firmware {
- zynqmp_firmware: zynqmp-firmware {
- compatible = "xlnx,zynqmp-firmware";
- method = "smc";
-
- zynqmp_reset: reset-controller {
- compatible = "xlnx,zynqmp-reset";
- #reset-cells = <1>;
- };
- };
-};
-
-Specifying reset lines connected to IP modules
-==============================================
-
-Device nodes that need access to reset lines should
-specify them as a reset phandle in their corresponding node as
-specified in reset.txt.
-
-For list of all valid reset indices for Zynq UltraScale+ MPSoC see
-<dt-bindings/reset/xlnx-zynqmp-resets.h>
-For list of all valid reset indices for Versal see
-<dt-bindings/reset/xlnx-versal-resets.h>
-
-Example:
-
-serdes: zynqmp_phy@fd400000 {
- ...
-
- resets = <&zynqmp_reset ZYNQMP_RESET_SATA>;
- reset-names = "sata_rst";
-
- ...
-};
diff --git a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
new file mode 100644
index 000000000000..0d50f6a54af3
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/xlnx,zynqmp-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Zynq UltraScale+ MPSoC and Versal reset
+
+maintainers:
+ - Piyush Mehta <piyush.mehta@amd.com>
+
+description: |
+ The Zynq UltraScale+ MPSoC and Versal have several different resets.
+
+ The PS reset subsystem is responsible for handling the external reset
+ input to the device and for ensuring that all internal reset requirements
+ are met for the system as a whole and for the functional units.
+
+ Please also refer to reset.txt in this directory for common reset
+ controller binding usage. Device nodes that need access to reset
+ lines should specify them as a reset phandle in their corresponding
+ node as specified in reset.txt.
+
+ For a list of all valid reset indices for Zynq UltraScale+ MPSoC, see
+ <dt-bindings/reset/xlnx-zynqmp-resets.h>.
+
+ For a list of all valid reset indices for Versal, see
+ <dt-bindings/reset/xlnx-versal-resets.h>.
+
+properties:
+ compatible:
+ enum:
+ - xlnx,zynqmp-reset
+ - xlnx,versal-reset
+
+ "#reset-cells":
+ const: 1
+
+required:
+ - compatible
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ zynqmp_reset: reset-controller {
+ compatible = "xlnx,zynqmp-reset";
+ #reset-cells = <1>;
+ };
+
+...
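Consumers reference the controller with a reset phandle and an index from the
headers listed above; a sketch carried over from the SATA example in the previous
text binding (node name, address and the omitted properties are illustrative):

    #include <dt-bindings/reset/xlnx-zynqmp-resets.h>

    serdes: phy@fd400000 {
        /* ... other properties of the consumer node ... */
        resets = <&zynqmp_reset ZYNQMP_RESET_SATA>;
        reset-names = "sata_rst";
    };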
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index 67bd239ead0b..38c0b5213736 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -25,6 +25,7 @@ description: |
allOf:
- $ref: /schemas/cpu.yaml#
+ - $ref: extensions.yaml
properties:
compatible:
@@ -82,25 +83,6 @@ properties:
description:
The blocksize in bytes for the Zicboz cache operations.
- riscv,isa:
- description:
- Identifies the specific RISC-V instruction set architecture
- supported by the hart. These are documented in the RISC-V
- User-Level ISA document, available from
- https://riscv.org/specifications/
-
- Due to revisions of the ISA specification, some deviations
- have arisen over time.
- Notably, riscv,isa was defined prior to the creation of the
- Zicntr, Zicsr, Zifencei and Zihpm extensions and thus "i"
- implies "zicntr_zicsr_zifencei_zihpm".
-
- While the isa strings in ISA specification are case
- insensitive, letters in the riscv,isa string must be all
- lowercase.
- $ref: /schemas/types.yaml#/definitions/string
- pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$
-
# RISC-V has multiple properties for cache op block sizes as the sizes
# differ between individual CBO extensions
cache-op-block-size: false
@@ -139,8 +121,17 @@ properties:
DMIPS/MHz, relative to highest capacity-dmips-mhz
in the system.
+anyOf:
+ - required:
+ - riscv,isa
+ - required:
+ - riscv,isa-base
+
+dependencies:
+ riscv,isa-base: [ "riscv,isa-extensions" ]
+ riscv,isa-extensions: [ "riscv,isa-base" ]
+
required:
- - riscv,isa
- interrupt-controller
unevaluatedProperties: false
@@ -160,7 +151,9 @@ examples:
i-cache-sets = <128>;
i-cache-size = <16384>;
reg = <0>;
- riscv,isa = "rv64imac";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "c";
+
cpu_intc0: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
@@ -183,8 +176,10 @@ examples:
i-tlb-size = <32>;
mmu-type = "riscv,sv39";
reg = <1>;
- riscv,isa = "rv64imafdc";
tlb-split;
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c";
+
cpu_intc1: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
@@ -202,8 +197,10 @@ examples:
device_type = "cpu";
reg = <0>;
compatible = "riscv";
- riscv,isa = "rv64imafdc";
mmu-type = "riscv,sv48";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c";
+
interrupt-controller {
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml
new file mode 100644
index 000000000000..cc1f546fdbdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/riscv/extensions.yaml
@@ -0,0 +1,250 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/riscv/extensions.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RISC-V ISA extensions
+
+maintainers:
+ - Paul Walmsley <paul.walmsley@sifive.com>
+ - Palmer Dabbelt <palmer@sifive.com>
+ - Conor Dooley <conor@kernel.org>
+
+description: |
+ RISC-V has a large number of extensions, some of which are "standard"
+ extensions, meaning they are ratified by RISC-V International, and others
+ are "vendor" extensions.
+ This document defines properties that indicate whether a hart supports a
+ given extension.
+
+ Once a standard extension has been ratified, no changes in behaviour can be
+ made without the creation of a new extension.
+ The properties for standard extensions therefore map to their originally
+ ratified states, with the exception of the I, Zicntr & Zihpm extensions.
+ See the "i" property for more information.
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: riscv
+
+properties:
+ riscv,isa:
+ description:
+ Identifies the specific RISC-V instruction set architecture
+ supported by the hart. These are documented in the RISC-V
+ User-Level ISA document, available from
+ https://riscv.org/specifications/
+
+ Due to revisions of the ISA specification, some deviations
+ have arisen over time.
+ Notably, riscv,isa was defined prior to the creation of the
+ Zicntr, Zicsr, Zifencei and Zihpm extensions and thus "i"
+ implies "zicntr_zicsr_zifencei_zihpm".
+
+ While the isa strings in ISA specification are case
+ insensitive, letters in the riscv,isa string must be all
+ lowercase.
+ $ref: /schemas/types.yaml#/definitions/string
+ pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$
+ deprecated: true
+
+ riscv,isa-base:
+ description:
+ The base ISA implemented by this hart, as described by the 20191213
+ version of the unprivileged ISA specification.
+ enum:
+ - rv32i
+ - rv64i
+
+ riscv,isa-extensions:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ minItems: 1
+ description: Extensions supported by the hart.
+ items:
+ anyOf:
+ # single letter extensions, in canonical order
+ - const: i
+ description: |
+ The base integer instruction set, as ratified in the 20191213
+ version of the unprivileged ISA specification.
+
+ This does not include Chapter 10, "Counters", which was moved into
+ the Zicntr and Zihpm extensions after the ratification of the
+ 20191213 version of the unprivileged specification.
+
+ - const: m
+ description:
+ The standard M extension for integer multiplication and division, as
+ ratified in the 20191213 version of the unprivileged ISA
+ specification.
+
+ - const: a
+ description:
+ The standard A extension for atomic instructions, as ratified in the
+ 20191213 version of the unprivileged ISA specification.
+
+ - const: f
+ description:
+ The standard F extension for single-precision floating point, as
+ ratified in the 20191213 version of the unprivileged ISA
+ specification.
+
+ - const: d
+ description:
+ The standard D extension for double-precision floating-point, as
+ ratified in the 20191213 version of the unprivileged ISA
+ specification.
+
+ - const: q
+ description:
+ The standard Q extension for quad-precision floating-point, as
+ ratified in the 20191213 version of the unprivileged ISA
+ specification.
+
+ - const: c
+ description:
+ The standard C extension for compressed instructions, as ratified in
+ the 20191213 version of the unprivileged ISA specification.
+
+ - const: v
+ description:
+ The standard V extension for vector operations, as ratified
+ in-and-around commit 7a6c8ae ("Fix text that describes vfmv.v.f
+ encoding") of the riscv-v-spec.
+
+ - const: h
+ description:
+ The standard H extension for hypervisors as ratified in the 20191213
+ version of the privileged ISA specification.
+
+ # multi-letter extensions, sorted alphanumerically
+ - const: smaia
+ description: |
+ The standard Smaia supervisor-level extension for the advanced
+ interrupt architecture for machine-mode-visible csr and behavioural
+ changes to interrupts as frozen at commit ccbddab ("Merge pull
+ request #42 from riscv/jhauser-2023-RC4") of riscv-aia.
+
+ - const: ssaia
+ description: |
+ The standard Ssaia supervisor-level extension for the advanced
+ interrupt architecture for supervisor-mode-visible csr and
+ behavioural changes to interrupts as frozen at commit ccbddab
+ ("Merge pull request #42 from riscv/jhauser-2023-RC4") of riscv-aia.
+
+ - const: sscofpmf
+ description: |
+ The standard Sscofpmf supervisor-level extension for count overflow
+ and mode-based filtering as ratified at commit 01d1df0 ("Add ability
+ to manually trigger workflow. (#2)") of riscv-count-overflow.
+
+ - const: sstc
+ description: |
+ The standard Sstc supervisor-level extension for time compare as
+ ratified at commit 3f9ed34 ("Add ability to manually trigger
+ workflow. (#2)") of riscv-time-compare.
+
+ - const: svinval
+ description:
+ The standard Svinval supervisor-level extension for fine-grained
+ address-translation cache invalidation as ratified in the 20191213
+ version of the privileged ISA specification.
+
+ - const: svnapot
+ description:
+ The standard Svnapot supervisor-level extensions for napot
+ translation contiguity as ratified in the 20191213 version of the
+ privileged ISA specification.
+
+ - const: svpbmt
+ description:
+ The standard Svpbmt supervisor-level extensions for page-based
+ memory types as ratified in the 20191213 version of the privileged
+ ISA specification.
+
+ - const: zba
+ description: |
+ The standard Zba bit-manipulation extension for address generation
+ acceleration instructions as ratified at commit 6d33919 ("Merge pull
+ request #158 from hirooih/clmul-fix-loop-end-condition") of
+ riscv-bitmanip.
+
+ - const: zbb
+ description: |
+ The standard Zbb bit-manipulation extension for basic bit-manipulation
+ as ratified at commit 6d33919 ("Merge pull request #158 from
+ hirooih/clmul-fix-loop-end-condition") of riscv-bitmanip.
+
+ - const: zbc
+ description: |
+ The standard Zbc bit-manipulation extension for carry-less
+ multiplication as ratified at commit 6d33919 ("Merge pull request
+ #158 from hirooih/clmul-fix-loop-end-condition") of riscv-bitmanip.
+
+ - const: zbs
+ description: |
+ The standard Zbs bit-manipulation extension for single-bit
+ instructions as ratified at commit 6d33919 ("Merge pull request #158
+ from hirooih/clmul-fix-loop-end-condition") of riscv-bitmanip.
+
+ - const: zicbom
+ description:
+ The standard Zicbom extension for base cache management operations as
+ ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs.
+
+ - const: zicbop
+ description:
+ The standard Zicbop extension for cache-block prefetch instructions
+ as ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of
+ riscv-CMOs.
+
+ - const: zicboz
+ description:
+ The standard Zicboz extension for cache-block zeroing as ratified
+ in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs.
+
+ - const: zicntr
+ description:
+ The standard Zicntr extension for base counters and timers, as
+ ratified in the 20191213 version of the unprivileged ISA
+ specification.
+
+ - const: zicsr
+ description: |
+ The standard Zicsr extension for control and status register
+ instructions, as ratified in the 20191213 version of the
+ unprivileged ISA specification.
+
+ This does not include Chapter 10, "Counters", which documents
+ special case read-only CSRs, that were moved into the Zicntr and
+ Zihpm extensions after the ratification of the 20191213 version of
+ the unprivileged specification.
+
+ - const: zifencei
+ description:
+ The standard Zifencei extension for instruction-fetch fence, as
+ ratified in the 20191213 version of the unprivileged ISA
+ specification.
+
+ - const: zihintpause
+ description:
+ The standard Zihintpause extension for pause hints, as ratified in
+ commit d8ab5c7 ("Zihintpause is ratified") of the riscv-isa-manual.
+
+ - const: zihpm
+ description:
+ The standard Zihpm extension for hardware performance counters, as
+ ratified in the 20191213 version of the unprivileged ISA
+ specification.
+
+ - const: ztso
+ description:
+ The standard Ztso extension for total store ordering, as ratified
+ in commit 2e5236 ("Ztso is now ratified.") of the
+ riscv-isa-manual.
+
+additionalProperties: true
+...
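Together with riscv,isa-base, the extensions property is used per hart, as in the
cpus.yaml examples updated earlier in this series; a minimal hart-node sketch
(hart index and the exact extension list are illustrative):

    cpu@0 {
        device_type = "cpu";
        compatible = "riscv";
        reg = <0>;
        riscv,isa-base = "rv64i";
        riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicsr", "zifencei";
    };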
diff --git a/Documentation/devicetree/bindings/rtc/isil,isl1208.txt b/Documentation/devicetree/bindings/rtc/isil,isl1208.txt
deleted file mode 100644
index 51f003006f04..000000000000
--- a/Documentation/devicetree/bindings/rtc/isil,isl1208.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-Intersil ISL1209/19 I2C RTC/Alarm chip with event in
-
-ISL12X9 have additional pins EVIN and #EVDET for tamper detection, while the
-ISL1208 and ISL1218 do not. They are all use the same driver with the bindings
-described here, with chip specific properties as noted.
-
-Required properties supported by the device:
- - "compatible": Should be one of the following:
- - "isil,isl1208"
- - "isil,isl1209"
- - "isil,isl1218"
- - "isil,isl1219"
- - "reg": I2C bus address of the device
-
-Optional properties:
- - "interrupt-names": list which may contains "irq" and "evdet"
- evdet applies to isl1209 and isl1219 only
- - "interrupts": list of interrupts for "irq" and "evdet"
- evdet applies to isl1209 and isl1219 only
- - "isil,ev-evienb": Enable or disable internal pull on EVIN pin
- Applies to isl1209 and isl1219 only
- Possible values are 0 and 1
- Value 0 enables internal pull-up on evin pin, 1 disables it.
- Default will leave the non-volatile configuration of the pullup
- as is.
-
-Example isl1219 node with #IRQ pin connected to SoC gpio1 pin12 and #EVDET pin
-connected to SoC gpio2 pin 24 and internal pull-up enabled in EVIN pin.
-
- isl1219: rtc@68 {
- compatible = "isil,isl1219";
- reg = <0x68>;
- interrupt-names = "irq", "evdet";
- interrupts-extended = <&gpio1 12 IRQ_TYPE_EDGE_FALLING>,
- <&gpio2 24 IRQ_TYPE_EDGE_FALLING>;
- isil,ev-evienb = <1>;
- };
-
diff --git a/Documentation/devicetree/bindings/rtc/isil,isl1208.yaml b/Documentation/devicetree/bindings/rtc/isil,isl1208.yaml
new file mode 100644
index 000000000000..11f7378d4997
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/isil,isl1208.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/isil,isl1208.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intersil ISL1209/19 I2C RTC/Alarm chip with event in
+
+maintainers:
+ - Biju Das <biju.das.jz@bp.renesas.com>
+ - Trent Piepho <tpiepho@gmail.com>
+
+description:
+ ISL12X9 have additional pins EVIN and EVDET for tamper detection, while the
+ ISL1208 and ISL1218 do not.
+
+properties:
+ compatible:
+ enum:
+ - isil,isl1208
+ - isil,isl1209
+ - isil,isl1218
+ - isil,isl1219
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ description: |
+ Use xin, if connected to an external crystal.
+ Use clkin, if connected to an external clock signal.
+ enum:
+ - xin
+ - clkin
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: irq
+ - const: evdet
+
+ isil,ev-evienb:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description: |
+ Enable or disable internal pull on EVIN pin
+ Default will leave the non-volatile configuration of the pullup
+ as is.
+ <0> : Enables internal pull-up on evin pin
+ <1> : Disables internal pull-up on evin pin
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: rtc.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - isil,isl1209
+ - isil,isl1219
+ then:
+ properties:
+ interrupts:
+ maxItems: 2
+ interrupt-names:
+ items:
+ - const: irq
+ - const: evdet
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
+ interrupt-names:
+ items:
+ - const: irq
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc_twi: rtc@6f {
+ compatible = "isil,isl1208";
+ reg = <0x6f>;
+ };
+ };
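For the isl1209/isl1219 variants with the event-detect input, the node carries the
second interrupt and the pull-up control, as in this sketch carried over from the
previous text binding (the GPIO phandles are board-specific placeholders):

    #include <dt-bindings/interrupt-controller/irq.h>

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        rtc@68 {
            compatible = "isil,isl1219";
            reg = <0x68>;
            interrupt-names = "irq", "evdet";
            interrupts-extended = <&gpio1 12 IRQ_TYPE_EDGE_FALLING>,
                                  <&gpio2 24 IRQ_TYPE_EDGE_FALLING>;
            isil,ev-evienb = <1>;       /* disable the internal pull-up on EVIN */
        };
    };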
diff --git a/Documentation/devicetree/bindings/rtc/loongson,rtc.yaml b/Documentation/devicetree/bindings/rtc/loongson,rtc.yaml
new file mode 100644
index 000000000000..f89c1f660aee
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/loongson,rtc.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/loongson,rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson Real-Time Clock
+
+description:
+ The Loongson family chips use an on-chip counter 0 (Time Of Year
+ counter) as the RTC.
+
+maintainers:
+ - Binbin Zhou <zhoubinbin@loongson.cn>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - loongson,ls1b-rtc
+ - loongson,ls1c-rtc
+ - loongson,ls7a-rtc
+ - loongson,ls2k1000-rtc
+ - items:
+ - enum:
+ - loongson,ls2k2000-rtc
+ - loongson,ls2k0500-rtc
+ - const: loongson,ls7a-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ rtc@1fe27800 {
+ compatible = "loongson,ls2k1000-rtc";
+ reg = <0x1fe27800 0x100>;
+
+ interrupt-parent = <&liointc1>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/rtc/rtc.yaml b/Documentation/devicetree/bindings/rtc/rtc.yaml
index c6fff5486fe6..efb66df82782 100644
--- a/Documentation/devicetree/bindings/rtc/rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/rtc.yaml
@@ -15,7 +15,7 @@ description: |
properties:
$nodename:
- pattern: "^rtc(@.*|-[0-9a-f])*$"
+ pattern: "^rtc(@.*|-([0-9]|[1-9][0-9]+))?$"
aux-voltage-chargeable:
$ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index a3603e638c37..9af77f21bb7f 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -47,8 +47,6 @@ properties:
- isil,isl1218
# Intersil ISL12022 Real-time Clock
- isil,isl12022
- # Loongson-2K Socs/LS7A bridge Real-time Clock
- - loongson,ls2x-rtc
# Real Time Clock Module with I2C-Bus
- microcrystal,rv3029
# Real Time Clock
diff --git a/Documentation/devicetree/bindings/slimbus/slimbus.yaml b/Documentation/devicetree/bindings/slimbus/slimbus.yaml
index 22513fb7c59a..3b8cae9d1016 100644
--- a/Documentation/devicetree/bindings/slimbus/slimbus.yaml
+++ b/Documentation/devicetree/bindings/slimbus/slimbus.yaml
@@ -15,7 +15,7 @@ description:
properties:
$nodename:
- pattern: "^slim(@.*|-[0-9a-f])*$"
+ pattern: "^slim(@.*|-([0-9]|[1-9][0-9]+))?$"
"#address-cells":
const: 2
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml
index 7ab8cfff18c1..96a7f1822022 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies, Inc. (QTI) Stats
maintainers:
- - Maulik Shah <mkshah@codeaurora.org>
+ - Maulik Shah <quic_mkshah@quicinc.com>
description:
Always On Processor/Resource Power Manager maintains statistics of the SoC
diff --git a/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
index e4dba825ab11..fb44b89a754e 100644
--- a/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
+++ b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
@@ -21,6 +21,7 @@ properties:
- qcom,soundwire-v1.5.1
- qcom,soundwire-v1.6.0
- qcom,soundwire-v1.7.0
+ - qcom,soundwire-v2.0.0
reg:
maxItems: 1
@@ -80,18 +81,29 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
qcom,ports-sinterval-low:
$ref: /schemas/types.yaml#/definitions/uint8-array
description:
- Sample interval low of each data port.
+ Sample interval (only lowest byte) of each data port.
Out ports followed by In ports. Used for Sample Interval calculation.
Value of 0xff indicates that this option is not implemented
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
+
+ qcom,ports-sinterval:
+ $ref: /schemas/types.yaml#/definitions/uint16-array
+ description:
+ Sample interval of each data port.
+ Out ports followed by In ports. Used for Sample Interval calculation.
+ Value of 0xffff indicates that this option is not implemented
+ or applicable for the respective data port.
+ More info in MIPI Alliance SoundWire 1.0 Specifications.
+ minItems: 3
+ maxItems: 16
qcom,ports-offset1:
$ref: /schemas/types.yaml#/definitions/uint8-array
@@ -102,7 +114,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
qcom,ports-offset2:
$ref: /schemas/types.yaml#/definitions/uint8-array
@@ -113,7 +125,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
qcom,ports-lane-control:
$ref: /schemas/types.yaml#/definitions/uint8-array
@@ -124,7 +136,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
qcom,ports-block-pack-mode:
$ref: /schemas/types.yaml#/definitions/uint8-array
@@ -137,7 +149,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
items:
oneOf:
- minimum: 0
@@ -154,7 +166,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
items:
oneOf:
- minimum: 0
@@ -171,7 +183,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
items:
oneOf:
- minimum: 0
@@ -187,7 +199,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
items:
oneOf:
- minimum: 0
@@ -219,10 +231,15 @@ required:
- '#size-cells'
- qcom,dout-ports
- qcom,din-ports
- - qcom,ports-sinterval-low
- qcom,ports-offset1
- qcom,ports-offset2
+oneOf:
+ - required:
+ - qcom,ports-sinterval-low
+ - required:
+ - qcom,ports-sinterval
+
additionalProperties: false
examples:
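Since qcom,ports-sinterval is a uint16 array, device trees spell it with a
/bits/ 16 modifier; a partial sketch replacing the old qcom,ports-sinterval-low
property (the port count and interval values are placeholders, with 0xffff
meaning "not implemented" as described above):

    qcom,ports-sinterval = /bits/ 16 <0x03 0x3f 0x1f 0xffff 0xffff 0xffff>;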
diff --git a/Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml b/Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml
index fd6a7b51f571..95f42acd0c54 100644
--- a/Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml
+++ b/Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml
@@ -17,7 +17,7 @@ description:
properties:
$nodename:
- pattern: "^timestamp(@.*|-[0-9a-f])?$"
+ pattern: "^timestamp(@.*|-([0-9]|[1-9][0-9]+))?$"
"#timestamp-cells":
description:
diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
index 943dafb69529..bdfa86a0cc98 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
@@ -194,9 +194,8 @@ allOf:
# TODO: define clock bindings for qcom,msm8994-ufshc
- if:
- properties:
- qcom,ice:
- maxItems: 1
+ required:
+ - qcom,ice
then:
properties:
reg:
@@ -207,10 +206,10 @@ allOf:
else:
properties:
reg:
- minItems: 2
+ minItems: 1
maxItems: 2
clocks:
- minItems: 9
+ minItems: 8
maxItems: 11
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml
index b26d26c2b023..782402800d4a 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml
@@ -45,7 +45,9 @@ properties:
- fsl,vf610-usb
- const: fsl,imx27-usb
- items:
- - const: fsl,imx8dxl-usb
+ - enum:
+ - fsl,imx8dxl-usb
+ - fsl,imx8ulp-usb
- const: fsl,imx7ulp-usb
- const: fsl,imx6ul-usb
- items:
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
index d3506090f8b1..0a5c98ea711d 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -53,6 +53,7 @@ properties:
- amlogic,meson8b-usb
- amlogic,meson-gxbb-usb
- amlogic,meson-g12a-usb
+ - amlogic,meson-a1-usb
- intel,socfpga-agilex-hsotg
- const: snps,dwc2
- const: amcc,dwc-otg
diff --git a/Documentation/devicetree/bindings/usb/fsl,imx8qm-cdns3.yaml b/Documentation/devicetree/bindings/usb/fsl,imx8qm-cdns3.yaml
new file mode 100644
index 000000000000..ceb76394af60
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/fsl,imx8qm-cdns3.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 NXP
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/fsl,imx8qm-cdns3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP iMX8QM SoC USB Controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8qm-usb3
+
+ reg:
+ items:
+ - description: Register set for iMX USB3 Platform Control
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ ranges: true
+
+ clocks:
+ items:
+ - description: Standby clock. Used during ultra low power states.
+ - description: USB bus clock for usb3 controller.
+ - description: AXI clock for AXI interface.
+ - description: ipg clock for register access.
+ - description: Core clock for usb3 controller.
+
+ clock-names:
+ items:
+ - const: lpm
+ - const: bus
+ - const: aclk
+ - const: ipg
+ - const: core
+
+ power-domains:
+ maxItems: 1
+
+# Required child node:
+
+patternProperties:
+ "^usb@[0-9a-f]+$":
+ $ref: cdns,usb3.yaml#
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+ - clocks
+ - clock-names
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-lpcg.h>
+ #include <dt-bindings/firmware/imx/rsrc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ usb@5b110000 {
+ compatible = "fsl,imx8qm-usb3";
+ reg = <0x5b110000 0x10000>;
+ ranges;
+ clocks = <&usb3_lpcg IMX_LPCG_CLK_1>,
+ <&usb3_lpcg IMX_LPCG_CLK_0>,
+ <&usb3_lpcg IMX_LPCG_CLK_7>,
+ <&usb3_lpcg IMX_LPCG_CLK_4>,
+ <&usb3_lpcg IMX_LPCG_CLK_5>;
+ clock-names = "lpm", "bus", "aclk", "ipg", "core";
+ assigned-clocks = <&clk IMX_SC_R_USB_2 IMX_SC_PM_CLK_MST_BUS>;
+ assigned-clock-rates = <250000000>;
+ power-domains = <&pd IMX_SC_R_USB_2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ usb@5b120000 {
+ compatible = "cdns,usb3";
+ reg = <0x5b120000 0x10000>, /* memory area for OTG/DRD registers */
+ <0x5b130000 0x10000>, /* memory area for HOST registers */
+ <0x5b140000 0x10000>; /* memory area for DEVICE registers */
+ reg-names = "otg", "xhci", "dev";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host", "peripheral", "otg", "wakeup";
+ phys = <&usb3_phy>;
+ phy-names = "cdns3,usb3-phy";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
index 9445764bd8de..b956bb5fada7 100644
--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -61,6 +61,7 @@ properties:
- ibm,476gtr-ehci
- nxp,lpc1850-ehci
- qca,ar7100-ehci
+ - rockchip,rk3588-ehci
- snps,hsdk-v1.0-ehci
- socionext,uniphier-ehci
- const: generic-ehci
diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
index d06d1e7d8876..be268e23ca79 100644
--- a/Documentation/devicetree/bindings/usb/generic-ohci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
@@ -44,6 +44,7 @@ properties:
- hpe,gxp-ohci
- ibm,476gtr-ohci
- ingenic,jz4740-ohci
+ - rockchip,rk3588-ohci
- snps,hsdk-v1.0-ohci
- const: generic-ohci
- enum:
@@ -69,7 +70,7 @@ properties:
clocks:
minItems: 1
- maxItems: 3
+ maxItems: 4
description: |
In case the Renesas R-Car Gen3 SoCs:
- if a host only channel: first clock should be host.
@@ -147,6 +148,20 @@ allOf:
then:
properties:
transceiver: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3588-ohci
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ else:
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 3
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
index 478214ab045e..a59d91243ac8 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
@@ -304,7 +304,7 @@ examples:
# Dual role switch with type-c
- |
usb@11201000 {
- compatible ="mediatek,mt8183-mtu3", "mediatek,mtu3";
+ compatible = "mediatek,mt8183-mtu3", "mediatek,mtu3";
reg = <0x11201000 0x2e00>, <0x11203e00 0x0100>;
reg-names = "mac", "ippc";
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
diff --git a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
new file mode 100644
index 000000000000..ff3a1707ef57
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/microchip,usb5744.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip USB5744 4-port Hub Controller
+
+description:
+ Microchip's USB5744 SmartHub(TM) IC is a 4-port, SuperSpeed (SS)/Hi-Speed (HS),
+ low-power, low-pin-count hub, configurable and fully compliant with the USB 3.1
+ Gen 1 specification. The USB5744 also supports Full Speed (FS) and Low Speed
+ (LS) USB signaling, offering complete coverage of all defined USB operating
+ speeds. The new SuperSpeed hubs operate in parallel with the USB 2.0
+ controller, so 5 Gbps SuperSpeed data transfers are not affected by slower
+ USB 2.0 traffic.
+
+maintainers:
+ - Piyush Mehta <piyush.mehta@amd.com>
+ - Michal Simek <michal.simek@amd.com>
+
+properties:
+ compatible:
+ enum:
+ - usb424,2744
+ - usb424,5744
+ - microchip,usb5744
+
+ reg:
+ maxItems: 1
+
+ reset-gpios:
+ maxItems: 1
+ description:
+ GPIO controlling the GRST# pin.
+
+ vdd-supply:
+ description:
+ VDD power supply to the hub
+
+ peer-hub:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to the peer hub on the controller.
+
+ i2c-bus:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle of a USB hub connected via the I2C bus.
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: microchip,usb5744
+ then:
+ properties:
+ reset-gpios: false
+ vdd-supply: false
+ peer-hub: false
+ i2c-bus: false
+ else:
+ $ref: /schemas/usb/usb-device.yaml
+ required:
+ - peer-hub
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c: i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ hub: usb-hub@2d {
+ compatible = "microchip,usb5744";
+ reg = <0x2d>;
+ };
+ };
+
+ usb {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 2.0 hub on port 1 */
+ hub_2_0: hub@1 {
+ compatible = "usb424,2744";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ i2c-bus = <&hub>;
+ reset-gpios = <&gpio 3 GPIO_ACTIVE_LOW>;
+ };
+
+ /* 3.0 hub on port 2 */
+ hub_3_0: hub@2 {
+ compatible = "usb424,5744";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ i2c-bus = <&hub>;
+ reset-gpios = <&gpio 3 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml b/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
new file mode 100644
index 000000000000..c0201da002f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/onnn,nb7vpq904m.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ON Semiconductor Type-C DisplayPort ALT Mode Linear Redriver
+
+maintainers:
+ - Neil Armstrong <neil.armstrong@linaro.org>
+
+properties:
+ compatible:
+ enum:
+ - onnn,nb7vpq904m
+
+ reg:
+ maxItems: 1
+
+ vcc-supply:
+ description: power supply (1.8V)
+
+ enable-gpios: true
+
+ retimer-switch:
+ description: Flag the port as possible handler of SuperSpeed signal retiming
+ type: boolean
+
+ orientation-switch:
+ description: Flag the port as possible handler of orientation switching
+ type: boolean
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Super Speed (SS) Output endpoint to the Type-C connector
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ description: Super Speed (SS) Input endpoint from the Super-Speed PHY
+ unevaluatedProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description: |
+ An array of physical data lane indexes. Position determines how
+ lanes are connected to the redriver. It is assumed the same order
+ is kept on the other side of the redriver.
+ Lane number represents the following
+ - 0 is RX2 lane
+ - 1 is TX2 lane
+ - 2 is TX1 lane
+ - 3 is RX1 lane
+ The position determines the physical port of the redriver, in the
+ order A, B, C & D.
+ oneOf:
+ - items:
+ - const: 0
+ - const: 1
+ - const: 2
+ - const: 3
+ description: |
+ This is the lanes default layout
+ - Port A to RX2 lane
+ - Port B to TX2 lane
+ - Port C to TX1 lane
+ - Port D to RX1 lane
+ - items:
+ - const: 3
+ - const: 2
+ - const: 1
+ - const: 0
+ description: |
+ This is the USBRX2/USBTX2 and USBRX1/USBTX1 swapped lanes layout
+ - Port A to RX1 lane
+ - Port B to TX1 lane
+ - Port C to TX2 lane
+ - Port D to RX2 lane
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Sideband Use (SBU) AUX lines endpoint to the Type-C connector for the purpose of
+ handling altmode muxing and orientation switching.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ typec-mux@32 {
+ compatible = "onnn,nb7vpq904m";
+ reg = <0x32>;
+
+ vcc-supply = <&vreg_l15b_1p8>;
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ usb_con_ss: endpoint {
+ remote-endpoint = <&typec_con_ss>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ phy_con_ss: endpoint {
+ remote-endpoint = <&usb_phy_ss>;
+ data-lanes = <3 2 1 0>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ usb_con_sbu: endpoint {
+ remote-endpoint = <&typec_dp_aux>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index d84281926f10..ae24dac78d9a 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -17,12 +17,14 @@ properties:
- qcom,ipq6018-dwc3
- qcom,ipq8064-dwc3
- qcom,ipq8074-dwc3
+ - qcom,ipq9574-dwc3
- qcom,msm8953-dwc3
- qcom,msm8994-dwc3
- qcom,msm8996-dwc3
- qcom,msm8998-dwc3
- qcom,qcm2290-dwc3
- qcom,qcs404-dwc3
+ - qcom,sa8775p-dwc3
- qcom,sc7180-dwc3
- qcom,sc7280-dwc3
- qcom,sc8280xp-dwc3
@@ -133,7 +135,6 @@ required:
- "#address-cells"
- "#size-cells"
- ranges
- - power-domains
- clocks
- clock-names
- interrupts
@@ -177,9 +178,11 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq9574-dwc3
- qcom,msm8953-dwc3
- qcom,msm8996-dwc3
- qcom,msm8998-dwc3
+ - qcom,sa8775p-dwc3
- qcom,sc7180-dwc3
- qcom,sc7280-dwc3
- qcom,sdm670-dwc3
@@ -455,6 +458,25 @@ allOf:
- const: dm_hs_phy_irq
- const: ss_phy_irq
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sa8775p-dwc3
+ then:
+ properties:
+ interrupts:
+ minItems: 3
+ maxItems: 4
+ interrupt-names:
+ minItems: 3
+ items:
+ - const: pwr_event
+ - const: dp_hs_phy_irq
+ - const: dm_hs_phy_irq
+ - const: ss_phy_irq
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/usb/qcom,pmic-typec.yaml b/Documentation/devicetree/bindings/usb/qcom,pmic-typec.yaml
new file mode 100644
index 000000000000..55df3129a0bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/qcom,pmic-typec.yaml
@@ -0,0 +1,190 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/qcom,pmic-typec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm PMIC based USB Type-C block
+
+maintainers:
+ - Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+description:
+ Qualcomm PMIC Type-C block
+
+properties:
+ compatible:
+ enum:
+ - qcom,pm8150b-typec
+
+ connector:
+ type: object
+ $ref: /schemas/connector/usb-connector.yaml#
+ unevaluatedProperties: false
+
+ reg:
+ description: Type-C port and pdphy SPMI register base offsets
+ maxItems: 2
+
+ interrupts:
+ items:
+ - description: Type-C CC attach notification, VBUS error, tCCDebounce done
+ - description: Type-C VCONN powered
+ - description: Type-C CC state change
+ - description: Type-C VCONN over-current
+ - description: Type-C VBUS state change
+ - description: Type-C Attach/detach notification
+ - description: Type-C Legacy cable detect
+ - description: Type-C Try.Src Try.Snk state change
+ - description: Power Domain Signal TX - HardReset or CableReset signal TX
+ - description: Power Domain Signal RX - HardReset or CableReset signal RX
+ - description: Power Domain TX complete
+ - description: Power Domain RX complete
+ - description: Power Domain TX fail
+ - description: Power Domain TX message discard
+ - description: Power Domain RX message discard
+ - description: Power Domain Fast Role Swap event
+
+ interrupt-names:
+ items:
+ - const: or-rid-detect-change
+ - const: vpd-detect
+ - const: cc-state-change
+ - const: vconn-oc
+ - const: vbus-change
+ - const: attach-detach
+ - const: legacy-cable-detect
+ - const: try-snk-src-detect
+ - const: sig-tx
+ - const: sig-rx
+ - const: msg-tx
+ - const: msg-rx
+ - const: msg-tx-failed
+ - const: msg-tx-discarded
+ - const: msg-rx-discarded
+ - const: fr-swap
+
+ vdd-vbus-supply:
+ description: VBUS power supply.
+
+ vdd-pdphy-supply:
+ description: VDD regulator supply to the PDPHY.
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Contains a port which produces data-role switching messages.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - vdd-vbus-supply
+ - vdd-pdphy-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/usb/pd.h>
+
+ pmic {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8150b_typec: typec@1500 {
+ compatible = "qcom,pm8150b-typec";
+ reg = <0x1500>,
+ <0x1700>;
+
+ interrupts = <0x2 0x15 0x00 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x15 0x01 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x15 0x02 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x15 0x03 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x15 0x04 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x15 0x05 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x15 0x06 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x15 0x07 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x00 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x01 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x02 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x03 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x04 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x05 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x06 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x07 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-names = "or-rid-detect-change",
+ "vpd-detect",
+ "cc-state-change",
+ "vconn-oc",
+ "vbus-change",
+ "attach-detach",
+ "legacy-cable-detect",
+ "try-snk-src-detect",
+ "sig-tx",
+ "sig-rx",
+ "msg-tx",
+ "msg-rx",
+ "msg-tx-failed",
+ "msg-tx-discarded",
+ "msg-rx-discarded",
+ "fr-swap";
+
+ vdd-vbus-supply = <&pm8150b_vbus>;
+ vdd-pdphy-supply = <&vreg_l2a_3p1>;
+
+ connector {
+ compatible = "usb-c-connector";
+
+ power-role = "source";
+ data-role = "dual";
+ self-powered;
+
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_DUAL_ROLE |
+ PDO_FIXED_USB_COMM | PDO_FIXED_DATA_SWAP)>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ pmic_typec_mux_out: endpoint {
+ remote-endpoint = <&usb_phy_typec_mux_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ pmic_typec_role_switch_out: endpoint {
+ remote-endpoint = <&usb_role_switch_in>;
+ };
+ };
+ };
+ };
+ };
+ };
+
+ usb {
+ dr_mode = "otg";
+ usb-role-switch;
+ port {
+ usb_role_switch_in: endpoint {
+ remote-endpoint = <&pmic_typec_role_switch_out>;
+ };
+ };
+ };
+
+ usb-phy {
+ orientation-switch;
+ port {
+ usb_phy_typec_mux_in: endpoint {
+ remote-endpoint = <&pmic_typec_mux_out>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
index 4f7625955ccc..a696f23730d3 100644
--- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
@@ -44,15 +44,15 @@ properties:
It's either a single common DWC3 interrupt (dwc_usb3) or individual
interrupts for the host, gadget and DRD modes.
minItems: 1
- maxItems: 3
+ maxItems: 4
interrupt-names:
minItems: 1
- maxItems: 3
+ maxItems: 4
oneOf:
- const: dwc_usb3
- items:
- enum: [host, peripheral, otg]
+ enum: [host, peripheral, otg, wakeup]
clocks:
description:
diff --git a/Documentation/devicetree/bindings/usb/starfive,jh7110-usb.yaml b/Documentation/devicetree/bindings/usb/starfive,jh7110-usb.yaml
new file mode 100644
index 000000000000..24aa9c10d6ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/starfive,jh7110-usb.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/starfive,jh7110-usb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: StarFive JH7110 wrapper module for the Cadence USBSS-DRD controller
+
+maintainers:
+ - Minda Chen <minda.chen@starfivetech.com>
+
+properties:
+ compatible:
+ const: starfive,jh7110-usb
+
+ ranges: true
+
+ starfive,stg-syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle to System Register Controller stg_syscon node.
+ - description: dr mode register offset of STG_SYSCONSAIF__SYSCFG register for USB.
+ description:
+ The phandle to System Register Controller syscon node and the offset
+ of STG_SYSCONSAIF__SYSCFG register for USB.
+
+ dr_mode:
+ enum: [host, otg, peripheral]
+
+ "#address-cells":
+ enum: [1, 2]
+
+ "#size-cells":
+ enum: [1, 2]
+
+ clocks:
+ items:
+ - description: link power management clock
+ - description: standby clock
+ - description: APB clock
+ - description: AXI clock
+ - description: UTMI APB clock
+
+ clock-names:
+ items:
+ - const: lpm
+ - const: stb
+ - const: apb
+ - const: axi
+ - const: utmi_apb
+
+ resets:
+ items:
+ - description: Power up reset
+ - description: APB clock reset
+ - description: AXI clock reset
+ - description: UTMI APB clock reset
+
+ reset-names:
+ items:
+ - const: pwrup
+ - const: apb
+ - const: axi
+ - const: utmi_apb
+
+patternProperties:
+ "^usb@[0-9a-f]+$":
+ $ref: cdns,usb3.yaml#
+ description: Required child node
+
+required:
+ - compatible
+ - ranges
+ - starfive,stg-syscon
+ - '#address-cells'
+ - '#size-cells'
+ - dr_mode
+ - clocks
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ usb@10100000 {
+ compatible = "starfive,jh7110-usb";
+ ranges = <0x0 0x10100000 0x100000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ starfive,stg-syscon = <&stg_syscon 0x4>;
+ clocks = <&syscrg 4>,
+ <&stgcrg 5>,
+ <&stgcrg 1>,
+ <&stgcrg 3>,
+ <&stgcrg 2>;
+ clock-names = "lpm", "stb", "apb", "axi", "utmi_apb";
+ resets = <&stgcrg 10>,
+ <&stgcrg 8>,
+ <&stgcrg 7>,
+ <&stgcrg 9>;
+ reset-names = "pwrup", "apb", "axi", "utmi_apb";
+ dr_mode = "host";
+
+ usb@0 {
+ compatible = "cdns,usb3";
+ reg = <0x0 0x10000>,
+ <0x10000 0x10000>,
+ <0x20000 0x10000>;
+ reg-names = "otg", "xhci", "dev";
+ interrupts = <100>, <108>, <110>;
+ interrupt-names = "host", "peripheral", "otg";
+ maximum-speed = "super-speed";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/ti,am62-usb.yaml b/Documentation/devicetree/bindings/usb/ti,am62-usb.yaml
index d25fc708e32c..fec5651f5602 100644
--- a/Documentation/devicetree/bindings/usb/ti,am62-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,am62-usb.yaml
@@ -92,7 +92,7 @@ examples:
usb@31100000 {
compatible = "snps,dwc3";
- reg =<0x00 0x31100000 0x00 0x50000>;
+ reg = <0x00 0x31100000 0x00 0x50000>;
interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
<GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>; /* irq.0 */
interrupt-names = "host", "peripheral";
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.yaml b/Documentation/devicetree/bindings/usb/usb251xb.yaml
index 4d1530816817..ac5b99710332 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.yaml
+++ b/Documentation/devicetree/bindings/usb/usb251xb.yaml
@@ -231,7 +231,7 @@ properties:
power-on sequence to a port until the port has adequate power.
swap-dx-lanes:
- $ref: /schemas/types.yaml#/definitions/uint8-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
description: |
Specifies the ports which will swap the differential-pair (D+/D-),
default is not-swapped.
diff --git a/Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.txt b/Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.txt
deleted file mode 100644
index 2b86a00e351d..000000000000
--- a/Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Broadcom Kona Family Watchdog Timer
------------------------------------
-
-This watchdog timer is used in the following Broadcom SoCs:
- BCM11130, BCM11140, BCM11351, BCM28145, BCM28155
-
-Required properties:
- - compatible = "brcm,bcm11351-wdt", "brcm,kona-wdt";
- - reg: memory address & range
-
-Example:
- watchdog@35002f40 {
- compatible = "brcm,bcm11351-wdt", "brcm,kona-wdt";
- reg = <0x35002f40 0x6c>;
- };
diff --git a/Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.yaml b/Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.yaml
new file mode 100644
index 000000000000..3d4403b41cbe
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/brcm,kona-wdt.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/brcm,kona-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Kona Family Watchdog Timer
+
+description: |
+ This watchdog timer is used in the following Broadcom SoCs:
+ BCM11130, BCM11140, BCM11351, BCM28145, BCM28155
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Ray Jui <rjui@broadcom.com>
+ - Scott Branden <sbranden@broadcom.com>
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: brcm,bcm11351-wdt
+ - const: brcm,kona-wdt
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ watchdog@35002f40 {
+ compatible = "brcm,bcm11351-wdt", "brcm,kona-wdt";
+ reg = <0x35002f40 0x6c>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt b/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
deleted file mode 100644
index 750a87657448..000000000000
--- a/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Zynq Watchdog Device Tree Bindings
--------------------------------------------
-
-Required properties:
-- compatible : Should be "cdns,wdt-r1p2".
-- clocks : This is pclk (APB clock).
-- interrupts : This is wd_irq - watchdog timeout interrupt.
-
-Optional properties
-- reset-on-timeout : If this property exists, then a reset is done
- when watchdog times out.
-- timeout-sec : Watchdog timeout value (in seconds).
-
-Example:
- watchdog@f8005000 {
- compatible = "cdns,wdt-r1p2";
- clocks = <&clkc 45>;
- interrupt-parent = <&intc>;
- interrupts = <0 9 1>;
- reg = <0xf8005000 0x1000>;
- reset-on-timeout;
- timeout-sec = <10>;
- };
diff --git a/Documentation/devicetree/bindings/watchdog/cdns,wdt-r1p2.yaml b/Documentation/devicetree/bindings/watchdog/cdns,wdt-r1p2.yaml
new file mode 100644
index 000000000000..3c17c5883bce
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/cdns,wdt-r1p2.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/cdns,wdt-r1p2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence watchdog timer controller
+
+maintainers:
+ - Neeli Srinivas <srinivas.neeli@amd.com>
+
+description:
+ The Cadence watchdog timer is used to detect and recover from
+ system malfunctions. This watchdog contains a 24-bit counter and
+ a programmable reset period. The timeout period varies from 1 ms
+ to 30 seconds when using a 100 MHz clock.
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ enum:
+ - cdns,wdt-r1p2
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ reset-on-timeout:
+ type: boolean
+ description: |
+ If this property exists, then a reset is done when watchdog
+ times out.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ watchdog@f8005000 {
+ compatible = "cdns,wdt-r1p2";
+ reg = <0xf8005000 0x1000>;
+ clocks = <&clkc 45>;
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_EDGE_RISING>;
+ reset-on-timeout;
+ timeout-sec = <10>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/watchdog/watchdog.yaml b/Documentation/devicetree/bindings/watchdog/watchdog.yaml
index 519b48889eb1..f0a584af1223 100644
--- a/Documentation/devicetree/bindings/watchdog/watchdog.yaml
+++ b/Documentation/devicetree/bindings/watchdog/watchdog.yaml
@@ -17,11 +17,11 @@ description: |
select:
properties:
$nodename:
- pattern: "^watchdog(@.*|-[0-9a-f])?$"
+ pattern: "^watchdog(@.*|-([0-9]|[1-9][0-9]+))?$"
properties:
$nodename:
- pattern: "^(timer|watchdog)(@.*|-[0-9a-f])?$"
+ pattern: "^(timer|watchdog)(@.*|-([0-9]|[1-9][0-9]+))?$"
timeout-sec:
description:
diff --git a/Documentation/devicetree/bindings/watchdog/xlnx,versal-wwdt.yaml b/Documentation/devicetree/bindings/watchdog/xlnx,versal-wwdt.yaml
new file mode 100644
index 000000000000..14b069599740
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/xlnx,versal-wwdt.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/xlnx,versal-wwdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Versal window watchdog timer controller
+
+maintainers:
+ - Neeli Srinivas <srinivas.neeli@amd.com>
+
+description:
+ Versal watchdog intellectual property uses window watchdog mode.
+ The window watchdog timer (WWDT) contains a closed (first) and an
+ open (second) window, each 32 bits wide. Software must write to the
+ watchdog timer within predefined window periods of time, that is,
+ neither too soon nor too late. The WWDT has to be restarted within
+ the open window time. If software tries to restart the WWDT outside
+ of the open window time period, it generates a reset.
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ enum:
+ - xlnx,versal-wwdt
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ watchdog@fd4d0000 {
+ compatible = "xlnx,versal-wwdt";
+ reg = <0xfd4d0000 0x10000>;
+ clocks = <&clock25>;
+ timeout-sec = <30>;
+ };
+...
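For context, a window watchdog bound by this schema is serviced from userspace
through the generic /dev/watchdog character device; the kernel driver handles
hitting the open window. A minimal sketch using only the standard watchdog UAPI
from <linux/watchdog.h>; the device node name and timing below are illustrative
assumptions, not part of the binding::

    /*
     * Sketch: service a window watchdog via the generic /dev/watchdog
     * chardev. Device path and timing are illustrative assumptions.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    int main(void)
    {
        int timeout = 30;                      /* matches timeout-sec above */
        int fd = open("/dev/watchdog0", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        ioctl(fd, WDIOC_SETTIMEOUT, &timeout); /* request a 30 s timeout */

        for (int i = 0; i < 10; i++) {
            ioctl(fd, WDIOC_KEEPALIVE, 0);     /* ping; the driver honours the window */
            sleep(timeout / 2);
        }
        write(fd, "V", 1);                     /* magic close: disarm on exit */
        close(fd);
        return 0;
    }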
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index 4249eb4239e0..8be086b3f829 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -364,6 +364,7 @@ MEM
devm_kmalloc_array()
devm_kmemdup()
devm_krealloc()
+ devm_krealloc_array()
devm_kstrdup()
devm_kstrdup_const()
devm_kvasprintf()
diff --git a/Documentation/driver-api/media/camera-sensor.rst b/Documentation/driver-api/media/camera-sensor.rst
index c7d4891bd24e..93f4f2536c25 100644
--- a/Documentation/driver-api/media/camera-sensor.rst
+++ b/Documentation/driver-api/media/camera-sensor.rst
@@ -151,3 +151,25 @@ used to obtain device's power state after the power state transition:
The function returns a non-zero value if it succeeded getting the power count or
runtime PM was disabled, in either of which cases the driver may proceed to
access the device.
+
+Rotation, orientation and flipping
+----------------------------------
+
+Some systems have the camera sensor mounted upside down compared to its natural
+mounting rotation. In such cases, drivers shall expose the information to
+userspace with the :ref:`V4L2_CID_CAMERA_SENSOR_ROTATION
+<v4l2-camera-sensor-rotation>` control.
+
+Sensor drivers shall also report the sensor's mounting orientation with the
+:ref:`V4L2_CID_CAMERA_SENSOR_ORIENTATION <v4l2-camera-sensor-orientation>`.
+
+Use ``v4l2_fwnode_device_parse()`` to obtain rotation and orientation
+information from system firmware and ``v4l2_ctrl_new_fwnode_properties()`` to
+register the appropriate controls.
+
+Sensor drivers that have any vertical or horizontal flips embedded in the
+register programming sequences shall initialize the V4L2_CID_HFLIP and
+V4L2_CID_VFLIP controls with the values programmed by the register sequences.
+The default values of these controls shall be 0 (disabled). In particular, these
+controls shall not be inverted, independently of the sensor's mounting
+rotation.
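As a rough sketch of how the helpers mentioned above fit together in a sensor
driver's probe path; the struct and helper names follow the v4l2-fwnode API,
error handling is trimmed, and the function name is made up for illustration::

    #include <media/v4l2-ctrls.h>
    #include <media/v4l2-fwnode.h>

    /* Register rotation/orientation controls from firmware properties. */
    static int example_init_fwnode_ctrls(struct device *dev,
                                         struct v4l2_ctrl_handler *hdl,
                                         const struct v4l2_ctrl_ops *ops)
    {
        struct v4l2_fwnode_device_properties props;
        int ret;

        ret = v4l2_fwnode_device_parse(dev, &props);
        if (ret)
            return ret;

        /* Creates V4L2_CID_CAMERA_SENSOR_ROTATION and _ORIENTATION. */
        return v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
    }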
diff --git a/Documentation/driver-api/ntb.rst b/Documentation/driver-api/ntb.rst
index 11577c2105c5..e991d92b8b1d 100644
--- a/Documentation/driver-api/ntb.rst
+++ b/Documentation/driver-api/ntb.rst
@@ -207,9 +207,9 @@ The MSI test client serves to test and debug the MSI library which
allows for passing MSI interrupts across NTB memory windows. The
test client is interacted with through the debugfs filesystem:
-* *debugfs*/ntb\_tool/*hw*/
+* *debugfs*/ntb\_msi\_test/*hw*/
A directory in debugfs will be created for each
- NTB device probed by the tool. This directory is shortened to *hw*
+ NTB device probed by the MSI test. This directory is shortened to *hw*
below.
* *hw*/port
This file describes the local port number
diff --git a/Documentation/fault-injection/provoke-crashes.rst b/Documentation/fault-injection/provoke-crashes.rst
index 3abe84225613..1f087e502ca6 100644
--- a/Documentation/fault-injection/provoke-crashes.rst
+++ b/Documentation/fault-injection/provoke-crashes.rst
@@ -29,7 +29,7 @@ recur_count
cpoint_name
Where in the kernel to trigger the action. It can be
one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
- FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_QUEUE_RQ, or DIRECT.
+ FS_SUBMIT_BH, MEM_SWAPOUT, TIMERADD, SCSI_QUEUE_RQ, or DIRECT.
cpoint_type
Indicates the action to be taken on hitting the crash point.
diff --git a/Documentation/filesystems/dlmfs.rst b/Documentation/filesystems/dlmfs.rst
index 28dd41a63be2..7e2b1fd471d7 100644
--- a/Documentation/filesystems/dlmfs.rst
+++ b/Documentation/filesystems/dlmfs.rst
@@ -12,7 +12,7 @@ dlmfs is built with OCFS2 as it requires most of its infrastructure.
:Project web page: http://ocfs2.wiki.kernel.org
:Tools web page: https://github.com/markfasheh/ocfs2-tools
-:OCFS2 mailing lists: https://oss.oracle.com/projects/ocfs2/mailman/
+:OCFS2 mailing lists: https://subspace.kernel.org/lists.linux.dev.html
All code copyright 2005 Oracle except when otherwise noted.
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index c57745375edb..9359978a5af2 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -351,6 +351,22 @@ age_extent_cache Enable an age extent cache based on rb-tree. It records
data block update frequency of the extent per inode, in
order to provide better temperature hints for data block
allocation.
+errors=%s Specify f2fs behavior on critical errors. This supports modes:
+ "panic", "continue" and "remount-ro", respectively, trigger
+ panic immediately, continue without doing anything, and remount
+ the partition in read-only mode. By default it uses "continue"
+ mode.
+ ====================== =============== =============== ========
+ mode continue remount-ro panic
+ ====================== =============== =============== ========
+ access ops normal normal N/A
+ syscall errors -EIO -EROFS N/A
+ mount option rw ro N/A
+ pending dir write keep keep N/A
+ pending non-dir write drop keep N/A
+ pending node write drop keep N/A
+ pending meta write keep keep N/A
+ ====================== =============== =============== ========
======================== ============================================================
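For instance, the errors=remount-ro behaviour described above can be selected
at mount time via the mount(2) data string; a minimal sketch, where the device
and mountpoint are placeholders::

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        /* Device and mountpoint are placeholders. */
        if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
                  "errors=remount-ro") < 0) {
            perror("mount");
            return 1;
        }
        return 0;
    }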
Debugfs Entries
diff --git a/Documentation/filesystems/ocfs2.rst b/Documentation/filesystems/ocfs2.rst
index 42ca9a3d4c6e..5827062995cb 100644
--- a/Documentation/filesystems/ocfs2.rst
+++ b/Documentation/filesystems/ocfs2.rst
@@ -14,7 +14,7 @@ get "mount.ocfs2" and "ocfs2_hb_ctl".
Project web page: http://ocfs2.wiki.kernel.org
Tools git tree: https://github.com/markfasheh/ocfs2-tools
-OCFS2 mailing lists: https://oss.oracle.com/projects/ocfs2/mailman/
+OCFS2 mailing lists: https://subspace.kernel.org/lists.linux.dev.html
All code copyright 2005 Oracle except when otherwise noted.
diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
index ab9e850e8fe0..e76e68ccf718 100644
--- a/Documentation/i2c/busses/i2c-i801.rst
+++ b/Documentation/i2c/busses/i2c-i801.rst
@@ -46,7 +46,7 @@ Supported adapters:
* Intel Emmitsburg (PCH)
* Intel Alder Lake (PCH)
* Intel Raptor Lake (PCH)
- * Intel Meteor Lake (SOC)
+ * Intel Meteor Lake (SOC and PCH)
Datasheets: Publicly available at the Intel website
diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst
index 2a22ddb1b848..bd906407e307 100644
--- a/Documentation/kbuild/kbuild.rst
+++ b/Documentation/kbuild/kbuild.rst
@@ -150,6 +150,12 @@ the UTS_MACHINE variable, and on some architectures also the kernel config.
The value of KBUILD_DEBARCH is assumed (not checked) to be a valid Debian
architecture.
+KDOCFLAGS
+---------
+Specify extra (warning/error) flags for kernel-doc checks during the build;
+see scripts/kernel-doc for which flags are supported. Note that this doesn't
+(currently) apply to documentation builds.
+
ARCH
----
Set ARCH to the architecture to be built.
diff --git a/Documentation/leds/index.rst b/Documentation/leds/index.rst
index ce57254cb871..3ade16c18328 100644
--- a/Documentation/leds/index.rst
+++ b/Documentation/leds/index.rst
@@ -17,6 +17,7 @@ LEDs
uleds
leds-blinkm
+ leds-cht-wcove
leds-el15203000
leds-lm3556
leds-lp3944
diff --git a/Documentation/leds/leds-cht-wcove.rst b/Documentation/leds/leds-cht-wcove.rst
new file mode 100644
index 000000000000..5ec7cb60c4aa
--- /dev/null
+++ b/Documentation/leds/leds-cht-wcove.rst
@@ -0,0 +1,38 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================================================
+Kernel driver for Intel Cherry Trail Whiskey Cove PMIC LEDs
+===========================================================
+
+/sys/class/leds/<led>/hw_pattern
+--------------------------------
+
+Specify a hardware pattern for the Whiskey Cove PMIC LEDs.
+
+The only supported pattern is hardware breathing mode::
+
+ "0 2000 1 2000"
+
+ ^
+ |
+ Max-| ---
+ | / \
+ | / \
+ | / \ /
+ | / \ /
+ Min-|- ---
+ |
+ 0------2------4--> time (sec)
+
+The rise and fall times must be the same value.
+Supported values are 2000, 1000, 500 and 250 for
+breathing frequencies of 1/4, 1/2, 1 and 2 Hz.
+
+The set pattern only controls the timing. For max brightness the last
+set brightness is used and the max brightness can be changed
+while breathing by writing the brightness attribute.
+
+This is just like how blinking works in the LED subsystem;
+for both sw and hw blinking the brightness can also be changed
+while blinking. Breathing on this hw really is just a variant
+mode of blinking.
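As a small illustration, the breathing pattern can be written from a C program
just as it can with a shell redirect; only the hw_pattern attribute and the
pattern string come from the description above, the LED name is a placeholder::

    #include <stdio.h>

    int main(void)
    {
        /* Placeholder LED name; use the actual entry in /sys/class/leds/. */
        FILE *f = fopen("/sys/class/leds/example-led/hw_pattern", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* 2000 ms rise to max, 2000 ms fall to min => 1/4 Hz breathing. */
        fputs("0 2000 1 2000", f);
        fclose(f);
        return 0;
    }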
diff --git a/Documentation/leds/well-known-leds.txt b/Documentation/leds/well-known-leds.txt
index e9c30dc75884..67b44704801f 100644
--- a/Documentation/leds/well-known-leds.txt
+++ b/Documentation/leds/well-known-leds.txt
@@ -58,6 +58,7 @@ LEDs on notebook body, indicating that sound input / output is muted.
* System notification
+Good: "rgb:status"
Legacy: "status-led:{red,green,blue}" (Motorola Droid 4)
Legacy: "lp5523:{r,g,b}" (Nokia N900)
@@ -65,7 +66,7 @@ Phones usually have multi-color status LED.
* Power management
-Good: "platform:*:charging" (allwinner sun50i)
+Good: "platform:*:charging" (allwinner sun50i, leds-cht-wcove)
* Screen
diff --git a/Documentation/misc-devices/index.rst b/Documentation/misc-devices/index.rst
index 756be15a49a4..ecc40fbbcfb8 100644
--- a/Documentation/misc-devices/index.rst
+++ b/Documentation/misc-devices/index.rst
@@ -28,5 +28,6 @@ fit into other categories.
oxsemi-tornado
pci-endpoint-test
spear-pcie-gadget
+ tps6594-pfsm
uacce
xilinx_sdfec
diff --git a/Documentation/misc-devices/tps6594-pfsm.rst b/Documentation/misc-devices/tps6594-pfsm.rst
new file mode 100644
index 000000000000..4ada37ccdcba
--- /dev/null
+++ b/Documentation/misc-devices/tps6594-pfsm.rst
@@ -0,0 +1,87 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+Texas Instruments TPS6594 PFSM driver
+=====================================
+
+Author: Julien Panis (jpanis@baylibre.com)
+
+Overview
+========
+
+Strictly speaking, PFSM (Pre-configurable Finite State Machine) is not
+hardware. It is a piece of code.
+
+The TPS6594 PMIC (Power Management IC) integrates a state machine which
+manages operational modes. Depending on the current operational mode,
+some voltage domains remain energized while others can be off.
+
+The PFSM driver can be used to trigger transitions between configured
+states. It also provides R/W access to the device registers.
+
+Supported chips
+---------------
+
+- tps6594-q1
+- tps6593-q1
+- lp8764-q1
+
+Driver location
+===============
+
+drivers/misc/tps6594-pfsm.c
+
+Driver type definitions
+=======================
+
+include/uapi/linux/tps6594_pfsm.h
+
+Driver IOCTLs
+=============
+
+:c:macro:`PMIC_GOTO_STANDBY`
+All device resources are powered down. The processor is off, and
+no voltage domains are energized.
+
+:c:macro:`PMIC_GOTO_LP_STANDBY`
+The digital and analog functions of the PMIC, which are not
+required to be always-on, are turned off (low-power).
+
+:c:macro:`PMIC_UPDATE_PGM`
+Triggers a firmware update.
+
+:c:macro:`PMIC_SET_ACTIVE_STATE`
+One of the operational modes.
+The PMICs are fully functional and supply power to all PDN loads.
+All voltage domains are energized in both MCU and Main processor
+sections.
+
+:c:macro:`PMIC_SET_MCU_ONLY_STATE`
+One of the operational modes.
+Only the power resources assigned to the MCU Safety Island are on.
+
+:c:macro:`PMIC_SET_RETENTION_STATE`
+One of the operational modes.
+Depending on the triggers set, some DDR/GPIO voltage domains can
+remain energized, while all other domains are off to minimize
+total system power.
+
+Driver usage
+============
+
+See available PFSMs::
+
+ # ls /dev/pfsm*
+
+Dump the registers of pages 0 and 1::
+
+ # hexdump -C /dev/pfsm-0-0x48
+
+See PFSM events::
+
+ # cat /proc/interrupts
+
+Userspace code example
+----------------------
+
+samples/pfsm/pfsm-wakeup.c
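As a hedged sketch of the register-dump path described above (state transitions
go through the ioctls listed earlier; see samples/pfsm/pfsm-wakeup.c and
include/uapi/linux/tps6594_pfsm.h for the authoritative usage), assuming the
character device exposes registers via plain read() as the hexdump example
implies, and with a device name taken from that example::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char regs[16];
        ssize_t n, i;
        /* Device name taken from the example above; it varies per board. */
        int fd = open("/dev/pfsm-0-0x48", O_RDONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        n = read(fd, regs, sizeof(regs));      /* first registers of page 0 */
        for (i = 0; i < n; i++)
            printf("reg 0x%02zx = 0x%02x\n", (size_t)i, regs[i]);
        close(fd);
        return 0;
    }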
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 247c6c4127e9..1cc35de336a4 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -433,6 +433,15 @@ start N bytes into the buffer leaving the first N bytes for the
application to use. The final option is the flags field, but it will
be dealt with in separate sections for each UMEM flag.
+SO_BINDTODEVICE setsockopt
+--------------------------
+
+This is a generic SOL_SOCKET option that can be used to tie an AF_XDP
+socket to a particular network interface. It is useful when a socket
+is created by a privileged process and passed to a non-privileged one.
+Once the option is set, the kernel will refuse attempts to bind that
+socket to a different interface. Updating the value requires CAP_NET_RAW.
+
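A minimal sketch of the intended flow; only SO_BINDTODEVICE itself and the
CAP_NET_RAW requirement come from the text above, the interface name is a
placeholder::

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
        const char ifname[] = "eth0";          /* placeholder interface */
        int fd = socket(AF_XDP, SOCK_RAW, 0);  /* created by the privileged process */

        if (fd < 0) {
            perror("socket(AF_XDP)");
            return 1;
        }
        if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                       ifname, strlen(ifname)) < 0) {
            perror("SO_BINDTODEVICE");
            return 1;
        }
        /*
         * Hand fd to the non-privileged process (e.g. via SCM_RIGHTS);
         * binds to any other ifindex will now be refused by the kernel.
         */
        return 0;
    }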
XDP_STATISTICS getsockopt
-------------------------
diff --git a/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst b/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
index 4118384cf8eb..289c146a8291 100644
--- a/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
+++ b/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
@@ -190,8 +190,7 @@ MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
3. Userspace configuration
==========================
-rmnet userspace configuration is done through netlink library librmnetctl
-and command line utility rmnetcli. Utility is hosted in codeaurora forum git.
-The driver uses rtnl_link_ops for communication.
+rmnet userspace configuration is done through netlink using iproute2
+https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/
-https://source.codeaurora.org/quic/la/platform/vendor/qcom-opensource/dataservices/tree/rmnetctl
+The driver uses rtnl_link_ops for communication.
diff --git a/Documentation/networking/device_drivers/ethernet/amd/pds_vdpa.rst b/Documentation/networking/device_drivers/ethernet/amd/pds_vdpa.rst
new file mode 100644
index 000000000000..587927d3de92
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/amd/pds_vdpa.rst
@@ -0,0 +1,85 @@
+.. SPDX-License-Identifier: GPL-2.0+
+.. note: can be edited and viewed with /usr/bin/formiko-vim
+
+==========================================================
+PCI vDPA driver for the AMD/Pensando(R) DSC adapter family
+==========================================================
+
+AMD/Pensando vDPA VF Device Driver
+
+Copyright(c) 2023 Advanced Micro Devices, Inc
+
+Overview
+========
+
+The ``pds_vdpa`` driver is an auxiliary bus driver that supplies
+a vDPA device for use by the virtio network stack. It is used with
+the Pensando Virtual Function devices that offer vDPA and virtio queue
+services. It depends on the ``pds_core`` driver and hardware for the PF
+and VF PCI handling as well as for device configuration services.
+
+Using the device
+================
+
+The ``pds_vdpa`` device is enabled via multiple configuration steps and
+depends on the ``pds_core`` driver to create and enable SR-IOV Virtual
+Function devices. After the VFs are enabled, we enable the vDPA service
+in the ``pds_core`` device to create the auxiliary devices used by pds_vdpa.
+
+Example steps:
+
+.. code-block:: bash
+
+ #!/bin/bash
+
+ modprobe pds_core
+ modprobe vdpa
+ modprobe pds_vdpa
+
+ PF_BDF=`ls /sys/module/pds_core/drivers/pci\:pds_core/*/sriov_numvfs | awk -F / '{print $7}'`
+
+ # Enable vDPA VF auxiliary device(s) in the PF
+ devlink dev param set pci/$PF_BDF name enable_vnet cmode runtime value true
+
+ # Create a VF for vDPA use
+ echo 1 > /sys/bus/pci/drivers/pds_core/$PF_BDF/sriov_numvfs
+
+ # Find the vDPA services/devices available
+ PDS_VDPA_MGMT=`vdpa mgmtdev show | grep vDPA | head -1 | cut -d: -f1`
+
+ # Create a vDPA device for use in virtio network configurations
+ vdpa dev add name vdpa1 mgmtdev $PDS_VDPA_MGMT mac 00:11:22:33:44:55
+
+ # Set up an ethernet interface on the vdpa device
+ modprobe virtio_vdpa
+
+
+
+Enabling the driver
+===================
+
+The driver is enabled via the standard kernel configuration system,
+using the make command::
+
+ make oldconfig/menuconfig/etc.
+
+The driver is located in the menu structure at:
+
+ -> Device Drivers
+ -> Network device support (NETDEVICES [=y])
+ -> Ethernet driver support
+ -> Pensando devices
+ -> Pensando Ethernet PDS_VDPA Support
+
+Support
+=======
+
+For general Linux networking support, please use the netdev mailing
+list, which is monitored by Pensando personnel::
+
+ netdev@vger.kernel.org
+
+For more specific support needs, please use the Pensando driver support
+email::
+
+ drivers@pensando.io
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 417ca514a4d0..94ecb67c0885 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -15,6 +15,7 @@ Contents:
amazon/ena
altera/altera_tse
amd/pds_core
+ amd/pds_vdpa
aquantia/atlantic
chelsio/cxgb
cirrus/cs89x0
diff --git a/Documentation/process/6.Followthrough.rst b/Documentation/process/6.Followthrough.rst
index a173cd5f93d2..66fa400c6d94 100644
--- a/Documentation/process/6.Followthrough.rst
+++ b/Documentation/process/6.Followthrough.rst
@@ -51,6 +51,13 @@ mind:
working toward the creation of the best kernel they can; they are not
trying to create discomfort for their employers' competitors.
+ - Be prepared for seemingly silly requests for coding style changes
+ and requests to factor out some of your code to shared parts of
+ the kernel. One job the maintainers do is to keep things looking
+ the same. Sometimes this means that the clever hack in your driver
+ to get around a problem actually needs to become a generalized
+ kernel feature ready for next time.
+
What all of this comes down to is that, when reviewers send you comments,
you need to pay attention to the technical observations that they are
making. Do not let their form of expression or your own pride keep that
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 5cf6a5f8ca57..5561dae94f85 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -60,6 +60,8 @@ openssl & libcrypto 1.0.0 openssl version
bc 1.06.95 bc --version
Sphinx\ [#f1]_ 1.7 sphinx-build --version
cpio any cpio --version
+GNU tar 1.28 tar --version
+gtags (optional) 6.6.5 gtags --version
====================== =============== ========================================
.. [#f1] Sphinx is needed only to build the Kernel documentation
@@ -174,6 +176,18 @@ You will need openssl to build kernels 3.7 and higher if module signing is
enabled. You will also need openssl development packages to build kernels 4.3
and higher.
+Tar
+---
+
+GNU tar is needed if you want to enable access to the kernel headers via sysfs
+(CONFIG_IKHEADERS).
+
+gtags / GNU GLOBAL (optional)
+-----------------------------
+
+The kernel build requires GNU GLOBAL version 6.6.5 or later to generate
+tag files through ``make gtags``. This is due to its use of the gtags
+``-C (--directory)`` flag.
System utilities
****************
diff --git a/Documentation/process/maintainer-handbooks.rst b/Documentation/process/maintainer-handbooks.rst
index fe24cb665fb7..9992bfd7eaa3 100644
--- a/Documentation/process/maintainer-handbooks.rst
+++ b/Documentation/process/maintainer-handbooks.rst
@@ -18,3 +18,4 @@ Contents:
maintainer-netdev
maintainer-soc
maintainer-tip
+ maintainer-kvm-x86
diff --git a/Documentation/process/maintainer-kvm-x86.rst b/Documentation/process/maintainer-kvm-x86.rst
new file mode 100644
index 000000000000..9183bd449762
--- /dev/null
+++ b/Documentation/process/maintainer-kvm-x86.rst
@@ -0,0 +1,390 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+KVM x86
+=======
+
+Foreword
+--------
+KVM strives to be a welcoming community; contributions from newcomers are
+valued and encouraged. Please do not be discouraged or intimidated by the
+length of this document and the many rules/guidelines it contains. Everyone
+makes mistakes, and everyone was a newbie at some point. So long as you make
+an honest effort to follow KVM x86's guidelines, are receptive to feedback,
+and learn from any mistakes you make, you will be welcomed with open arms, not
+torches and pitchforks.
+
+TL;DR
+-----
+Testing is mandatory. Be consistent with established styles and patterns.
+
+Trees
+-----
+KVM x86 is currently in a transition period from being part of the main KVM
+tree, to being "just another KVM arch". As such, KVM x86 is split across the
+main KVM tree, ``git.kernel.org/pub/scm/virt/kvm/kvm.git``, and a KVM x86
+specific tree, ``github.com/kvm-x86/linux.git``.
+
+Generally speaking, fixes for the current cycle are applied directly to the
+main KVM tree, while all development for the next cycle is routed through the
+KVM x86 tree. In the unlikely event that a fix for the current cycle is routed
+through the KVM x86 tree, it will be applied to the ``fixes`` branch before
+making its way to the main KVM tree.
+
+Note, this transition period is expected to last quite some time, i.e. will be
+the status quo for the foreseeable future.
+
+Branches
+~~~~~~~~
+The KVM x86 tree is organized into multiple topic branches. The purpose of
+using finer-grained topic branches is to make it easier to keep tabs on an area
+of development, and to limit the collateral damage of human errors and/or buggy
+commits, e.g. dropping the HEAD commit of a topic branch has no impact on other
+in-flight commits' SHA1 hashes, and having to reject a pull request due to bugs
+delays only that topic branch.
+
+All topic branches, except for ``next`` and ``fixes``, are rolled into ``next``
+via a Cthulhu merge on an as-needed basis, i.e. when a topic branch is updated.
+As a result, force pushes to ``next`` are common.
+
+Lifecycle
+~~~~~~~~~
+Fixes that target the current release, a.k.a. mainline, are typically applied
+directly to the main KVM tree, i.e. do not route through the KVM x86 tree.
+
+Changes that target the next release are routed through the KVM x86 tree. Pull
+requests (from KVM x86 to main KVM) are sent for each KVM x86 topic branch,
+typically the week before Linus' opening of the merge window, e.g. the week
+following rc7 for "normal" releases. If all goes well, the topic branches are
+rolled into the main KVM pull request sent during Linus' merge window.
+
+The KVM x86 tree doesn't have its own official merge window, but there's a soft
+close around rc5 for new features, and a soft close around rc6 for fixes (for
+the next release; see above for fixes that target the current release).
+
+Timeline
+~~~~~~~~
+Submissions are typically reviewed and applied in FIFO order, with some wiggle
+room for the size of a series, patches that are "cache hot", etc. Fixes,
+especially for the current release and or stable trees, get to jump the queue.
+Patches that will be taken through a non-KVM tree (most often through the tip
+tree) and/or have other acks/reviews also jump the queue to some extent.
+
+Note, the vast majority of review is done between rc1 and rc6, give or take.
+The period between rc6 and the next rc1 is used to catch up on other tasks,
+i.e. radio silence during this period isn't unusual.
+
+Pings to get a status update are welcome, but keep in mind the timing of the
+current release cycle and have realistic expectations. If you are pinging for
+acceptance, i.e. not just for feedback or an update, please do everything you
+can, within reason, to ensure that your patches are ready to be merged! Pings
+on series that break the build or fail tests lead to unhappy maintainers!
+
+Development
+-----------
+
+Base Tree/Branch
+~~~~~~~~~~~~~~~~
+Fixes that target the current release, a.k.a. mainline, should be based on
+``git://git.kernel.org/pub/scm/virt/kvm/kvm.git master``. Note, fixes do not
+automatically warrant inclusion in the current release. There is no singular
+rule, but typically only fixes for bugs that are urgent, critical, and/or were
+introduced in the current release should target the current release.
+
+Everything else should be based on ``kvm-x86/next``, i.e. there is no need to
+select a specific topic branch as the base. If there are conflicts and/or
+dependencies across topic branches, it is the maintainer's job to sort them
+out.
+
+The only exception to using ``kvm-x86/next`` as the base is if a patch/series
+is a multi-arch series, i.e. has non-trivial modifications to common KVM code
+and/or has more than superficial changes to other architectures' code. Multi-
+arch patch/series should instead be based on a common, stable point in KVM's
+history, e.g. the release candidate upon which ``kvm-x86 next`` is based. If
+you're unsure whether a patch/series is truly multi-arch, err on the side of
+caution and treat it as multi-arch, i.e. use a common base.
+
+Coding Style
+~~~~~~~~~~~~
+When it comes to style, naming, patterns, etc., consistency is the number one
+priority in KVM x86. If all else fails, match what already exists.
+
+With a few caveats listed below, follow the tip tree maintainers' preferred
+:ref:`maintainer-tip-coding-style`, as patches/series often touch both KVM and
+non-KVM x86 files, i.e. draw the attention of KVM *and* tip tree maintainers.
+
+Using reverse fir tree, a.k.a. reverse Christmas tree or reverse XMAS tree, for
+variable declarations isn't strictly required, though it is still preferred.
+
+Except for a handful of special snowflakes, do not use kernel-doc comments for
+functions. The vast majority of "public" KVM functions aren't truly public as
+they are intended only for KVM-internal consumption (there are plans to
+privatize KVM's headers and exports to enforce this).
+
+Comments
+~~~~~~~~
+Write comments using imperative mood and avoid pronouns. Use comments to
+provide a high level overview of the code, and/or to explain why the code does
+what it does. Do not reiterate what the code literally does; let the code
+speak for itself. If the code itself is inscrutable, comments will not help.
+
+SDM and APM References
+~~~~~~~~~~~~~~~~~~~~~~
+Much of KVM's code base is directly tied to architectural behavior defined in
+Intel's Software Development Manual (SDM) and AMD's Architecture Programmer’s
+Manual (APM). Use of "Intel's SDM" and "AMD's APM", or even just "SDM" or
+"APM", without additional context is a-ok.
+
+Do not reference specific sections, tables, figures, etc. by number, especially
+not in comments. Instead, if necessary (see below), copy-paste the relevant
+snippet and reference sections/tables/figures by name. The layouts of the SDM
+and APM are constantly changing, and so the numbers/labels aren't stable.
+
+Generally speaking, do not explicitly reference or copy-paste from the SDM or
+APM in comments. With few exceptions, KVM *must* honor architectural behavior,
+therefore it's implied that KVM behavior is emulating SDM and/or APM behavior.
+Note, referencing the SDM/APM in changelogs to justify the change and provide
+context is perfectly ok and encouraged.
+
+Shortlog
+~~~~~~~~
+The preferred prefix format is ``KVM: <topic>:``, where ``<topic>`` is one of::
+
+ - x86
+ - x86/mmu
+ - x86/pmu
+ - x86/xen
+ - selftests
+ - SVM
+ - nSVM
+ - VMX
+ - nVMX
+
+**DO NOT use x86/kvm!** ``x86/kvm`` is used exclusively for Linux-as-a-KVM-guest
+changes, i.e. for arch/x86/kernel/kvm.c. Do not use file names or complete file
+paths as the subject/shortlog prefix.
+
+Note, these don't align with the topic branches (the topic branches care much
+more about code conflicts).
+
+All names are case sensitive! ``KVM: x86:`` is good, ``kvm: vmx:`` is not.
+
+Capitalize the first word of the condensed patch description, but omit ending
+punctuation. E.g.::
+
+ KVM: x86: Fix a null pointer dereference in function_xyz()
+
+not::
+
+ kvm: x86: fix a null pointer dereference in function_xyz.
+
+If a patch touches multiple topics, traverse up the conceptual tree to find the
+first common parent (which is often simply ``x86``). When in doubt,
+``git log path/to/file`` should provide a reasonable hint.
+
+New topics do occasionally pop up, but please start an on-list discussion if
+you want to propose introducing a new topic, i.e. don't go rogue.
+
+See :ref:`the_canonical_patch_format` for more information, with one amendment:
+do not treat the 70-75 character limit as an absolute, hard limit. Instead,
+use 75 characters as a firm-but-not-hard limit, and use 80 characters as a hard
+limit. I.e. let the shortlog run a few characters over the standard limit if
+you have good reason to do so.
+
+Changelog
+~~~~~~~~~
+Most importantly, write changelogs using imperative mood and avoid pronouns.
+
+See :ref:`describe_changes` for more information, with one amendment: lead with
+a short blurb on the actual changes, and then follow up with the context and
+background. Note! This order directly conflicts with the tip tree's preferred
+approach! Please follow the tip tree's preferred style when sending patches
+that primarily target arch/x86 code that is _NOT_ KVM code.
+
+Stating what a patch does before diving into details is preferred by KVM x86
+for several reasons. First and foremost, what code is actually being changed
+is arguably the most important information, and so that info should be easy to
+find. Changelogs that bury the "what's actually changing" in a one-liner after
+3+ paragraphs of background make it very hard to find that information.
+
+For initial review, one could argue the "what's broken" is more important, but
+for skimming logs and git archaeology, the gory details matter less and less.
+E.g. when doing a series of "git blame", the details of each change along the
+way are useless, the details only matter for the culprit. Providing the "what
+changed" makes it easy to quickly determine whether or not a commit might be of
+interest.
+
+Another benefit of stating "what's changing" first is that it's almost always
+possible to state "what's changing" in a single sentence. Conversely, all but
+the most simple bugs require multiple sentences or paragraphs to fully describe
+the problem. If both the "what's changing" and "what's the bug" are super
+short then the order doesn't matter. But if one is shorter (almost always the
+"what's changing), then covering the shorter one first is advantageous because
+it's less of an inconvenience for readers/reviewers that have a strict ordering
+preference. E.g. having to skip one sentence to get to the context is less
+painful than having to skip three paragraphs to get to "what's changing".
+
+Fixes
+~~~~~
+If a change fixes a KVM/kernel bug, add a Fixes: tag even if the change doesn't
+need to be backported to stable kernels, and even if the change fixes a bug in
+an older release.
+
+Conversely, if a fix does need to be backported, explicitly tag the patch with
+"Cc: stable@vger.kernel" (though the email itself doesn't need to Cc: stable);
+KVM x86 opts out of backporting Fixes: by default. Some auto-selected patches
+do get backported, but require explicit maintainer approval (search MANUALSEL).
+
+Function References
+~~~~~~~~~~~~~~~~~~~
+When a function is mentioned in a comment, changelog, or shortlog (or anywhere
+for that matter), use the format ``function_name()``. The parentheses provide
+context and disambiguate the reference.
+
+Testing
+-------
+At a bare minimum, *all* patches in a series must build cleanly for KVM_INTEL=m
+KVM_AMD=m, and KVM_WERROR=y. Building every possible combination of Kconfigs
+isn't feasible, but the more the merrier. KVM_SMM, KVM_XEN, PROVE_LOCKING, and
+X86_64 are particularly interesting knobs to turn.
+
+Running KVM selftests and KVM-unit-tests is also mandatory (and stating the
+obvious, the tests need to pass). The only exception is for changes that have
+negligible probability of affecting runtime behavior, e.g. patches that only
+modify comments. When possible and relevant, testing on both Intel and AMD is
+strongly preferred. Booting an actual VM is encouraged, but not mandatory.
+
+For changes that touch KVM's shadow paging code, running with TDP (EPT/NPT)
+disabled is mandatory. For changes that affect common KVM MMU code, running
+with TDP disabled is strongly encouraged. For all other changes, if the code
+being modified depends on and/or interacts with a module param, testing with
+the relevant settings is mandatory.
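+
+E.g. one way to disable TDP is via the vendor modules' parameters (a sketch; run
+as root, with no VMs active, before reloading the modules)::
+
+  # Intel: disable EPT
+  modprobe -r kvm_intel && modprobe kvm_intel ept=0
+  # AMD: disable NPT
+  modprobe -r kvm_amd && modprobe kvm_amd npt=0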
+
+Note, KVM selftests and KVM-unit-tests do have known failures. If you suspect
+a failure is not due to your changes, verify that the *exact same* failure
+occurs with and without your changes.
+
+Changes that touch reStructuredText documentation, i.e. .rst files, must build
+htmldocs cleanly, i.e. with no new warnings or errors.
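+
+For example, from the top of the kernel tree (SPHINXDIRS is optional and simply
+restricts the build to the listed subdirectories)::
+
+  make SPHINXDIRS="virt" htmldocs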
+
+If you can't fully test a change, e.g. due to lack of hardware, clearly state
+what level of testing you were able to do, e.g. in the cover letter.
+
+New Features
+~~~~~~~~~~~~
+With one exception, new features *must* come with test coverage. KVM specific
+tests aren't strictly required, e.g. if coverage is provided by running a
+sufficiently enabled guest VM, or by running a related kernel selftest in a VM,
+but dedicated KVM tests are preferred in all cases. Negative testcases in
+particular are mandatory when enabling new hardware features, as error and
+exception flows are rarely exercised simply by running a VM.
+
+The only exception to this rule is if KVM is simply advertising support for a
+feature via KVM_GET_SUPPORTED_CPUID, i.e. for instructions/features that KVM
+can't prevent a guest from using and for which there is no true enabling.
+
+Note, "new features" does not just mean "new hardware features"! New features
+that can't be well validated using existing KVM selftests and/or KVM-unit-tests
+must come with tests.
+
+Posting new feature development without tests to get early feedback is more
+than welcome, but such submissions should be tagged RFC, and the cover letter
+should clearly state what type of feedback is requested/expected. Do not abuse
+the RFC process; RFCs will typically not receive in-depth review.
+
+Bug Fixes
+~~~~~~~~~
+Except for "obvious" found-by-inspection bugs, fixes must be accompanied by a
+reproducer for the bug being fixed. In many cases the reproducer is implicit,
+e.g. for build errors and test failures, but it should still be clear to
+readers what is broken and how to verify the fix. Some leeway is given for
+bugs that are found via non-public workloads/tests, but providing regression
+tests for such bugs is strongly preferred.
+
+In general, regression tests are preferred for any bug that is not trivial to
+hit. E.g. even if the bug was originally found by a fuzzer such as syzkaller,
+a targeted regression test may be warranted if the bug requires hitting a
+one-in-a-million type race condition.
+
+Note, KVM bugs are rarely urgent *and* non-trivial to reproduce. Ask yourself
+if a bug is really truly the end of the world before posting a fix without a
+reproducer.
+
+Posting
+-------
+
+Links
+~~~~~
+Do not explicitly reference bug reports, prior versions of a patch/series, etc.
+via ``In-Reply-To:`` headers. Using ``In-Reply-To:`` becomes an unholy mess
+for large series and/or when the version count gets high, and ``In-Reply-To:``
+is useless for anyone that doesn't have the original message, e.g. if someone
+wasn't Cc'd on the bug report or if the list of recipients changes between
+versions.
+
+To link to a bug report, previous version, or anything of interest, use lore
+links. For referencing previous version(s), generally speaking do not include
+a Link: in the changelog as there is no need to record the history in git, i.e.
+put the link in the cover letter or in the section git ignores. Do provide a
+formal Link: for bug reports and/or discussions that led to the patch. The
+context of why a change was made is highly valuable for future readers.
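+
+For example, a formal Link: trailer is simply the lore URL of the relevant
+report or discussion (the message-id below is a hypothetical placeholder)::
+
+  Link: https://lore.kernel.org/kvm/some-message-id@example.com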
+
+Git Base
+~~~~~~~~
+If you are using git version 2.9.0 or later (Googlers, this is all of you!),
+use ``git format-patch`` with the ``--base`` flag to automatically include the
+base tree information in the generated patches.
+
+Note, ``--base=auto`` works as expected if and only if a branch's upstream is
+set to the base topic branch, e.g. it will do the wrong thing if your upstream
+is set to your personal repository for backup purposes. An alternative "auto"
+solution is to derive the names of your development branches based on their
+KVM x86 topic, and feed that into ``--base``. E.g. ``x86/pmu/my_branch_name``,
+and then write a small wrapper to extract ``pmu`` from the current branch name
+to yield ``--base=x/pmu``, where ``x`` is whatever name your repository uses to
+track the KVM x86 remote.
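+
+A minimal sketch of such a wrapper, assuming branches named like
+``x86/pmu/my_branch_name`` and a remote named ``x`` (adapt both to your setup)::
+
+  #!/bin/sh
+  # Derive the topic, e.g. "pmu" from "x86/pmu/my_branch_name", and hand it to
+  # --base as a remote-tracking ref.
+  topic=$(git rev-parse --abbrev-ref HEAD | cut -d/ -f2)
+  exec git format-patch --base="x/$topic" "$@"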
+
+Co-Posting Tests
+~~~~~~~~~~~~~~~~
+KVM selftests that are associated with KVM changes, e.g. regression tests for
+bug fixes, should be posted along with the KVM changes as a single series. The
+standard kernel rules for bisection apply, i.e. KVM changes that result in test
+failures should be ordered after the selftests updates, and vice versa, new
+tests that fail due to KVM bugs should be ordered after the KVM fixes.
+
+KVM-unit-tests should *always* be posted separately. Tools, e.g. b4 am, don't
+know that KVM-unit-tests is a separate repository and get confused when patches
+in a series apply on different trees. To tie KVM-unit-tests patches back to
+KVM patches, first post the KVM changes and then provide a lore Link: to the
+KVM patch/series in the KVM-unit-tests patch(es).
+
+Notifications
+-------------
+When a patch/series is officially accepted, a notification email will be sent
+in reply to the original posting (cover letter for multi-patch series). The
+notification will include the tree and topic branch, along with the SHA1s of
+the commits of applied patches.
+
+If a subset of patches is applied, this will be clearly stated in the
+notification. Unless stated otherwise, it's implied that any patches in the
+series that were not accepted need more work and should be submitted in a new
+version.
+
+If for some reason a patch is dropped after officially being accepted, a reply
+will be sent to the notification email explaining why the patch was dropped, as
+well as the next steps.
+
+SHA1 Stability
+~~~~~~~~~~~~~~
+SHA1s are not 100% guaranteed to be stable until they land in Linus' tree! A
+SHA1 is *usually* stable once a notification has been sent, but things happen.
+In most cases, an update to the notification email will be provided if an applied
+patch's SHA1 changes. However, in some scenarios, e.g. if all KVM x86 branches
+need to be rebased, individual notifications will not be given.
+
+Vulnerabilities
+---------------
+Bugs that can be exploited by the guest to attack the host (kernel or
+userspace), or that can be exploited by a nested VM to *its* host (L2 attacking
+L1), are of particular interest to KVM. Please follow the protocol for
+:ref:`securitybugs` if you suspect a bug can lead to an escape, data leak, etc.
+
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
index 83614cec9328..2397b31c0198 100644
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -149,8 +149,11 @@ themselves. No email should ever be sent to the list with the main purpose
of communicating with the bot, the bot commands should be seen as metadata.
The use of the bot is restricted to authors of the patches (the ``From:``
-header on patch submission and command must match!), maintainers themselves
-and a handful of senior reviewers. Bot records its activity here:
+header on patch submission and command must match!), maintainers of
+the modified code according to the MAINTAINERS file (again, ``From:``
+must match the MAINTAINERS entry) and a handful of senior reviewers.
+
+The bot records its activity here:
https://patchwork.hopto.org/pw-bot.html
diff --git a/Documentation/process/maintainer-tip.rst b/Documentation/process/maintainer-tip.rst
index 93d8a794bdfc..08dd0f804410 100644
--- a/Documentation/process/maintainer-tip.rst
+++ b/Documentation/process/maintainer-tip.rst
@@ -455,6 +455,8 @@ and can be added to an existing kernel config by running:
Some of these options are x86-specific and can be left out when testing
on other architectures.
+.. _maintainer-tip-coding-style:
+
Coding style notes
------------------
diff --git a/Documentation/riscv/acpi.rst b/Documentation/riscv/acpi.rst
new file mode 100644
index 000000000000..9870a282815b
--- /dev/null
+++ b/Documentation/riscv/acpi.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+ACPI on RISC-V
+==============
+
+The ISA string parsing rules for ACPI are defined by the `Version ASCIIDOC
+Conversion, 12/2022 release of the RISC-V specifications, tag
+"riscv-isa-release-1239329-2023-05-23" (commit 1239329)
+<https://github.com/riscv/riscv-isa-manual/releases/tag/riscv-isa-release-1239329-2023-05-23>`_
diff --git a/Documentation/riscv/index.rst b/Documentation/riscv/index.rst
index 95cf9c1e1da1..81cf6e616476 100644
--- a/Documentation/riscv/index.rst
+++ b/Documentation/riscv/index.rst
@@ -5,6 +5,7 @@ RISC-V architecture
.. toctree::
:maxdepth: 1
+ acpi
boot-image-header
vm-layout
hwprobe
diff --git a/Documentation/riscv/vector.rst b/Documentation/riscv/vector.rst
index 48f189d79e41..165b7ed0ac4f 100644
--- a/Documentation/riscv/vector.rst
+++ b/Documentation/riscv/vector.rst
@@ -130,3 +130,11 @@ processes in form of sysctl knob:
Modifying the system default enablement status does not affect the enablement
status of any existing process or thread that does not make an execve() call.
+
+3. Vector Register State Across System Calls
+---------------------------------------------
+
+As indicated by version 1.0 of the V extension [1], vector registers are
+clobbered by system calls.
+
+1: https://github.com/riscv/riscv-v-spec/blob/master/calling-convention.adoc
diff --git a/Documentation/target/scripts.rst b/Documentation/target/scripts.rst
index 172d42b522e4..aa7b9c62c1b3 100644
--- a/Documentation/target/scripts.rst
+++ b/Documentation/target/scripts.rst
@@ -1,9 +1,3 @@
-TCM mod builder script
-----------------------
-
-.. literalinclude:: tcm_mod_builder.py
- :language: perl
-
Target export device script
---------------------------
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
deleted file mode 100755
index 54492aa813b9..000000000000
--- a/Documentation/target/tcm_mod_builder.py
+++ /dev/null
@@ -1,656 +0,0 @@
-#!/usr/bin/env python
-# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
-#
-# Copyright (c) 2010 Rising Tide Systems
-# Copyright (c) 2010 Linux-iSCSI.org
-#
-# Author: nab@kernel.org
-#
-import os, sys
-import subprocess as sub
-import string
-import re
-import optparse
-
-tcm_dir = ""
-
-fabric_ops = []
-fabric_mod_dir = ""
-fabric_mod_port = ""
-fabric_mod_init_port = ""
-
-def tcm_mod_err(msg):
- print msg
- sys.exit(1)
-
-def tcm_mod_create_module_subdir(fabric_mod_dir_var):
-
- if os.path.isdir(fabric_mod_dir_var) == True:
- return 1
-
- print "Creating fabric_mod_dir: " + fabric_mod_dir_var
- ret = os.mkdir(fabric_mod_dir_var)
- if ret:
- tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
-
- return
-
-def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
- global fabric_mod_port
- global fabric_mod_init_port
- buf = ""
-
- f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
- print "Writing file: " + f
-
- p = open(f, 'w');
- if not p:
- tcm_mod_err("Unable to open file: " + f)
-
- buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
- buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
- buf += "\n"
- buf += "struct " + fabric_mod_name + "_tpg {\n"
- buf += " /* FC lport target portal group tag for TCM */\n"
- buf += " u16 lport_tpgt;\n"
- buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
- buf += " struct " + fabric_mod_name + "_lport *lport;\n"
- buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
- buf += " struct se_portal_group se_tpg;\n"
- buf += "};\n"
- buf += "\n"
- buf += "struct " + fabric_mod_name + "_lport {\n"
- buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
- buf += " u64 lport_wwpn;\n"
- buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
- buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
- buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
- buf += " struct se_wwn lport_wwn;\n"
- buf += "};\n"
-
- ret = p.write(buf)
- if ret:
- tcm_mod_err("Unable to write f: " + f)
-
- p.close()
-
- fabric_mod_port = "lport"
- fabric_mod_init_port = "nport"
-
- return
-
-def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
- global fabric_mod_port
- global fabric_mod_init_port
- buf = ""
-
- f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
- print "Writing file: " + f
-
- p = open(f, 'w');
- if not p:
- tcm_mod_err("Unable to open file: " + f)
-
- buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
- buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
- buf += "\n"
- buf += "struct " + fabric_mod_name + "_tpg {\n"
- buf += " /* SAS port target portal group tag for TCM */\n"
- buf += " u16 tport_tpgt;\n"
- buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
- buf += " struct " + fabric_mod_name + "_tport *tport;\n"
- buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
- buf += " struct se_portal_group se_tpg;\n"
- buf += "};\n\n"
- buf += "struct " + fabric_mod_name + "_tport {\n"
- buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
- buf += " u64 tport_wwpn;\n"
- buf += " /* ASCII formatted WWPN for SAS Target port */\n"
- buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
- buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
- buf += " struct se_wwn tport_wwn;\n"
- buf += "};\n"
-
- ret = p.write(buf)
- if ret:
- tcm_mod_err("Unable to write f: " + f)
-
- p.close()
-
- fabric_mod_port = "tport"
- fabric_mod_init_port = "iport"
-
- return
-
-def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
- global fabric_mod_port
- global fabric_mod_init_port
- buf = ""
-
- f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
- print "Writing file: " + f
-
- p = open(f, 'w');
- if not p:
- tcm_mod_err("Unable to open file: " + f)
-
- buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
- buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
- buf += "\n"
- buf += "struct " + fabric_mod_name + "_tpg {\n"
- buf += " /* iSCSI target portal group tag for TCM */\n"
- buf += " u16 tport_tpgt;\n"
- buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
- buf += " struct " + fabric_mod_name + "_tport *tport;\n"
- buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
- buf += " struct se_portal_group se_tpg;\n"
- buf += "};\n\n"
- buf += "struct " + fabric_mod_name + "_tport {\n"
- buf += " /* ASCII formatted TargetName for IQN */\n"
- buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
- buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
- buf += " struct se_wwn tport_wwn;\n"
- buf += "};\n"
-
- ret = p.write(buf)
- if ret:
- tcm_mod_err("Unable to write f: " + f)
-
- p.close()
-
- fabric_mod_port = "tport"
- fabric_mod_init_port = "iport"
-
- return
-
-def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
-
- if proto_ident == "FC":
- tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
- elif proto_ident == "SAS":
- tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
- elif proto_ident == "iSCSI":
- tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
- else:
- print "Unsupported proto_ident: " + proto_ident
- sys.exit(1)
-
- return
-
-def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
- buf = ""
-
- f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
- print "Writing file: " + f
-
- p = open(f, 'w');
- if not p:
- tcm_mod_err("Unable to open file: " + f)
-
- buf = "#include <linux/module.h>\n"
- buf += "#include <linux/moduleparam.h>\n"
- buf += "#include <linux/version.h>\n"
- buf += "#include <generated/utsrelease.h>\n"
- buf += "#include <linux/utsname.h>\n"
- buf += "#include <linux/init.h>\n"
- buf += "#include <linux/slab.h>\n"
- buf += "#include <linux/kthread.h>\n"
- buf += "#include <linux/types.h>\n"
- buf += "#include <linux/string.h>\n"
- buf += "#include <linux/configfs.h>\n"
- buf += "#include <linux/ctype.h>\n"
- buf += "#include <asm/unaligned.h>\n"
- buf += "#include <scsi/scsi_proto.h>\n\n"
- buf += "#include <target/target_core_base.h>\n"
- buf += "#include <target/target_core_fabric.h>\n"
- buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
- buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
-
- buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
-
- buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
- buf += " struct se_wwn *wwn,\n"
- buf += " struct config_group *group,\n"
- buf += " const char *name)\n"
- buf += "{\n"
- buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
- buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
- buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
- buf += " unsigned long tpgt;\n"
- buf += " int ret;\n\n"
- buf += " if (strstr(name, \"tpgt_\") != name)\n"
- buf += " return ERR_PTR(-EINVAL);\n"
- buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
- buf += " return ERR_PTR(-EINVAL);\n\n"
- buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
- buf += " if (!tpg) {\n"
- buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
- buf += " return ERR_PTR(-ENOMEM);\n"
- buf += " }\n"
- buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
- buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
-
- if proto_ident == "FC":
- buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
- elif proto_ident == "SAS":
- buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
- elif proto_ident == "iSCSI":
- buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
-
- buf += " if (ret < 0) {\n"
- buf += " kfree(tpg);\n"
- buf += " return NULL;\n"
- buf += " }\n"
- buf += " return &tpg->se_tpg;\n"
- buf += "}\n\n"
- buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
- buf += "{\n"
- buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
- buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
- buf += " core_tpg_deregister(se_tpg);\n"
- buf += " kfree(tpg);\n"
- buf += "}\n\n"
-
- buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
- buf += " struct target_fabric_configfs *tf,\n"
- buf += " struct config_group *group,\n"
- buf += " const char *name)\n"
- buf += "{\n"
- buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
-
- if proto_ident == "FC" or proto_ident == "SAS":
- buf += " u64 wwpn = 0;\n\n"
-
- buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
- buf += " return ERR_PTR(-EINVAL); */\n\n"
- buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
- buf += " if (!" + fabric_mod_port + ") {\n"
- buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
- buf += " return ERR_PTR(-ENOMEM);\n"
- buf += " }\n"
-
- if proto_ident == "FC" or proto_ident == "SAS":
- buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
-
- buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
- buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
- buf += "}\n\n"
- buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
- buf += "{\n"
- buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
- buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
- buf += " kfree(" + fabric_mod_port + ");\n"
- buf += "}\n\n"
-
- buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
- buf += " .module = THIS_MODULE,\n"
- buf += " .name = \"" + fabric_mod_name + "\",\n"
- buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
- buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
- buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
- buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
- buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
- buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
- buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
- buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
- buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
- buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
- buf += " .sess_get_initiator_sid = NULL,\n"
- buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
- buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
- buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
- buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
- buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
- buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
- buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
- buf += " /*\n"
- buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
- buf += " */\n"
- buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
- buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
- buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
- buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
- buf += "};\n\n"
-
- buf += "static int __init " + fabric_mod_name + "_init(void)\n"
- buf += "{\n"
- buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
- buf += "};\n\n"
-
- buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
- buf += "{\n"
- buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
- buf += "};\n\n"
-
- buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
- buf += "MODULE_LICENSE(\"GPL\");\n"
- buf += "module_init(" + fabric_mod_name + "_init);\n"
- buf += "module_exit(" + fabric_mod_name + "_exit);\n"
-
- ret = p.write(buf)
- if ret:
- tcm_mod_err("Unable to write f: " + f)
-
- p.close()
-
- return
-
-def tcm_mod_scan_fabric_ops(tcm_dir):
-
- fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
-
- print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
- process_fo = 0;
-
- p = open(fabric_ops_api, 'r')
-
- line = p.readline()
- while line:
- if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
- line = p.readline()
- continue
-
- if process_fo == 0:
- process_fo = 1;
- line = p.readline()
- # Search for function pointer
- if not re.search('\(\*', line):
- continue
-
- fabric_ops.append(line.rstrip())
- continue
-
- line = p.readline()
- # Search for function pointer
- if not re.search('\(\*', line):
- continue
-
- fabric_ops.append(line.rstrip())
-
- p.close()
- return
-
-def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
- buf = ""
- bufi = ""
-
- f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
- print "Writing file: " + f
-
- p = open(f, 'w')
- if not p:
- tcm_mod_err("Unable to open file: " + f)
-
- fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
- print "Writing file: " + fi
-
- pi = open(fi, 'w')
- if not pi:
- tcm_mod_err("Unable to open file: " + fi)
-
- buf = "#include <linux/slab.h>\n"
- buf += "#include <linux/kthread.h>\n"
- buf += "#include <linux/types.h>\n"
- buf += "#include <linux/list.h>\n"
- buf += "#include <linux/types.h>\n"
- buf += "#include <linux/string.h>\n"
- buf += "#include <linux/ctype.h>\n"
- buf += "#include <asm/unaligned.h>\n"
- buf += "#include <scsi/scsi_common.h>\n"
- buf += "#include <scsi/scsi_proto.h>\n"
- buf += "#include <target/target_core_base.h>\n"
- buf += "#include <target/target_core_fabric.h>\n"
- buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
- buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
-
- buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
- buf += "{\n"
- buf += " return 1;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
-
- buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
-
- total_fabric_ops = len(fabric_ops)
- i = 0
-
- while i < total_fabric_ops:
- fo = fabric_ops[i]
- i += 1
-# print "fabric_ops: " + fo
-
- if re.search('get_fabric_name', fo):
- buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
- buf += "{\n"
- buf += " return \"" + fabric_mod_name + "\";\n"
- buf += "}\n\n"
- bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
- continue
-
- if re.search('get_wwn', fo):
- buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
- buf += "{\n"
- buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
- buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
- buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
- buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
- buf += "}\n\n"
- bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
-
- if re.search('get_tag', fo):
- buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
- buf += "{\n"
- buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
- buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
- buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
- buf += "}\n\n"
- bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
-
- if re.search('tpg_get_inst_index\)\(', fo):
- buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
- buf += "{\n"
- buf += " return 1;\n"
- buf += "}\n\n"
- bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
-
- if re.search('\*release_cmd\)\(', fo):
- buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
-
- if re.search('sess_get_index\)\(', fo):
- buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
-
- if re.search('write_pending\)\(', fo):
- buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
-
- if re.search('set_default_node_attributes\)\(', fo):
- buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
-
- if re.search('get_cmd_state\)\(', fo):
- buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
-
- if re.search('queue_data_in\)\(', fo):
- buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
-
- if re.search('queue_status\)\(', fo):
- buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
-
- if re.search('queue_tm_rsp\)\(', fo):
- buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
-
- if re.search('aborted_task\)\(', fo):
- buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
-
- ret = p.write(buf)
- if ret:
- tcm_mod_err("Unable to write f: " + f)
-
- p.close()
-
- ret = pi.write(bufi)
- if ret:
- tcm_mod_err("Unable to write fi: " + fi)
-
- pi.close()
- return
-
-def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
-
- buf = ""
- f = fabric_mod_dir_var + "/Makefile"
- print "Writing file: " + f
-
- p = open(f, 'w')
- if not p:
- tcm_mod_err("Unable to open file: " + f)
-
- buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
- buf += " " + fabric_mod_name + "_configfs.o\n"
- buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
-
- ret = p.write(buf)
- if ret:
- tcm_mod_err("Unable to write f: " + f)
-
- p.close()
- return
-
-def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
-
- buf = ""
- f = fabric_mod_dir_var + "/Kconfig"
- print "Writing file: " + f
-
- p = open(f, 'w')
- if not p:
- tcm_mod_err("Unable to open file: " + f)
-
- buf = "config " + fabric_mod_name.upper() + "\n"
- buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
- buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
- buf += " default n\n"
- buf += " help\n"
- buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
-
- ret = p.write(buf)
- if ret:
- tcm_mod_err("Unable to write f: " + f)
-
- p.close()
- return
-
-def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
- buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
- kbuild = tcm_dir + "/drivers/target/Makefile"
-
- f = open(kbuild, 'a')
- f.write(buf)
- f.close()
- return
-
-def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
- buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
- kconfig = tcm_dir + "/drivers/target/Kconfig"
-
- f = open(kconfig, 'a')
- f.write(buf)
- f.close()
- return
-
-def main(modname, proto_ident):
-# proto_ident = "FC"
-# proto_ident = "SAS"
-# proto_ident = "iSCSI"
-
- tcm_dir = os.getcwd();
- tcm_dir += "/../../"
- print "tcm_dir: " + tcm_dir
- fabric_mod_name = modname
- fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
- print "Set fabric_mod_name: " + fabric_mod_name
- print "Set fabric_mod_dir: " + fabric_mod_dir
- print "Using proto_ident: " + proto_ident
-
- if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
- print "Unsupported proto_ident: " + proto_ident
- sys.exit(1)
-
- ret = tcm_mod_create_module_subdir(fabric_mod_dir)
- if ret:
- print "tcm_mod_create_module_subdir() failed because module already exists!"
- sys.exit(1)
-
- tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
- tcm_mod_scan_fabric_ops(tcm_dir)
- tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
- tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
- tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
- tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
-
- input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
- if input == "yes" or input == "y":
- tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
-
- input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
- if input == "yes" or input == "y":
- tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
-
- return
-
-parser = optparse.OptionParser()
-parser.add_option('-m', '--modulename', help='Module name', dest='modname',
- action='store', nargs=1, type='string')
-parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
- action='store', nargs=1, type='string')
-
-(opts, args) = parser.parse_args()
-
-mandatories = ['modname', 'protoident']
-for m in mandatories:
- if not opts.__dict__[m]:
- print "mandatory option is missing\n"
- parser.print_help()
- exit(-1)
-
-if __name__ == "__main__":
-
- main(str(opts.modname), opts.protoident)
diff --git a/Documentation/tools/rtla/common_options.rst b/Documentation/tools/rtla/common_options.rst
index af76df6205d4..aeb91ff3bd68 100644
--- a/Documentation/tools/rtla/common_options.rst
+++ b/Documentation/tools/rtla/common_options.rst
@@ -2,6 +2,10 @@
Set the osnoise tracer to run the sample threads in the cpu-list.
+**-H**, **--house-keeping** *cpu-list*
+
+ Run rtla control threads only on the given cpu-list.
+
**-d**, **--duration** *time[s|m|h|d]*
Set the duration of the session.
@@ -42,6 +46,10 @@
- *f:prio* - use SCHED_FIFO with *prio*;
- *d:runtime[us|ms|s]:period[us|ms|s]* - use SCHED_DEADLINE with *runtime* and *period* in nanoseconds.
+**-C**, **--cgroup**\[*=cgroup*]
+
+ Set a *cgroup* for the tracer's threads. If the **-C** option is passed without arguments, the tracer's threads will inherit **rtla**'s *cgroup*. Otherwise, the threads will be placed in the *cgroup* passed to the option.
+
**-h**, **--help**
Print help menu.
diff --git a/Documentation/tools/rtla/common_timerlat_aa.rst b/Documentation/tools/rtla/common_timerlat_aa.rst
index 795b9fbcbc6d..077029e6b289 100644
--- a/Documentation/tools/rtla/common_timerlat_aa.rst
+++ b/Documentation/tools/rtla/common_timerlat_aa.rst
@@ -5,10 +5,3 @@
**--no-aa**
disable auto-analysis, reducing rtla timerlat cpu usage
-
-**--aa-only** *us*
-
- Set stop tracing conditions and run without collecting and displaying statistics.
- Print the auto-analysis if the system hits the stop tracing condition. This option
- is useful to reduce rtla timerlat CPU, enabling the debug without the overhead of
- collecting the statistics.
diff --git a/Documentation/tools/rtla/common_timerlat_options.rst b/Documentation/tools/rtla/common_timerlat_options.rst
index bacdea6de7a3..88506b397c2d 100644
--- a/Documentation/tools/rtla/common_timerlat_options.rst
+++ b/Documentation/tools/rtla/common_timerlat_options.rst
@@ -26,3 +26,10 @@
Set the /dev/cpu_dma_latency to *us*, aiming to bound exit from idle latencies.
*cyclictest* sets this value to *0* by default, use **--dma-latency** *0* to have
similar results.
+
+**-u**, **--user-threads**
+
+ Set timerlat to run without a workload, and then dispatch user-space workloads
+ to wait on the timerlat_fd. Once a workload is awakened, it goes to sleep again,
+ adding the kernel-to-user and user-to-kernel transition measurements to the tracer
+ output.
diff --git a/Documentation/tools/rtla/rtla-timerlat-hist.rst b/Documentation/tools/rtla/rtla-timerlat-hist.rst
index 6bf7f0ca4556..057db78d4095 100644
--- a/Documentation/tools/rtla/rtla-timerlat-hist.rst
+++ b/Documentation/tools/rtla/rtla-timerlat-hist.rst
@@ -29,15 +29,18 @@ OPTIONS
.. include:: common_options.rst
+.. include:: common_timerlat_aa.rst
+
EXAMPLE
=======
In the example below, **rtla timerlat hist** is set to run for *10* minutes,
in the cpus *0-4*, *skipping zero* only lines. Moreover, **rtla timerlat
hist** will change the priority of the *timerlat* threads to run under
*SCHED_DEADLINE* priority, with a *10us* runtime every *1ms* period. The
-*1ms* period is also passed to the *timerlat* tracer::
+*1ms* period is also passed to the *timerlat* tracer. Auto-analysis is disabled
+to reduce overhead ::
- [root@alien ~]# timerlat hist -d 10m -c 0-4 -P d:100us:1ms -p 1ms
+ [root@alien ~]# timerlat hist -d 10m -c 0-4 -P d:100us:1ms -p 1ms --no-aa
# RTLA timerlat histogram
# Time unit is microseconds (us)
# Duration: 0 00:10:00
diff --git a/Documentation/tools/rtla/rtla-timerlat-top.rst b/Documentation/tools/rtla/rtla-timerlat-top.rst
index 73799c1150ad..1b7cf4e3eafe 100644
--- a/Documentation/tools/rtla/rtla-timerlat-top.rst
+++ b/Documentation/tools/rtla/rtla-timerlat-top.rst
@@ -32,6 +32,13 @@ OPTIONS
.. include:: common_timerlat_aa.rst
+**--aa-only** *us*
+
+ Set stop tracing conditions and run without collecting and displaying statistics.
+ Print the auto-analysis if the system hits the stop tracing condition. This option
+ is useful for reducing rtla timerlat CPU usage, enabling debugging without the
+ overhead of collecting the statistics.
+
EXAMPLE
=======
diff --git a/Documentation/trace/coresight/coresight-dummy.rst b/Documentation/trace/coresight/coresight-dummy.rst
new file mode 100644
index 000000000000..eff7c553ee6c
--- /dev/null
+++ b/Documentation/trace/coresight/coresight-dummy.rst
@@ -0,0 +1,32 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================
+Coresight Dummy Trace Module
+=============================
+
+ :Author: Hao Zhang <quic_hazha@quicinc.com>
+ :Date: June 2023
+
+Introduction
+------------
+
+The Coresight dummy trace module is for specific devices that the kernel does
+not have permission to access or configure, e.g., CoreSight TPDMs on Qualcomm
+platforms. For these devices, a dummy driver is needed to register them as
+Coresight devices. The module may also be used to define components that may
+not have any programming interfaces, so that paths can be created in the driver.
+It provides the Coresight API for operations on dummy devices, such as enabling
+and disabling them. It also provides the Coresight dummy sink/source paths for
+debugging.
+
+Config details
+--------------
+
+There are two types of nodes, dummy sink and dummy source. These nodes
+are available at ``/sys/bus/coresight/devices``.
+
+Example output::
+
+ $ ls -l /sys/bus/coresight/devices | grep dummy
+ dummy_sink0 -> ../../../devices/platform/soc@0/soc@0:sink/dummy_sink0
+ dummy_source0 -> ../../../devices/platform/soc@0/soc@0:source/dummy_source0
diff --git a/Documentation/trace/hisi-ptt.rst b/Documentation/trace/hisi-ptt.rst
index 4f87d8e21065..989255eb5622 100644
--- a/Documentation/trace/hisi-ptt.rst
+++ b/Documentation/trace/hisi-ptt.rst
@@ -148,14 +148,20 @@ For example, if the desired filter is Endpoint function 0000:01:00.1 the filter
value will be 0x00101. If the desired filter is Root Port 0000:00:10.0 then
then filter value is calculated as 0x80001.
+The driver also presents every supported Root Port and Requester filter through
+sysfs. Each filter is an individual file named after its related PCIe device
+(domain:bus:device.function). The Root Port filter files are under
+$(PTT PMU dir)/root_port_filters and the Requester filter files are under
+$(PTT PMU dir)/requester_filters.
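+
+For example (a sketch; the PMU name below is illustrative and depends on the
+platform)::
+
+  $ ls /sys/bus/event_source/devices/hisi_ptt0_2/root_port_filters
+  0000:00:10.0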
+
Note that multiple Root Ports can be specified at one time, but only one
Endpoint function can be specified in one trace. Specifying both Root Port
and function at the same time is not supported. Driver maintains a list of
available filters and will check the invalid inputs.
-Currently the available filters are detected in driver's probe. If the supported
-devices are removed/added after probe, you may need to reload the driver to update
-the filters.
+The available filters will be dynamically updated, which means you will always
+get correct filter information when hotplug events happen, or when you manually
+remove/rescan the devices.
2. Type
-------
diff --git a/Documentation/translations/zh_CN/process/2.Process.rst b/Documentation/translations/zh_CN/process/2.Process.rst
index 4a6ed0219494..e68c9de0f7f8 100644
--- a/Documentation/translations/zh_CN/process/2.Process.rst
+++ b/Documentation/translations/zh_CN/process/2.Process.rst
@@ -358,7 +358,7 @@ Andrew Morton 为有抱负的内核开å‘人员æ供了如下建议
机器上始终完美è¿è¡Œâ€ã€‚通常的方法是和其他人一起解决问题(这å¯èƒ½éœ€
è¦åšæŒï¼ï¼‰ï¼Œä½†å°±æ˜¯å¦‚此——这是内核开å‘的一部分。
-(http://lwn.net/articles/283982/)
+(http://lwn.net/Articles/283982/)
在没有明显问题需è¦è§£å†³çš„情况下,通常建议开å‘人员查看当å‰çš„回归和开放缺陷
列表。从æ¥éƒ½ä¸ç¼ºå°‘需è¦è§£å†³çš„问题;通过解决这些问题,开å‘人员将从该过程获得
diff --git a/Documentation/translations/zh_CN/process/3.Early-stage.rst b/Documentation/translations/zh_CN/process/3.Early-stage.rst
index de53dd12e911..2caba4753b75 100644
--- a/Documentation/translations/zh_CN/process/3.Early-stage.rst
+++ b/Documentation/translations/zh_CN/process/3.Early-stage.rst
@@ -44,7 +44,7 @@
试图å‘这些人传达用户需求是浪费时间。他们太“èªæ˜Žâ€äº†ï¼Œæ ¹æœ¬å¬ä¸åˆ°å°‘æ•°
人的è¯ã€‚
-(http://lwn.net/articles/131776/)
+(http://lwn.net/Articles/131776/)
实际情况å´æ˜¯ä¸åŒçš„;与特定模å—相比,内核开å‘人员更关心系统稳定性ã€é•¿æœŸç»´æŠ¤
以åŠæ‰¾åˆ°é—®é¢˜çš„正确解决方案。这个故事的寓æ„是把é‡ç‚¹æ”¾åœ¨é—®é¢˜ä¸Šâ€”—而ä¸æ˜¯å…·ä½“çš„
diff --git a/Documentation/translations/zh_CN/process/4.Coding.rst b/Documentation/translations/zh_CN/process/4.Coding.rst
index 94f7f866f103..7cac9424f5d5 100644
--- a/Documentation/translations/zh_CN/process/4.Coding.rst
+++ b/Documentation/translations/zh_CN/process/4.Coding.rst
@@ -149,7 +149,7 @@ Linus对这个问题给出了最佳答案:
所以我们ä¸ä¼šé€šè¿‡å¼•å…¥æ–°é—®é¢˜æ¥ä¿®å¤é”™è¯¯ã€‚è¿™ç§æ–¹å¼æ˜¯é ä¸ä½çš„,没人知é“
是å¦çœŸçš„有进展。是å‰è¿›ä¸¤æ­¥ã€åŽé€€ä¸€æ­¥ï¼Œè¿˜æ˜¯å‰è¿›ä¸€æ­¥ã€åŽé€€ä¸¤æ­¥ï¼Ÿ
-(http://lwn.net/articles/243460/)
+(http://lwn.net/Articles/243460/)
特别ä¸å—欢迎的一ç§å›žå½’类型是用户空间ABI的任何å˜åŒ–。一旦接å£è¢«å¯¼å‡ºåˆ°ç”¨æˆ·ç©ºé—´ï¼Œ
就必须无é™æœŸåœ°æ”¯æŒå®ƒã€‚这一事实使得用户空间接å£çš„创建特别具有挑战性:因为它们
diff --git a/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst b/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst
index 6d0dadae13b1..57beca02181c 100644
--- a/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst
+++ b/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst
@@ -98,7 +98,7 @@ Gitæ供了一些强大的工具,å¯ä»¥è®©æ‚¨é‡å†™å¼€å‘历å²ã€‚一个ä¸æ–
ä½ å¯ä»¥ç»™æˆ‘å‘è¡¥ä¸ï¼Œä½†å½“我从你那里拉å–一个Gitè¡¥ä¸æ—¶ï¼Œæˆ‘需è¦çŸ¥é“你清楚
自己在åšä»€ä¹ˆï¼Œæˆ‘需è¦èƒ½å¤Ÿç›¸ä¿¡äº‹æƒ…而 *无需* 手动检查æ¯ä¸ªå•ç‹¬çš„更改。
-(http://lwn.net/articles/224135/)。
+(http://lwn.net/Articles/224135/)。
为了é¿å…è¿™ç§æƒ…况,请确ä¿ç»™å®šåˆ†æ”¯ä¸­çš„所有补ä¸éƒ½ä¸Žç›¸å…³ä¸»é¢˜ç´§å¯†ç›¸å…³ï¼›â€œé©±åŠ¨ç¨‹åº
ä¿®å¤â€åˆ†æ”¯ä¸åº”更改核心内存管ç†ä»£ç ã€‚而且,最é‡è¦çš„是,ä¸è¦ä½¿ç”¨Gitæ ‘æ¥ç»•è¿‡
diff --git a/Documentation/translations/zh_TW/process/2.Process.rst b/Documentation/translations/zh_TW/process/2.Process.rst
index b01cdd3a39ae..9d465df1f6c3 100644
--- a/Documentation/translations/zh_TW/process/2.Process.rst
+++ b/Documentation/translations/zh_TW/process/2.Process.rst
@@ -361,7 +361,7 @@ Andrew Morton 爲有抱負的內核開發人員æ供了如下建議
機器上始終完美é‹è¡Œã€ã€‚通常的方法是和其他人一起解決å•é¡Œï¼ˆé€™å¯èƒ½éœ€
è¦å …æŒï¼ï¼‰ï¼Œä½†å°±æ˜¯å¦‚此——這是內核開發的一部分。
-(http://lwn.net/articles/283982/)
+(http://lwn.net/Articles/283982/)
在沒有明顯å•é¡Œéœ€è¦è§£æ±ºçš„情æ³ä¸‹ï¼Œé€šå¸¸å»ºè­°é–‹ç™¼äººå“¡æŸ¥çœ‹ç•¶å‰çš„回歸和開放缺陷
列表。從來都ä¸ç¼ºå°‘需è¦è§£æ±ºçš„å•é¡Œï¼›é€šéŽè§£æ±ºé€™äº›å•é¡Œï¼Œé–‹ç™¼äººå“¡å°‡å¾žè©²éŽç¨‹ç²å¾—
diff --git a/Documentation/translations/zh_TW/process/3.Early-stage.rst b/Documentation/translations/zh_TW/process/3.Early-stage.rst
index ab2a45fd65a4..076873ca0905 100644
--- a/Documentation/translations/zh_TW/process/3.Early-stage.rst
+++ b/Documentation/translations/zh_TW/process/3.Early-stage.rst
@@ -47,7 +47,7 @@
試圖å‘這些人傳é”用戶需求是浪費時間。他們太「è°æ˜Žã€äº†ï¼Œæ ¹æœ¬è½ä¸åˆ°å°‘數
人的話。
-(http://lwn.net/articles/131776/)
+(http://lwn.net/Articles/131776/)
實際情æ³å»æ˜¯ä¸åŒçš„;與特定模塊相比,內核開發人員更關心系統穩定性ã€é•·æœŸç¶­è­·
以åŠæ‰¾åˆ°å•é¡Œçš„正確解決方案。這個故事的寓æ„是把é‡é»žæ”¾åœ¨å•é¡Œä¸Šâ€”—而ä¸æ˜¯å…·é«”çš„
diff --git a/Documentation/translations/zh_TW/process/4.Coding.rst b/Documentation/translations/zh_TW/process/4.Coding.rst
index ccc3946227a0..7fc0344ed16b 100644
--- a/Documentation/translations/zh_TW/process/4.Coding.rst
+++ b/Documentation/translations/zh_TW/process/4.Coding.rst
@@ -152,7 +152,7 @@ Linuså°é€™å€‹å•é¡Œçµ¦å‡ºäº†æœ€ä½³ç­”案:
所以我們ä¸æœƒé€šéŽå¼•å…¥æ–°å•é¡Œä¾†ä¿®å¾©éŒ¯èª¤ã€‚這種方å¼æ˜¯é ä¸ä½çš„,沒人知é“
是å¦çœŸçš„有進展。是å‰é€²å…©æ­¥ã€å¾Œé€€ä¸€æ­¥ï¼Œé‚„是å‰é€²ä¸€æ­¥ã€å¾Œé€€å…©æ­¥ï¼Ÿ
-(http://lwn.net/articles/243460/)
+(http://lwn.net/Articles/243460/)
特別ä¸å—歡迎的一種回歸類型是用戶空間ABI的任何變化。一旦接å£è¢«å°Žå‡ºåˆ°ç”¨æˆ¶ç©ºé–“,
就必須無é™æœŸåœ°æ”¯æŒå®ƒã€‚這一事實使得用戶空間接å£çš„創建特別具有挑戰性:因爲它們
diff --git a/Documentation/translations/zh_TW/process/7.AdvancedTopics.rst b/Documentation/translations/zh_TW/process/7.AdvancedTopics.rst
index 3de093d0f170..4fbc104a37ca 100644
--- a/Documentation/translations/zh_TW/process/7.AdvancedTopics.rst
+++ b/Documentation/translations/zh_TW/process/7.AdvancedTopics.rst
@@ -101,7 +101,7 @@ Gitæ供了一些強大的工具,å¯ä»¥è®“您é‡å¯«é–‹ç™¼æ­·å²ã€‚一個ä¸æ–
ä½ å¯ä»¥çµ¦æˆ‘發補ä¸ï¼Œä½†ç•¶æˆ‘從你那裡拉å–一個Git補ä¸æ™‚,我需è¦çŸ¥é“你清楚
自己在åšä»€éº¼ï¼Œæˆ‘需è¦èƒ½å¤ ç›¸ä¿¡äº‹æƒ…而 *無需* 手動檢查æ¯å€‹å–®ç¨çš„更改。
-(http://lwn.net/articles/224135/)。
+(http://lwn.net/Articles/224135/)。
爲了é¿å…這種情æ³ï¼Œè«‹ç¢ºä¿çµ¦å®šåˆ†æ”¯ä¸­çš„所有補ä¸éƒ½èˆ‡ç›¸é—œä¸»é¡Œç·Šå¯†ç›¸é—œï¼›ã€Œé©…動程åº
修復ã€åˆ†æ”¯ä¸æ‡‰æ›´æ”¹æ ¸å¿ƒå…§å­˜ç®¡ç†ä»£ç¢¼ã€‚而且,最é‡è¦çš„是,ä¸è¦ä½¿ç”¨Git樹來繞éŽ
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 4f7b23faebb9..4ea5b837399a 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -180,6 +180,7 @@ Code Seq# Include File Comments
'P' 00-0F drivers/usb/class/usblp.c conflict!
'P' 01-09 drivers/misc/pci_endpoint_test.c conflict!
'P' 00-0F xen/privcmd.h conflict!
+'P' 00-05 linux/tps6594_pfsm.h conflict!
'Q' all linux/soundcard.h
'R' 00-1F linux/random.h conflict!
'R' 01 linux/rfkill.h conflict!
diff --git a/Documentation/userspace-api/media/frontend.h.rst.exceptions b/Documentation/userspace-api/media/frontend.h.rst.exceptions
index 8b73fee11a79..dcaf5740de7e 100644
--- a/Documentation/userspace-api/media/frontend.h.rst.exceptions
+++ b/Documentation/userspace-api/media/frontend.h.rst.exceptions
@@ -142,6 +142,10 @@ ignore symbol FEC_26_45
ignore symbol FEC_28_45
ignore symbol FEC_32_45
ignore symbol FEC_77_90
+ignore symbol FEC_11_45
+ignore symbol FEC_4_15
+ignore symbol FEC_14_45
+ignore symbol FEC_7_15
ignore symbol TRANSMISSION_MODE_AUTO
ignore symbol TRANSMISSION_MODE_1K
diff --git a/Documentation/userspace-api/media/v4l/biblio.rst b/Documentation/userspace-api/media/v4l/biblio.rst
index 9cd18c153d19..72aef1759b60 100644
--- a/Documentation/userspace-api/media/v4l/biblio.rst
+++ b/Documentation/userspace-api/media/v4l/biblio.rst
@@ -427,3 +427,12 @@ VP9
:title: VP9 Bitstream & Decoding Process Specification
:author: Adrian Grange (Google), Peter de Rivaz (Argon Design), Jonathan Hunt (Argon Design)
+
+.. _av1:
+
+AV1
+===
+
+:title: AV1 Bitstream & Decoding Process Specification
+
+:author: Peter de Rivaz, Argon Design Ltd, Jack Haughton, Argon Design Ltd
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-camera.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-camera.rst
index daa4f40869f8..cdc515c60468 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-camera.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-camera.rst
@@ -506,6 +506,8 @@ enum v4l2_scene_mode -
value down. A value of zero stops the motion if one is in progress
and has no effect otherwise.
+.. _v4l2-camera-sensor-orientation:
+
``V4L2_CID_CAMERA_ORIENTATION (menu)``
This read-only control describes the camera orientation by reporting its
mounting position on the device where the camera is installed. The control
@@ -536,6 +538,7 @@ enum v4l2_scene_mode -
- The camera is not directly attached to the device and is freely movable.
+.. _v4l2-camera-sensor-rotation:
``V4L2_CID_CAMERA_SENSOR_ROTATION (integer)``
This read-only control describes the rotation correction in degrees in the
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
index 3d8411acd5b8..81e60f4002c8 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
@@ -1890,11 +1890,11 @@ params syntax' of the :ref:`vp9` specification for more details.
* - __u8
- ``tree_probs[7]``
- Specifies the probability values to be used when decoding a Segment-ID.
- See '5.15. Segmentation map' section of :ref:`vp9` for more details.
+ See '5.15 Segmentation map' section of :ref:`vp9` for more details.
* - __u8
- ``pred_probs[3]``
- Specifies the probability values to be used when decoding a
- Predicted-Segment-ID. See '6.4.14. Get segment id syntax'
+ Predicted-Segment-ID. See '6.4.14 Get segment id syntax'
section of :ref:`vp9` for more details.
* - __u8
- ``flags``
@@ -2923,6 +2923,13 @@ This structure contains all loop filter related parameters. See sections
- ``poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
- PocLtCurr as described in section 8.3.2 "Decoding process for reference
picture set": provides the index of the long term references in DPB array.
+ * - __u8
+ - ``num_delta_pocs_of_ref_rps_idx``
+ - When the short_term_ref_pic_set_sps_flag in the slice header is equal to 0,
+ it is the same as the derived value NumDeltaPocs[RefRpsIdx]. It can be used to parse
+ the RPS data in slice headers instead of skipping it with @short_term_ref_pic_set_size.
+ When the value of short_term_ref_pic_set_sps_flag in the slice header is
+ equal to 1, num_delta_pocs_of_ref_rps_idx shall be set to 0.
* - struct :c:type:`v4l2_hevc_dpb_entry`
- ``dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
- The decoded picture buffer, for meta-data about reference frames.
@@ -2950,3 +2957,1208 @@ This structure contains all loop filter related parameters. See sections
* - ``V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR``
- 0x00000004
-
+
+.. _v4l2-codec-stateless-av1:
+
+``V4L2_CID_STATELESS_AV1_SEQUENCE (struct)``
+ Represents an AV1 Sequence OBU (Open Bitstream Unit). See section 5.5
+ "Sequence header OBU syntax" in :ref:`av1` for more details.
+
+.. c:type:: v4l2_ctrl_av1_sequence
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{5.8cm}|p{4.8cm}|p{6.6cm}|
+
+.. flat-table:: struct v4l2_ctrl_av1_sequence
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u32
+ - ``flags``
+ - See :ref:`AV1 Sequence Flags <av1_sequence_flags>`.
+ * - __u8
+ - ``seq_profile``
+ - Specifies the features that can be used in the coded video sequence.
+ * - __u8
+ - ``order_hint_bits``
+ - Specifies the number of bits used for the order_hint field at each frame.
+ * - __u8
+ - ``bit_depth``
+ - Specifies the bit depth to use for the sequence, as described in section
+ 5.5.2 "Color config syntax" in :ref:`av1`.
+ * - __u8
+ - ``reserved``
+ - Applications and drivers must set this to zero.
+ * - __u16
+ - ``max_frame_width_minus_1``
+ - Specifies the maximum frame width minus 1 for the frames represented by
+ this sequence header.
+
+.. _av1_sequence_flags:
+
+``AV1 Sequence Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE``
+ - 0x00000001
+ - If set, specifies that the coded video sequence contains only one coded
+ frame. If not set, specifies that the coded video sequence contains one
+ or more coded frames.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK``
+ - 0x00000002
+ - If set, indicates that superblocks contain 128x128 luma samples.
+ When equal to 0, it indicates that superblocks contain 64x64 luma
+ samples. The number of contained chroma samples depends on
+ subsampling_x and subsampling_y.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA``
+ - 0x00000004
+ - If set, specifies that the use_filter_intra syntax element may be
+ present. If not set, specifies that the use_filter_intra syntax element
+ will not be present.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER``
+ - 0x00000008
+ - Specifies whether the intra edge filtering process should be enabled.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND``
+ - 0x00000010
+ - If set, specifies that the mode info for inter blocks may contain the
+ syntax element interintra. If not set, specifies that the syntax element
+ interintra will not be present.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND``
+ - 0x00000020
+ - If set, specifies that the mode info for inter blocks may contain the
+ syntax element compound_type. If not set, specifies that the syntax
+ element compound_type will not be present.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION``
+ - 0x00000040
+ - If set, indicates that the allow_warped_motion syntax element may be
+ present. If not set, indicates that the allow_warped_motion syntax
+ element will not be present.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER``
+ - 0x00000080
+ - If set, indicates that the inter prediction filter type may be specified
+ independently in the horizontal and vertical directions. If the flag is
+ equal to 0, only one filter type may be specified, which is then used in
+ both directions.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT``
+ - 0x00000100
+ - If set, indicates that tools based on the values of order hints may be
+ used. If not set, indicates that tools based on order hints are
+ disabled.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP``
+ - 0x00000200
+ - If set, indicates that the distance weights process may be used for
+ inter prediction.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS``
+ - 0x00000400
+ - If set, indicates that the use_ref_frame_mvs syntax element may be
+ present. If not set, indicates that the use_ref_frame_mvs syntax element
+ will not be present.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES``
+ - 0x00000800
+ - If set, specifies that the use_superres syntax element will be present
+ in the uncompressed header. If not set, specifies that the use_superres
+ syntax element will not be present (instead use_superres will be set to
+ 0 in the uncompressed header without being read).
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF``
+ - 0x00001000
+ - If set, specifies that cdef filtering may be enabled. If not set,
+ specifies that cdef filtering is disabled.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION``
+ - 0x00002000
+ - If set, specifies that loop restoration filtering may be enabled. If not
+ set, specifies that loop restoration filtering is disabled.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME``
+ - 0x00004000
+ - If set, indicates that the video does not contain U and V color planes.
+ If not set, indicates that the video contains Y, U, and V color planes.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE``
+ - 0x00008000
+ - If set, signals full swing representation, i.e. "Full Range
+ Quantization". If not set, signals studio swing representation, i.e.
+ "Limited Range Quantization".
+ * - ``V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X``
+ - 0x00010000
+ - Specifies the chroma subsampling format.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y``
+ - 0x00020000
+ - Specifies the chroma subsampling format.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT``
+ - 0x00040000
+ - Specifies whether film grain parameters are present in the coded video
+ sequence.
+ * - ``V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q``
+ - 0x00080000
+ - If set, indicates that the U and V planes may have separate delta
+ quantizer values. If not set, indicates that the U and V planes will share
+ the same delta quantizer value.
+
+``V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY (struct)``
+ Represents a single AV1 tile inside an AV1 Tile Group. Note that MiRowStart,
+ MiRowEnd, MiColStart and MiColEnd can be retrieved from struct
+ v4l2_av1_tile_info in struct v4l2_ctrl_av1_frame using tile_row and
+ tile_col. See section 6.10.1 "General tile group OBU semantics" in
+ :ref:`av1` for more details.
+
+.. c:type:: v4l2_ctrl_av1_tile_group_entry
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{5.8cm}|p{4.8cm}|p{6.6cm}|
+
+.. flat-table:: struct v4l2_ctrl_av1_tile_group_entry
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u32
+ - ``tile_offset``
+ - Offset from the OBU data, i.e. where the coded tile data actually starts.
+ * - __u32
+ - ``tile_size``
+ - Specifies the size in bytes of the coded tile. Equivalent to "TileSize"
+ in :ref:`av1`.
+ * - __u32
+ - ``tile_row``
+ - Specifies the row of the current tile. Equivalent to "TileRow" in
+ :ref:`av1`.
+ * - __u32
+ - ``tile_col``
+ - Specifies the column of the current tile. Equivalent to "TileColumn" in
+ :ref:`av1`.
+
+.. c:type:: v4l2_av1_warp_model
+
+ AV1 Warp Model as described in section 3 "Symbols and abbreviated terms" of
+ :ref:`av1`.
+
+.. raw:: latex
+
+ \scriptsize
+
+.. tabularcolumns:: |p{7.4cm}|p{0.3cm}|p{9.6cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_WARP_MODEL_IDENTITY``
+ - 0
+ - Warp model is just an identity transform.
+ * - ``V4L2_AV1_WARP_MODEL_TRANSLATION``
+ - 1
+ - Warp model is a pure translation.
+ * - ``V4L2_AV1_WARP_MODEL_ROTZOOM``
+ - 2
+ - Warp model is a rotation + symmetric zoom + translation.
+ * - ``V4L2_AV1_WARP_MODEL_AFFINE``
+ - 3
+ - Warp model is a general affine transform.
+
+.. c:type:: v4l2_av1_reference_frame
+
+AV1 Reference Frames as described in section 6.10.24 "Ref frames semantics"
+of :ref:`av1`.
+
+.. raw:: latex
+
+ \scriptsize
+
+.. tabularcolumns:: |p{7.4cm}|p{0.3cm}|p{9.6cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_REF_INTRA_FRAME``
+ - 0
+ - Intra Frame Reference.
+ * - ``V4L2_AV1_REF_LAST_FRAME``
+ - 1
+ - Last Frame Reference.
+ * - ``V4L2_AV1_REF_LAST2_FRAME``
+ - 2
+ - Last2 Frame Reference.
+ * - ``V4L2_AV1_REF_LAST3_FRAME``
+ - 3
+ - Last3 Frame Reference.
+ * - ``V4L2_AV1_REF_GOLDEN_FRAME``
+ - 4
+ - Golden Frame Reference.
+ * - ``V4L2_AV1_REF_BWDREF_FRAME``
+ - 5
+ - BWD Frame Reference.
+ * - ``V4L2_AV1_REF_ALTREF2_FRAME``
+ - 6
+ - ALTREF2 Frame Reference.
+ * - ``V4L2_AV1_REF_ALTREF_FRAME``
+ - 7
+ - ALTREF Frame Reference.
+
+.. c:type:: v4l2_av1_global_motion
+
+AV1 Global Motion parameters as described in section 6.8.17
+"Global motion params semantics" of :ref:`av1`.
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
+
+.. flat-table:: struct v4l2_av1_global_motion
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``flags[V4L2_AV1_TOTAL_REFS_PER_FRAME]``
+ - A bitfield containing the flags per reference frame. See
+ :ref:`AV1 Global Motion Flags <av1_global_motion_flags>` for more
+ details.
+ * - enum :c:type:`v4l2_av1_warp_model`
+ - ``type[V4L2_AV1_TOTAL_REFS_PER_FRAME]``
+ - The type of global motion transform used.
+ * - __s32
+ - ``params[V4L2_AV1_TOTAL_REFS_PER_FRAME][6]``
+ - This field has the same meaning as "gm_params" in :ref:`av1`.
+ * - __u8
+ - ``invalid``
+ - Bitfield indicating whether the global motion params are invalid for a
+ given reference frame. See section 7.11.3.6 Setup shear process and the
+ variable "warpValid". Use V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) to
+ create a suitable mask.
+ * - __u8
+ - ``reserved[3]``
+ - Applications and drivers must set this to zero.
+
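+For illustration, the ``invalid`` bitfield above could be filled as in this
+minimal sketch, which marks the LAST_FRAME global motion parameters as
+invalid (i.e. warpValid equal to 0 for that reference):
+
+.. code-block:: c
+
+    #include <linux/videodev2.h>
+
+    static void mark_last_frame_invalid(struct v4l2_av1_global_motion *gm)
+    {
+            /* Build the per-reference mask with the helper macro. */
+            gm->invalid |= V4L2_AV1_GLOBAL_MOTION_IS_INVALID(V4L2_AV1_REF_LAST_FRAME);
+    }
+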
+.. _av1_global_motion_flags:
+
+``AV1 Global Motion Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_GLOBAL_MOTION_FLAG_IS_GLOBAL``
+ - 0x00000001
+ - Specifies whether global motion parameters are present for a particular
+ reference frame.
+ * - ``V4L2_AV1_GLOBAL_MOTION_FLAG_IS_ROT_ZOOM``
+ - 0x00000002
+ - Specifies whether a particular reference frame uses rotation and zoom
+ global motion.
+ * - ``V4L2_AV1_GLOBAL_MOTION_FLAG_IS_TRANSLATION``
+ - 0x00000004
+ - Specifies whether a particular reference frame uses translation global
+ motion.
+
+.. c:type:: v4l2_av1_frame_restoration_type
+
+AV1 Frame Restoration Type.
+
+.. raw:: latex
+
+ \scriptsize
+
+.. tabularcolumns:: |p{7.4cm}|p{0.3cm}|p{9.6cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_FRAME_RESTORE_NONE``
+ - 0
+ - No filtering is applied.
+ * - ``V4L2_AV1_FRAME_RESTORE_WIENER``
+ - 1
+ - Wiener filter process is invoked.
+ * - ``V4L2_AV1_FRAME_RESTORE_SGRPROJ``
+ - 2
+ - Self guided filter process is invoked.
+ * - ``V4L2_AV1_FRAME_RESTORE_SWITCHABLE``
+ - 3
+ - Restoration filter is switchable.
+
+.. c:type:: v4l2_av1_loop_restoration
+
+AV1 Loop Restoration as described in section 6.10.15 "Loop restoration params
+semantics" of :ref:`av1`.
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
+
+.. flat-table:: struct v4l2_av1_loop_restoration
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``flags``
+ - See :ref:`AV1 Loop Restoration Flags <av1_loop_restoration_flags>`.
+ * - __u8
+ - ``lr_unit_shift``
+ - Specifies if the luma restoration size should be halved.
+ * - __u8
+ - ``lr_uv_shift``
+ - Specifies if the chroma size should be half the luma size.
+ * - __u8
+ - ``reserved``
+ - Applications and drivers must set this to zero.
+ * - :c:type:`v4l2_av1_frame_restoration_type`
+ - ``frame_restoration_type[V4L2_AV1_NUM_PLANES_MAX]``
+ - Specifies the type of restoration used for each plane.
+ * - __u8
+ - ``loop_restoration_size[V4L2_AV1_MAX_NUM_PLANES]``
+ - Specifies the size of loop restoration units in units of samples in the
+ current plane.
+
+.. _av1_loop_restoration_flags:
+
+``AV1 Loop Restoration Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR``
+ - 0x00000001
+ - Retains the same meaning as UsesLr in :ref:`av1`.
+ * - ``V4L2_AV1_LOOP_RESTORATION_FLAG_USES_CHROMA_LR``
+ - 0x00000002
+ - Retains the same meaning as UsesChromaLr in :ref:`av1`.
+
+.. c:type:: v4l2_av1_cdef
+
+AV1 CDEF params semantics as described in section 6.10.14 "CDEF params
+semantics" of :ref:`av1`.
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
+
+.. flat-table:: struct v4l2_av1_cdef
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``damping_minus_3``
+ - Controls the amount of damping in the deringing filter.
+ * - __u8
+ - ``bits``
+ - Specifies the number of bits needed to specify which CDEF filter to
+ apply.
+ * - __u8
+ - ``y_pri_strength[V4L2_AV1_CDEF_MAX]``
+ - Specifies the strength of the primary filter.
+ * - __u8
+ - ``y_sec_strength[V4L2_AV1_CDEF_MAX]``
+ - Specifies the strength of the secondary filter.
+ * - __u8
+ - ``uv_pri_strength[V4L2_AV1_CDEF_MAX]``
+ - Specifies the strength of the primary filter.
+ * - __u8
+ - ``uv_sec_strength[V4L2_AV1_CDEF_MAX]``
+ - Specifies the strength of the secondary filter.
+
+.. c:type:: v4l2_av1_segment_feature
+
+AV1 segment features as described in section 3 "Symbols and abbreviated terms"
+of :ref:`av1`.
+
+.. raw:: latex
+
+ \scriptsize
+
+.. tabularcolumns:: |p{7.4cm}|p{0.3cm}|p{9.6cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_SEG_LVL_ALT_Q``
+ - 0
+ - Index for quantizer segment feature.
+ * - ``V4L2_AV1_SEG_LVL_ALT_LF_Y_V``
+ - 1
+ - Index for vertical luma loop filter segment feature.
+ * - ``V4L2_AV1_SEG_LVL_REF_FRAME``
+ - 5
+ - Index for reference frame segment feature.
+ * - ``V4L2_AV1_SEG_LVL_REF_SKIP``
+ - 6
+ - Index for skip segment feature.
+ * - ``V4L2_AV1_SEG_LVL_REF_GLOBALMV``
+ - 7
+ - Index for global mv feature.
+ * - ``V4L2_AV1_SEG_LVL_MAX``
+ - 8
+ - Number of segment features.
+
+.. c:type:: v4l2_av1_segmentation
+
+AV1 Segmentation params as defined in section 6.8.13 "Segmentation params
+semantics" of :ref:`av1`.
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
+
+.. flat-table:: struct v4l2_av1_segmentation
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``flags``
+ - See :ref:`AV1 Segmentation Flags <av1_segmentation_flags>`
+ * - __u8
+ - ``last_active_seg_id``
+ - Indicates the highest numbered segment id that has some
+ enabled feature. This is used when decoding the segment id to only decode
+ choices corresponding to used segments.
+ * - __u8
+ - ``feature_enabled[V4L2_AV1_MAX_SEGMENTS]``
+ - Bitmask defining which features are enabled in each segment. Use
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask.
+ * - __u16
+ - ``feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX]``
+ - Data attached to each feature. Data entry is only valid if the feature
+ is enabled.
+
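+For illustration, enabling the quantizer (ALT_Q) feature for one segment
+could look like the following minimal sketch; the data value of 32 is just an
+arbitrary example:
+
+.. code-block:: c
+
+    #include <linux/videodev2.h>
+
+    static void enable_alt_q(struct v4l2_av1_segmentation *seg, unsigned int segment)
+    {
+            seg->flags |= V4L2_AV1_SEGMENTATION_FLAG_ENABLED;
+            /* Build the per-feature mask with the helper macro. */
+            seg->feature_enabled[segment] |=
+                    V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_Q);
+            seg->feature_data[segment][V4L2_AV1_SEG_LVL_ALT_Q] = 32;
+    }
+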
+.. _av1_segmentation_flags:
+
+``AV1 Segmentation Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_SEGMENTATION_FLAG_ENABLED``
+ - 0x00000001
+ - If set, indicates that this frame makes use of the segmentation tool. If
+ not set, indicates that the frame does not use segmentation.
+ * - ``V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP``
+ - 0x00000002
+ - If set, indicates that the segmentation map is updated during the
+ decoding of this frame. If not set, indicates that the segmentation map
+ from the previous frame is used.
+ * - ``V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE``
+ - 0x00000004
+ - If set, indicates that the updates to the segmentation map are coded
+ relative to the existing segmentation map. If not set, indicates that
+ the new segmentation map is coded without reference to the existing
+ segmentation map.
+ * - ``V4L2_AV1_SEGMENTATION_FLAG_UPDATE_DATA``
+ - 0x00000008
+ - If set, indicates that new parameters are about to be specified for each
+ segment. If not set, indicates that the existing segmentation parameters
+ should be kept.
+ * - ``V4L2_AV1_SEGMENTATION_FLAG_SEG_ID_PRE_SKIP``
+ - 0x00000010
+ - If set, indicates that the segment id will be read before the skip
+ syntax element. If not set, indicates that the skip syntax element will
+ be read first.
+
+.. c:type:: v4l2_av1_loop_filter
+
+AV1 Loop filter params as defined in section 6.8.10 "Loop filter semantics" of
+:ref:`av1`.
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
+
+.. flat-table:: struct v4l2_av1_loop_filter
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``flags``
+ - See
+ :ref:`AV1 Loop Filter flags <av1_loop_filter_flags>` for more details.
+ * - __u8
+ - ``level[4]``
+ - An array containing loop filter strength values. Different loop
+ filter strength values from the array are used depending on the image
+ plane being filtered, and the edge direction (vertical or horizontal)
+ being filtered.
+ * - __u8
+ - ``sharpness``
+ - Indicates the sharpness level. The loop_filter_level and
+ loop_filter_sharpness together determine when a block edge is filtered,
+ and by how much the filtering can change the sample values. The loop
+ filter process is described in section 7.14 of :ref:`av1`.
+ * - __u8
+ - ``ref_deltas[V4L2_AV1_TOTAL_REFS_PER_FRAME]``
+ - Contains the adjustment needed for the filter level based on the
+ chosen reference frame. If this syntax element is not present, it
+ maintains its previous value.
+ * - __u8
+ - ``mode_deltas[2]``
+ - Contains the adjustment needed for the filter level based on
+ the chosen mode. If this syntax element is not present, it maintains its
+ previous value.
+ * - __u8
+ - ``delta_lf_res``
+ - Specifies the left shift which should be applied to decoded loop filter
+ delta values.
+
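+One possible way to read the ``level[4]`` array, following the
+loop_filter_level semantics of :ref:`av1`, is sketched below; the strength
+values are arbitrary examples:
+
+.. code-block:: c
+
+    #include <linux/videodev2.h>
+
+    /* level[0]: luma, vertical edges; level[1]: luma, horizontal edges;
+     * level[2]: U plane; level[3]: V plane. */
+    static const struct v4l2_av1_loop_filter example_loop_filter = {
+            .level = { 9, 9, 5, 5 },
+            .sharpness = 0,
+    };
+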
+.. _av1_loop_filter_flags:
+
+``AV1 Loop Filter Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED``
+ - 0x00000001
+ - If set, means that the filter level depends on the mode and reference
+ frame used to predict a block. If not set, means that the filter level
+ does not depend on the mode and reference frame.
+ * - ``V4L2_AV1_LOOP_FILTER_FLAG_DELTA_UPDATE``
+ - 0x00000002
+ - If set, means that additional syntax elements are present that specify
+ which mode and reference frame deltas are to be updated. If not set,
+ means that these syntax elements are not present.
+ * - ``V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT``
+ - 0x00000004
+ - Specifies whether loop filter delta values are present.
+ * - ``V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI``
+ - 0x00000008
+ - If set, specifies that separate loop filter deltas are sent for
+ horizontal luma edges, vertical luma edges, the U edges, and the V edges.
+ If not set, specifies that the same loop filter delta is used for all
+ edges.
+
+.. c:type:: v4l2_av1_quantization
+
+AV1 Quantization params as defined in section 6.8.11 "Quantization params
+semantics" of :ref:`av1`.
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
+
+.. flat-table:: struct v4l2_av1_quantization
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``flags``
+ - See
+ :ref:`AV1 Quantization Flags <av1_quantization_flags>` for more details.
+ * - __u8
+ - ``base_q_idx``
+ - Indicates the base frame qindex. This is used for Y AC coefficients and
+ as the base value for the other quantizers.
+ * - __u8
+ - ``delta_q_y_dc``
+ - Indicates the Y DC quantizer relative to base_q_idx.
+ * - __u8
+ - ``delta_q_u_dc``
+ - Indicates the U DC quantizer relative to base_q_idx.
+ * - __u8
+ - ``delta_q_u_ac``
+ - Indicates the U AC quantizer relative to base_q_idx.
+ * - __u8
+ - ``delta_q_v_dc``
+ - Indicates the V DC quantizer relative to base_q_idx.
+ * - __u8
+ - ``delta_q_v_ac``
+ - Indicates the V AC quantizer relative to base_q_idx.
+ * - __u8
+ - ``qm_y``
+ - Specifies the level in the quantizer matrix that should be used for
+ luma plane decoding.
+ * - __u8
+ - ``qm_u``
+ - Specifies the level in the quantizer matrix that should be used for
+ chroma U plane decoding.
+ * - __u8
+ - ``qm_v``
+ - Specifies the level in the quantizer matrix that should be used for
+ chroma V plane decoding.
+ * - __u8
+ - ``delta_q_res``
+ - Specifies the left shift which should be applied to decoded quantizer
+ index delta values.
+
+.. _av1_quantization_flags:
+
+``AV1 Quantization Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_QUANTIZATION_FLAG_DIFF_UV_DELTA``
+ - 0x00000001
+ - If set, indicates that the U and V delta quantizer values are coded
+ separately. If not set, indicates that the U and V delta quantizer
+ values share a common value.
+ * - ``V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX``
+ - 0x00000002
+ - If set, specifies that the quantizer matrix will be used to compute
+ quantizers.
+ * - ``V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT``
+ - 0x00000004
+ - Specifies whether quantizer index delta values are present.
+
+.. c:type:: v4l2_av1_tile_info
+
+AV1 Tile info as defined in section 6.8.14 "Tile info semantics" of :ref:`av1`.
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
+
+.. flat-table:: struct v4l2_av1_tile_info
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``flags``
+ - See
+ :ref:`AV1 Tile Info flags <av1_tile_info_flags>` for more details.
+ * - __u8
+ - ``context_update_tile_id``
+ - Specifies which tile to use for the CDF update.
+ * - __u8
+ - ``tile_cols``
+ - Specifies the number of tiles across the frame.
+ * - __u8
+ - ``tile_rows``
+ - Specifies the number of tiles down the frame.
+ * - __u32
+ - ``mi_col_starts[V4L2_AV1_MAX_TILE_COLS + 1]``
+ - An array specifying the start column (in units of 4x4 luma
+ samples) for each tile across the image.
+ * - __u32
+ - ``mi_row_starts[V4L2_AV1_MAX_TILE_ROWS + 1]``
+ - An array specifying the start row (in units of 4x4 luma
+ samples) for each tile down the image.
+ * - __u32
+ - ``width_in_sbs_minus_1[V4L2_AV1_MAX_TILE_COLS]``
+ - Specifies the width of a tile minus 1 in units of superblocks.
+ * - __u32
+ - ``height_in_sbs_minus_1[V4L2_AV1_MAX_TILE_ROWS]``
+ - Specifies the height of a tile minus 1 in units of superblocks.
+ * - __u8
+ - ``tile_size_bytes``
+ - Specifies the number of bytes needed to code each tile size.
+ * - __u8
+ - ``reserved[3]``
+ - Applications and drivers must set this to zero.
+
+.. _av1_tile_info_flags:
+
+``AV1 Tile Info Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING``
+ - 0x00000001
+ - If set, means that the tiles are uniformly spaced across the frame. (In
+ other words, all tiles are the same size except for the ones at the
+ right and bottom edges, which can be smaller). If not set, means that the
+ tile sizes are coded.
+
+.. c:type:: v4l2_av1_frame_type
+
+AV1 Frame Type
+
+.. raw:: latex
+
+ \scriptsize
+
+.. tabularcolumns:: |p{7.4cm}|p{0.3cm}|p{9.6cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_KEY_FRAME``
+ - 0
+ - Key frame.
+ * - ``V4L2_AV1_INTER_FRAME``
+ - 1
+ - Inter frame.
+ * - ``V4L2_AV1_INTRA_ONLY_FRAME``
+ - 2
+ - Intra-only frame.
+ * - ``V4L2_AV1_SWITCH_FRAME``
+ - 3
+ - Switch frame.
+
+.. c:type:: v4l2_av1_interpolation_filter
+
+AV1 Interpolation Filter
+
+.. raw:: latex
+
+ \scriptsize
+
+.. tabularcolumns:: |p{7.4cm}|p{0.3cm}|p{9.6cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP``
+ - 0
+ - Eight tap filter.
+ * - ``V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH``
+ - 1
+ - Eight tap smooth filter.
+ * - ``V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP``
+ - 2
+ - Eight tap sharp filter.
+ * - ``V4L2_AV1_INTERPOLATION_FILTER_BILINEAR``
+ - 3
+ - Bilinear filter.
+ * - ``V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE``
+ - 4
+ - Filter selection is signaled at the block level.
+
+.. c:type:: v4l2_av1_tx_mode
+
+AV1 Tx mode as described in section 6.8.21 "TX mode semantics" of :ref:`av1`.
+
+.. raw:: latex
+
+ \scriptsize
+
+.. tabularcolumns:: |p{7.4cm}|p{0.3cm}|p{9.6cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_TX_MODE_ONLY_4X4``
+ - 0
+ - The inverse transform will use only 4x4 transforms.
+ * - ``V4L2_AV1_TX_MODE_LARGEST``
+ - 1
+ - The inverse transform will use the largest transform size that fits
+ inside the block.
+ * - ``V4L2_AV1_TX_MODE_SELECT``
+ - 2
+ - The choice of transform size is specified explicitly for each block.
+
+``V4L2_CID_STATELESS_AV1_FRAME (struct)``
+ Represents a Frame Header OBU. See 6.8 "Frame Header OBU semantics" of
+ :ref:`av1` for more details.
+
+.. c:type:: v4l2_ctrl_av1_frame
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{5.8cm}|p{4.8cm}|p{6.6cm}|
+
+.. flat-table:: struct v4l2_ctrl_av1_frame
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - struct :c:type:`v4l2_av1_tile_info`
+ - ``tile_info``
+ - Tile info
+ * - struct :c:type:`v4l2_av1_quantization`
+ - ``quantization``
+ - Quantization parameters.
+ * - struct :c:type:`v4l2_av1_segmentation`
+ - ``segmentation``
+ - Segmentation parameters.
+ * - __u8
+ - ``superres_denom``
+ - The denominator for the upscaling ratio.
+ * - struct :c:type:`v4l2_av1_loop_filter`
+ - ``loop_filter``
+ - Loop filter params
+ * - struct :c:type:`v4l2_av1_cdef`
+ - ``cdef``
+ - CDEF params
+ * - __u8
+ - ``skip_mode_frame[2]``
+ - Specifies the frames to use for compound prediction when skip_mode is
+ equal to 1.
+ * - __u8
+ - ``primary_ref_frame``
+ - Specifies which reference frame contains the CDF values and other state
+ that should be loaded at the start of the frame.
+ * - struct :c:type:`v4l2_av1_loop_restoration`
+ - ``loop_restoration``
+ - Loop restoration parameters.
+ * - struct :c:type:`v4l2_av1_global_motion`
+ - ``global_motion``
+ - Global motion parameters.
+ * - __u32
+ - ``flags``
+ - See
+ :ref:`AV1 Frame flags <av1_frame_flags>` for more details.
+ * - enum :c:type:`v4l2_av1_frame_type`
+ - ``frame_type``
+ - Specifies the AV1 frame type
+ * - __u32
+ - ``order_hint``
+ - Specifies OrderHintBits least significant bits of the expected output
+ order for this frame.
+ * - __u32
+ - ``upscaled_width``
+ - The upscaled width.
+ * - enum :c:type:`v4l2_av1_interpolation_filter`
+ - ``interpolation_filter``
+ - Specifies the filter selection used for performing inter prediction.
+ * - enum :c:type:`v4l2_av1_tx_mode`
+ - ``tx_mode``
+ - Specifies how the transform size is determined.
+ * - __u32
+ - ``frame_width_minus_1``
+ - Add 1 to get the frame's width.
+ * - __u32
+ - ``frame_height_minus_1``
+ - Add 1 to get the frame's height.
+ * - __u16
+ - ``render_width_minus_1``
+ - Add 1 to get the render width of the frame in luma samples.
+ * - __u16
+ - ``render_height_minus_1``
+ - Add 1 to get the render height of the frame in luma samples.
+ * - __u32
+ - ``current_frame_id``
+ - Specifies the frame id number for the current frame. Frame
+ id numbers are additional information that do not affect the decoding
+ process, but provide decoders with a way of detecting missing reference
+ frames so that appropriate action can be taken.
+ * - __u8
+ - ``buffer_removal_time[V4L2_AV1_MAX_OPERATING_POINTS]``
+ - Specifies the frame removal time in units of DecCT clock ticks counted
+ from the removal time of the last random access point for operating point
+ opNum.
+ * - __u8
+ - ``reserved[4]``
+ - Applications and drivers must set this to zero.
+ * - __u32
+ - ``order_hints[V4L2_AV1_TOTAL_REFS_PER_FRAME]``
+ - Specifies the expected output order hint for each reference frame.
+ This field corresponds to the OrderHints variable from the specification
+ (section 5.9.2 "Uncompressed header syntax"). As such, this is only
+ used for non-intra frames and ignored otherwise. order_hints[0] is
+ always ignored.
+ * - __u64
+ - ``reference_frame_ts[V4L2_AV1_TOTAL_REFS_PER_FRAME]``
+ - The V4L2 timestamp for each of the reference frames enumerated in
+ enum :c:type:`v4l2_av1_reference_frame` starting at
+ ``V4L2_AV1_REF_LAST_FRAME``. This represents the state of reference
+ slot as described in the spec and updated by userland through the
+ "Reference frame update process" in section 7.20. The timestamp refers
+ to the ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
+ :c:func:`v4l2_timeval_to_ns()` function to convert the struct
+ :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
+ * - __s8
+ - ``ref_frame_idx[V4L2_AV1_REFS_PER_FRAME]``
+ - An index into ``reference_frame_ts`` representing the ordered list of
+ references used by this inter frame. Matches the bitstream syntax
+ element of the same name.
+ * - __u8
+ - ``refresh_frame_flags``
+ - Contains a bitmask that specifies which reference frame slots will be
+ updated with the current frame after it is decoded.
+
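+For illustration, the timestamp of a previously decoded reference frame can
+be derived from its struct :c:type:`v4l2_buffer` as in the sketch below,
+which performs the same nanosecond conversion as
+:c:func:`v4l2_timeval_to_ns()`:
+
+.. code-block:: c
+
+    #include <linux/videodev2.h>
+
+    /* V4L2 buffer timestamps are a struct timeval; the control expects the
+     * equivalent value in nanoseconds. */
+    static __u64 buffer_timestamp_ns(const struct v4l2_buffer *buf)
+    {
+            return buf->timestamp.tv_sec * 1000000000ULL +
+                   buf->timestamp.tv_usec * 1000ULL;
+    }
+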
+.. _av1_frame_flags:
+
+``AV1 Frame Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_FRAME_FLAG_SHOW_FRAME``
+ - 0x00000001
+ - If set, specifies that this frame should be immediately output once
+ decoded. If not set, specifies that this frame should not be immediately
+ output; it may be output later if a later uncompressed header uses
+ show_existing_frame equal to 1.
+ * - ``V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME``
+ - 0x00000002
+ - If set, specifies that the frame may be output using the
+ show_existing_frame mechanism. If not set, specifies that this frame
+ will not be output using the show_existing_frame mechanism.
+ * - ``V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE``
+ - 0x00000004
+ - Specifies whether error resilient mode is enabled.
+ * - ``V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE``
+ - 0x00000008
+ - Specifies whether the CDF update in the symbol decoding process should
+ be disabled.
+ * - ``V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS``
+ - 0x00000010
+ - If set, indicates that intra blocks may use palette encoding. If not
+ set, indicates that palette encoding is never used.
+ * - ``V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV``
+ - 0x00000020
+ - If set, specifies that motion vectors will always be integers. If not
+ set, specifies that motion vectors can contain fractional bits.
+ * - ``V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC``
+ - 0x00000040
+ - If set, indicates that intra block copy may be used in this frame. If
+ not set, indicates that intra block copy is not allowed in this frame.
+ * - ``V4L2_AV1_FRAME_FLAG_USE_SUPERRES``
+ - 0x00000080
+ - If set, indicates that upscaling is needed.
+ * - ``V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV``
+ - 0x00000100
+ - If set, specifies that motion vectors are specified to eighth pel
+ precision. If not set, specifies that motion vectors are specified to
+ quarter pel precision.
+ * - ``V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE``
+ - 0x00000200
+ - If not set, specifies that only the SIMPLE motion mode will be used.
+ * - ``V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS``
+ - 0x00000400
+ - If set, specifies that motion vector information from a previous frame
+ can be used when decoding the current frame. If not set, specifies that
+ this information will not be used.
+ * - ``V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF``
+ - 0x00000800
+ - If set, indicates that the end of frame CDF update is disabled. If not
+ set, indicates that the end of frame CDF update is enabled.
+ * - ``V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION``
+ - 0x00001000
+ - If set, indicates that the syntax element motion_mode may be present. If
+ not set, indicates that the syntax element motion_mode will not be
+ present.
+ * - ``V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT``
+ - 0x00002000
+ - If set, specifies that the mode info for inter blocks contains the
+ syntax element comp_mode that indicates whether to use single or
+ compound reference prediction. If not set, specifies that all inter
+ blocks will use single prediction.
+ * - ``V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET``
+ - 0x00004000
+ - If set, specifies that the frame is restricted to a reduced subset of
+ the full set of transform types.
+ * - ``V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED``
+ - 0x00008000
+ - This flag retains the same meaning as SkipModeAllowed in :ref:`av1`.
+ * - ``V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT``
+ - 0x00010000
+ - If set, specifies that the syntax element skip_mode will be present. If
+ not set, specifies that skip_mode will not be used for this frame.
+ * - ``V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE``
+ - 0x00020000
+ - If set, specifies that the frame size will either be specified as the
+ size of one of the reference frames, or computed from the
+ frame_width_minus_1 and frame_height_minus_1 syntax elements. If not
+ set, specifies that the frame size is equal to the size in the sequence
+ header.
+ * - ``V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT``
+ - 0x00040000
+ - If set, specifies that buffer_removal_time is present. If not set,
+ specifies that buffer_removal_time is not present.
+ * - ``V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING``
+ - 0x00080000
+ - If set, indicates that only two reference frames are explicitly
+ signaled. If not set, indicates that all reference frames are explicitly
+ signaled.
+
+``V4L2_CID_STATELESS_AV1_FILM_GRAIN (struct)``
+ Represents the optional film grain parameters. See section
+ 6.8.20 "Film grain params semantics" of :ref:`av1` for more details.
+
+.. c:type:: v4l2_ctrl_av1_film_grain
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
+
+.. flat-table:: struct v4l2_ctrl_av1_film_grain
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``flags``
+ - See :ref:`AV1 Film Grain Flags <av1_film_grain_flags>`.
+ * - __u8
+ - ``cr_mult``
+ - Represents a multiplier for the cr component used in derivation of the
+ input index to the cr component scaling function.
+ * - __u16
+ - ``grain_seed``
+ - Specifies the starting value for the pseudo-random numbers used during
+ film grain synthesis.
+ * - __u8
+ - ``film_grain_params_ref_idx``
+ - Indicates which reference frame contains the film grain parameters to be
+ used for this frame.
+ * - __u8
+ - ``num_y_points``
+ - Specifies the number of points for the piece-wise linear scaling
+ function of the luma component.
+ * - __u8
+ - ``point_y_value[V4L2_AV1_MAX_NUM_Y_POINTS]``
+ - Represents the x (luma value) coordinate for the i-th point
+ of the piecewise linear scaling function for luma component. The values
+ are signaled on the scale of 0..255. In case of 10 bit video, these
+ values correspond to luma values divided by 4. In case of 12 bit video,
+ these values correspond to luma values divided by 16.
+ * - __u8
+ - ``point_y_scaling[V4L2_AV1_MAX_NUM_Y_POINTS]``
+ - Represents the scaling (output) value for the i-th point
+ of the piecewise linear scaling function for luma component.
+ * - __u8
+ - ``num_cb_points``
+ - Specifies the number of points for the piece-wise linear scaling
+ function of the cb component.
+ * - __u8
+ - ``point_cb_value[V4L2_AV1_MAX_NUM_CB_POINTS]``
+ - Represents the x coordinate for the i-th point of the
+ piece-wise linear scaling function for cb component. The values are
+ signaled on the scale of 0..255.
+ * - __u8
+ - ``point_cb_scaling[V4L2_AV1_MAX_NUM_CB_POINTS]``
+ - Represents the scaling (output) value for the i-th point of the
+ piecewise linear scaling function for cb component.
+ * - __u8
+ - ``num_cr_points``
+ - Represents the number of points for the piece-wise
+ linear scaling function of the cr component.
+ * - __u8
+ - ``point_cr_value[V4L2_AV1_MAX_NUM_CR_POINTS]``
+ - Represents the x coordinate for the i-th point of the
+ piece-wise linear scaling function for cr component. The values are
+ signaled on the scale of 0..255.
+ * - __u8
+ - ``point_cr_scaling[V4L2_AV1_MAX_NUM_CR_POINTS]``
+ - Represents the scaling (output) value for the i-th point of the
+ piecewise linear scaling function for cr component.
+ * - __u8
+ - ``grain_scaling_minus_8``
+ - Represents the shift - 8 applied to the values of the chroma component.
+ The grain_scaling_minus_8 can take values of 0..3 and determines the
+ range and quantization step of the standard deviation of film grain.
+ * - __u8
+ - ``ar_coeff_lag``
+ - Specifies the number of auto-regressive coefficients for luma and
+ chroma.
+ * - __u8
+ - ``ar_coeffs_y_plus_128[V4L2_AV1_AR_COEFFS_SIZE]``
+ - Specifies auto-regressive coefficients used for the Y plane.
+ * - __u8
+ - ``ar_coeffs_cb_plus_128[V4L2_AV1_AR_COEFFS_SIZE]``
+ - Specifies auto-regressive coefficients used for the U plane.
+ * - __u8
+ - ``ar_coeffs_cr_plus_128[V4L2_AV1_AR_COEFFS_SIZE]``
+ - Specifies auto-regressive coefficients used for the V plane.
+ * - __u8
+ - ``ar_coeff_shift_minus_6``
+ - Specifies the range of the auto-regressive coefficients. Values of 0,
+ 1, 2, and 3 correspond to the ranges for auto-regressive coefficients of
+ [-2, 2), [-1, 1), [-0.5, 0.5) and [-0.25, 0.25) respectively.
+ * - __u8
+ - ``grain_scale_shift``
+ - Specifies how much the Gaussian random numbers should be scaled down
+ during the grain synthesis process.
+ * - __u8
+ - ``cb_mult``
+ - Represents a multiplier for the cb component used in derivation of the
+ input index to the cb component scaling function.
+ * - __u8
+ - ``cb_luma_mult``
+ - Represents a multiplier for the average luma component used in
+ derivation of the input index to the cb component scaling function.
+ * - __u8
+ - ``cr_luma_mult``
+ - Represents a multiplier for the average luma component used in
+ derivation of the input index to the cr component scaling function.
+ * - __u16
+ - ``cb_offset``
+ - Represents an offset used in derivation of the input index to the
+ cb component scaling function.
+ * - __u16
+ - ``cr_offset``
+ - Represents an offset used in derivation of the input index to the
+ cr component scaling function.
+ * - __u8
+ - ``reserved[4]``
+ - Applications and drivers must set this to zero.
+
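+As an illustration of how the luma scaling points are meant to be used, the
+sketch below linearly interpolates between the (``point_y_value``,
+``point_y_scaling``) pairs; it is a simplified example, not the bit-exact
+procedure from :ref:`av1`:
+
+.. code-block:: c
+
+    #include <linux/videodev2.h>
+
+    /* Simplified linear interpolation between the signaled points; the AV1
+     * specification defines the exact fixed-point procedure. */
+    static int luma_scaling(const struct v4l2_ctrl_av1_film_grain *fg, int luma)
+    {
+            unsigned int i;
+
+            if (!fg->num_y_points)
+                    return 0;
+            if (luma <= fg->point_y_value[0])
+                    return fg->point_y_scaling[0];
+
+            for (i = 0; i + 1 < fg->num_y_points; i++) {
+                    int x0 = fg->point_y_value[i], x1 = fg->point_y_value[i + 1];
+                    int y0 = fg->point_y_scaling[i], y1 = fg->point_y_scaling[i + 1];
+
+                    if (luma < x1)
+                            return y0 + (luma - x0) * (y1 - y0) / (x1 - x0);
+            }
+
+            return fg->point_y_scaling[fg->num_y_points - 1];
+    }
+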
+.. _av1_film_grain_flags:
+
+``AV1 Film Grain Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN``
+ - 0x00000001
+ - If set, specifies that film grain should be added to this frame. If not
+ set, specifies that film grain should not be added.
+ * - ``V4L2_AV1_FILM_GRAIN_FLAG_UPDATE_GRAIN``
+ - 0x00000002
+ - If set, means that a new set of parameters should be sent. If not set,
+ specifies that the previous set of parameters should be used.
+ * - ``V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA``
+ - 0x00000004
+ - If set, specifies that the chroma scaling is inferred from the luma
+ scaling.
+ * - ``V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP``
+ - 0x00000008
+ - If set, indicates that the overlap between film grain blocks shall be
+ applied. If not set, indicates that the overlap between film grain blocks
+ shall not be applied.
+ * - ``V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE``
+ - 0x00000010
+ - If set, indicates that clipping to the restricted (studio, i.e. limited)
+ range shall be applied to the sample values after adding the film grain
+ (see the semantics for color_range for an explanation of studio swing).
+ If not set, indicates that clipping to the full range shall be applied
+ to the sample values after adding the film grain.
diff --git a/Documentation/userspace-api/media/v4l/meta-formats.rst b/Documentation/userspace-api/media/v4l/meta-formats.rst
index fff25357fe86..0bb61fc5bc00 100644
--- a/Documentation/userspace-api/media/v4l/meta-formats.rst
+++ b/Documentation/userspace-api/media/v4l/meta-formats.rst
@@ -12,10 +12,10 @@ These formats are used for the :ref:`metadata` interface only.
.. toctree::
:maxdepth: 1
- pixfmt-meta-d4xx
- pixfmt-meta-intel-ipu3
- pixfmt-meta-rkisp1
- pixfmt-meta-uvc
- pixfmt-meta-vsp1-hgo
- pixfmt-meta-vsp1-hgt
- pixfmt-meta-vivid
+ metafmt-d4xx
+ metafmt-intel-ipu3
+ metafmt-rkisp1
+ metafmt-uvc
+ metafmt-vsp1-hgo
+ metafmt-vsp1-hgt
+ metafmt-vivid
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-meta-d4xx.rst b/Documentation/userspace-api/media/v4l/metafmt-d4xx.rst
index 4e437ba97a0e..541836074f94 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-meta-d4xx.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-d4xx.rst
@@ -12,7 +12,7 @@ Intel D4xx UVC Cameras Metadata
Description
===========
-Intel D4xx (D435 and other) cameras include per-frame metadata in their UVC
+Intel D4xx (D435, D455 and others) cameras include per-frame metadata in their UVC
payload headers, following the Microsoft(R) UVC extension proposal [1_]. That
means, that the private D4XX metadata, following the standard UVC header, is
organised in blocks. D4XX cameras implement several standard block types,
@@ -26,6 +26,8 @@ V4L2_META_FMT_UVC with the only difference, that it also includes proprietary
payload header data. D4xx cameras use bulk transfers and only send one payload
per frame, therefore their headers cannot be larger than 255 bytes.
+This document describes Intel Configuration version 3 [9_].
+
Below are proprietary Microsoft style metadata types, used by D4xx cameras,
where all fields are in little endian order:
@@ -43,10 +45,10 @@ where all fields are in little endian order:
* - __u32 ID
- 0x80000000
* - __u32 Size
- - Size in bytes (currently 56)
+ - Size in bytes, including the ID field (all protocol versions: 60)
* - __u32 Version
- - Version of this structure. The documentation herein corresponds to
- version xxx. The version number will be incremented when new fields are
+ - Version of this structure. The documentation herein covers versions 1,
+ 2 and 3. The version number will be incremented when new fields are
added.
* - __u32 Flags
- A bitmask of flags: see [2_] below
@@ -72,13 +74,17 @@ where all fields are in little endian order:
- Bottom border of the AE Region of Interest
* - __u32 Preset
- Preset selector value, default: 0, unless changed by the user
- * - __u32 Laser mode
- - 0: off, 1: on
+ * - __u8 Emitter mode (v3 only) (__u32 Laser mode for v1) [8_]
+ - 0: off, 1: on, same as __u32 Laser mode for v1
+ * - __u8 RFU byte (v3 only)
+ - Spare byte for future use
+ * - __u16 LED Power (v3 only)
+ - LED power value 0-360 (F416 SKU)
* - :cspan:`1` *Capture Timing*
* - __u32 ID
- 0x80000001
* - __u32 Size
- - Size in bytes (currently 40)
+ - Size in bytes, including the ID field (all protocol versions: 40)
* - __u32 Version
- Version of this structure. The documentation herein corresponds to
version xxx. The version number will be incremented when new fields are
@@ -101,7 +107,7 @@ where all fields are in little endian order:
* - __u32 ID
- 0x80000002
* - __u32 Size
- - Size in bytes (currently 40)
+ - Size in bytes, including the ID field (v1: 36, v3: 40)
* - __u32 Version
- Version of this structure. The documentation herein corresponds to
version xxx. The version number will be incremented when new fields are
@@ -124,6 +130,14 @@ where all fields are in little endian order:
- Requested frame rate per second
* - __u16 Trigger
- Byte 0: bit 0: depth and RGB are synchronised, bit 1: external trigger
+ * - __u16 Calibration count (v3 only)
+ - Calibration counter, see [4_] below
+ * - __u8 GPIO input data (v3 only)
+ - GPIO readout, see [4_] below (Supported from FW 5.12.7.0)
+ * - __u32 Sub-preset info (v3 only)
+ - Sub-preset choice information, see [4_] below
+ * - __u8 reserved (v3 only)
+ - RFU byte.
.. _1:
@@ -140,6 +154,8 @@ where all fields are in little endian order:
0x00000010 Exposure priority
0x00000020 AE ROI
0x00000040 Preset
+ 0x00000080 Emitter mode
+ 0x00000100 LED Power
.. _3:
@@ -165,6 +181,8 @@ where all fields are in little endian order:
0x00000040 Framerate
0x00000080 Trigger
0x00000100 Cal count
+ 0x00000200 GPIO Input Data
+ 0x00000400 Sub-preset Info
.. _5:
@@ -211,3 +229,24 @@ Left sensor: ::
Fish Eye sensor: ::
1 RAW8
+
+.. _8:
+
+[8] The "Laser mode" field has been replaced in version 3 by three different
+fields. "Laser" has been renamed to "Emitter" as there are multiple
+technologies for camera projectors. Since a separate "Laser Power" field
+already exists, an additional "LED Power" field was introduced for the extra
+emitter.
+
+The "Laser mode" __u32 field has been split into: ::
+ 1 __u8 Emitter mode
+ 2 __u8 RFU byte
+ 3 __u16 LED Power
+
+This change was made between versions 1 and 3. Versions 1, 2 and 3 share the
+same data format, are backward compatible and are all supported. See [2_] for
+which attributes are valid.
+
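+A hypothetical C view of the three version 3 fields (the struct name is
+illustrative only), assuming tight packing and the little endian order used
+throughout this format: ::
+
+  #include <linux/types.h>
+
+  struct d4xx_emitter_fields {
+          __u8  emitter_mode;   /* 0: off, 1: on */
+          __u8  rfu;            /* spare byte for future use */
+          __u16 led_power;      /* 0-360 (F416 SKU) */
+  } __attribute__((packed));
+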
+.. _9:
+
+[9] LibRealSense SDK metadata source:
+https://github.com/IntelRealSense/librealsense/blob/master/src/metadata.h
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst b/Documentation/userspace-api/media/v4l/metafmt-intel-ipu3.rst
index 84d81dd7a7b5..84d81dd7a7b5 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-intel-ipu3.rst
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-meta-rkisp1.rst b/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
index fa04f00bcd2e..fa04f00bcd2e 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-meta-rkisp1.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-meta-uvc.rst b/Documentation/userspace-api/media/v4l/metafmt-uvc.rst
index 784346d14bbd..784346d14bbd 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-meta-uvc.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-uvc.rst
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-meta-vivid.rst b/Documentation/userspace-api/media/v4l/metafmt-vivid.rst
index 7173e2c3e245..7173e2c3e245 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-meta-vivid.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-vivid.rst
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-meta-vsp1-hgo.rst b/Documentation/userspace-api/media/v4l/metafmt-vsp1-hgo.rst
index 8d886feb180c..8d886feb180c 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-meta-vsp1-hgo.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-vsp1-hgo.rst
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-meta-vsp1-hgt.rst b/Documentation/userspace-api/media/v4l/metafmt-vsp1-hgt.rst
index d8830ff605de..d8830ff605de 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-meta-vsp1-hgt.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-vsp1-hgt.rst
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-compressed.rst b/Documentation/userspace-api/media/v4l/pixfmt-compressed.rst
index 06b78e5589d2..806ed73ac474 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-compressed.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-compressed.rst
@@ -258,6 +258,22 @@ Compressed Formats
RV9 players - the format and decoder did not change, only
the encoder did. As a result, it uses the same FourCC.
+ * .. _V4L2-PIX-FMT-AV1-FRAME:
+
+ - ``V4L2_PIX_FMT_AV1_FRAME``
+ - 'AV1F'
+ - AV1 parsed frame, including the frame header, as extracted from the container.
+ This format is adapted for stateless video decoders that implement an AV1
+ pipeline with the :ref:`stateless_decoder`. Metadata associated with the
+ frame to decode is required to be passed through the
+ ``V4L2_CID_STATELESS_AV1_SEQUENCE``, ``V4L2_CID_STATELESS_AV1_FRAME``,
+ and ``V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY`` controls.
+ See the :ref:`associated Codec Control IDs <v4l2-codec-stateless-av1>`.
+ Exactly one output and one capture buffer must be provided for use with
+ this pixel format. The output buffer must contain the appropriate number
+ of macroblocks to decode a full corresponding frame to the matching
+ capture buffer.
+
.. raw:: latex
\normalsize
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
index 72324274f20c..1840224faa41 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
@@ -137,6 +137,13 @@ All components are stored with the same number of bits per component.
- Cb, Cr
- No
- Linear
+ * - V4L2_PIX_FMT_NV15_4L4
+ - 'VT15'
+ - 15
+ - 4:2:0
+ - Cb, Cr
+ - Yes
+ - 4x4 tiles
* - V4L2_PIX_FMT_NV16
- 'NV16'
- 8
@@ -378,6 +385,15 @@ two non-contiguous planes.
Example V4L2_PIX_FMT_NV12MT memory layout of tiles
+.. _V4L2-PIX-FMT-NV15-4L4:
+
+Tiled NV15
+----------
+
+Semi-planar 10-bit YUV 4:2:0 format, using 4x4 tiling.
+All components are packed without any padding between each other.
+As a side effect, each group of 4 components is stored over 5 bytes
+(YYYY or UVUV = 4 * 10 bits = 40 bits = 5 bytes).
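+
+For example, each group of four luma samples occupies exactly 5 bytes, so one
+4x4 luma tile (16 samples) takes 16 * 10 / 8 = 20 bytes.
+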
.. _V4L2-PIX-FMT-NV16:
.. _V4L2-PIX-FMT-NV61:
diff --git a/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst b/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst
index 5292d5e1a91f..f9f73530a6be 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst
@@ -185,12 +185,12 @@ still cause this situation.
- ``p_u32``
- A pointer to a matrix control of unsigned 32-bit values. Valid if
this control is of type ``V4L2_CTRL_TYPE_U32``.
- * - __u32 *
+ * - __s32 *
- ``p_s32``
- A pointer to a matrix control of signed 32-bit values. Valid if
this control is of type ``V4L2_CTRL_TYPE_INTEGER`` and
``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is set.
- * - __u32 *
+ * - __s64 *
- ``p_s64``
- A pointer to a matrix control of signed 64-bit values. Valid if
this control is of type ``V4L2_CTRL_TYPE_INTEGER64`` and
@@ -279,6 +279,22 @@ still cause this situation.
- ``p_hevc_decode_params``
- A pointer to a struct :c:type:`v4l2_ctrl_hevc_decode_params`. Valid if this
control is of type ``V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS``.
+ * - struct :c:type:`v4l2_ctrl_av1_sequence` *
+ - ``p_av1_sequence``
+ - A pointer to a struct :c:type:`v4l2_ctrl_av1_sequence`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_AV1_SEQUENCE``.
+ * - struct :c:type:`v4l2_ctrl_av1_tile_group_entry` *
+ - ``p_av1_tile_group_entry``
+ - A pointer to a struct :c:type:`v4l2_ctrl_av1_tile_group_entry`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY``.
+ * - struct :c:type:`v4l2_ctrl_av1_frame` *
+ - ``p_av1_frame``
+ - A pointer to a struct :c:type:`v4l2_ctrl_av1_frame`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_AV1_FRAME``.
+ * - struct :c:type:`v4l2_ctrl_av1_film_grain` *
+ - ``p_av1_film_grain``
+ - A pointer to a struct :c:type:`v4l2_ctrl_av1_film_grain`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_AV1_FILM_GRAIN``.
* - void *
- ``ptr``
- A pointer to a compound type which can be an N-dimensional array
diff --git a/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst b/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst
index a20dfa2a933b..4d38acafe8e1 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst
@@ -525,6 +525,30 @@ See also the examples in :ref:`control`.
- n/a
- A struct :c:type:`v4l2_ctrl_vp9_frame`, containing VP9
frame decode parameters for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_AV1_SEQUENCE``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_av1_sequence`, containing AV1 Sequence OBU
+ decoding parameters for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_av1_tile_group_entry`, containing AV1 Tile Group
+ OBU decoding parameters for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_AV1_FRAME``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_av1_frame`, containing AV1 Frame/Frame
+ Header OBU decoding parameters for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_AV1_FILM_GRAIN``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_av1_film_grain`, containing AV1 Film Grain
+ parameters for stateless video decoders.
.. raw:: latex
diff --git a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
index 68ca343c3b44..2d6e3bbdd040 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
@@ -122,7 +122,7 @@ for all the route entries and call ``VIDIOC_SUBDEV_G_ROUTING`` again.
:widths: 3 1 4
* - V4L2_SUBDEV_ROUTE_FL_ACTIVE
- - 0
+ - 0x0001
- The route is enabled. Set by applications.
Return Value
diff --git a/Documentation/userspace-api/media/videodev2.h.rst.exceptions b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
index 2a589d34b80e..3e58aac4ef0b 100644
--- a/Documentation/userspace-api/media/videodev2.h.rst.exceptions
+++ b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
@@ -161,6 +161,10 @@ replace symbol V4L2_CTRL_TYPE_HEVC_PPS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_AV1_SEQUENCE :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_AV1_FRAME :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_AV1_FILM_GRAIN :c:type:`v4l2_ctrl_type`
# V4L2 capability defines
replace define V4L2_CAP_VIDEO_CAPTURE device-capabilities
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 96c4475539c2..c0ddd3035462 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8445,6 +8445,33 @@ structure.
When getting the Modified Change Topology Report value, the attr->addr
must point to a byte where the value will be stored or retrieved from.
+8.40 KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+---------------------------------------
+
+:Capability: KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+:Architectures: arm64
+:Type: vm
+:Parameters: arg[0] is the new split chunk size.
+:Returns: 0 on success, -EINVAL if any memslot was already created.
+
+This capability sets the chunk size used in Eager Page Splitting.
+
+Eager Page Splitting improves the performance of dirty-logging (used
+in live migrations) when guest memory is backed by huge-pages. It
+avoids splitting huge-pages (into PAGE_SIZE pages) on fault, by doing
+it eagerly when enabling dirty logging (with the
+KVM_MEM_LOG_DIRTY_PAGES flag for a memory region), or when using
+KVM_CLEAR_DIRTY_LOG.
+
+The chunk size specifies how many pages to break at a time, using a
+single allocation for each chunk. The bigger the chunk size, the more
+pages need to be allocated ahead of time.
+
+The chunk size needs to be a valid block size. The list of acceptable
+block sizes is exposed in KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES as a
+64-bit bitmap (each bit describing a block size). The default value is
+0, which disables eager page splitting.
+
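+To enable it, userspace calls KVM_ENABLE_CAP for this capability on the VM
+file descriptor before any memslot is created. A minimal sketch (using
+<linux/kvm.h> and <sys/ioctl.h>), assuming an open vm_fd and an example 2MiB
+chunk size that is advertised in KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES::
+
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE,
+          .args = { 2UL * 1024 * 1024 },   /* must be a supported block size */
+  };
+
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+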
9. Known KVM API problems
=========================
diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
index 487b6328b3e7..995780088eb2 100644
--- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
@@ -57,7 +57,7 @@ information, see the SEV Key Management spec [api-spec]_
The main ioctl to access SEV is KVM_MEMORY_ENCRYPT_OP. If the argument
to KVM_MEMORY_ENCRYPT_OP is NULL, the ioctl returns 0 if SEV is enabled
-and ``ENOTTY` if it is disabled (on some older versions of Linux,
+and ``ENOTTY`` if it is disabled (on some older versions of Linux,
the ioctl runs normally even with a NULL argument, and therefore will
likely return ``EFAULT``). If non-NULL, the argument to KVM_MEMORY_ENCRYPT_OP
must be a struct kvm_sev_cmd::
diff --git a/Documentation/virt/kvm/x86/mmu.rst b/Documentation/virt/kvm/x86/mmu.rst
index 8364afa228ec..26f62034b6f3 100644
--- a/Documentation/virt/kvm/x86/mmu.rst
+++ b/Documentation/virt/kvm/x86/mmu.rst
@@ -205,7 +205,7 @@ Shadow pages contain the following information:
role.passthrough:
The page is not backed by a guest page table, but its first entry
points to one. This is set if NPT uses 5-level page tables (host
- CR4.LA57=1) and is shadowing L1's 4-level NPT (L1 CR4.LA57=1).
+ CR4.LA57=1) and is shadowing L1's 4-level NPT (L1 CR4.LA57=0).
gfn:
Either the guest page table containing the translations shadowed by this
page, or the base page frame for linear translations. See role.direct.
diff --git a/MAINTAINERS b/MAINTAINERS
index 308b5250b7de..a634fa7d6754 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1,81 +1,5 @@
-List of maintainers and how to submit kernel changes
-====================================================
-
-Please try to follow the guidelines below. This will make things
-easier on the maintainers. Not all of these guidelines matter for every
-trivial patch so apply some common sense.
-
-Tips for patch submitters
--------------------------
-
-1. Always *test* your changes, however small, on at least 4 or
- 5 people, preferably many more.
-
-2. Try to release a few ALPHA test versions to the net. Announce
- them onto the kernel channel and await results. This is especially
- important for device drivers, because often that's the only way
- you will find things like the fact version 3 firmware needs
- a magic fix you didn't know about, or some clown changed the
- chips on a board and not its name. (Don't laugh! Look at the
- SMC etherpower for that.)
-
-3. Make sure your changes compile correctly in multiple
- configurations. In particular check that changes work both as a
- module and built into the kernel.
-
-4. When you are happy with a change make it generally available for
- testing and await feedback.
-
-5. Make a patch available to the relevant maintainer in the list. Use
- ``diff -u`` to make the patch easy to merge. Be prepared to get your
- changes sent back with seemingly silly requests about formatting
- and variable names. These aren't as silly as they seem. One
- job the maintainers (and especially Linus) do is to keep things
- looking the same. Sometimes this means that the clever hack in
- your driver to get around a problem actually needs to become a
- generalized kernel feature ready for next time.
-
- PLEASE check your patch with the automated style checker
- (scripts/checkpatch.pl) to catch trivial style violations.
- See Documentation/process/coding-style.rst for guidance here.
-
- PLEASE CC: the maintainers and mailing lists that are generated
- by ``scripts/get_maintainer.pl.`` The results returned by the
- script will be best if you have git installed and are making
- your changes in a branch derived from Linus' latest git tree.
- See Documentation/process/submitting-patches.rst for details.
-
- PLEASE try to include any credit lines you want added with the
- patch. It avoids people being missed off by mistake and makes
- it easier to know who wants adding and who doesn't.
-
- PLEASE document known bugs. If it doesn't work for everything
- or does something very odd once a month document it.
-
- PLEASE remember that submissions must be made under the terms
- of the Linux Foundation certificate of contribution and should
- include a Signed-off-by: line. The current version of this
- "Developer's Certificate of Origin" (DCO) is listed in the file
- Documentation/process/submitting-patches.rst.
-
-6. Make sure you have the right to send any changes you make. If you
- do changes at work you may find your employer owns the patch
- not you.
-
-7. When sending security related changes or reports to a maintainer
- please Cc: security@kernel.org, especially if the maintainer
- does not respond. Please keep in mind that the security team is
- a small set of people who can be efficient only when working on
- verified bugs. Please only Cc: this list when you have identified
- that the bug would present a short-term risk to other users if it
- were publicly disclosed. For example, reports of address leaks do
- not represent an immediate threat and are better handled publicly,
- and ideally, should come with a patch proposal. Please do not send
- automated reports to this list either. Such bugs will be handled
- better and faster in the usual public places. See
- Documentation/process/security-bugs.rst for details.
-
-8. Happy hacking.
+List of maintainers
+===================
Descriptions of section entries and preferred order
---------------------------------------------------
@@ -2103,6 +2027,7 @@ N: digicolor
ARM/CORESIGHT FRAMEWORK AND DRIVERS
M: Suzuki K Poulose <suzuki.poulose@arm.com>
R: Mike Leach <mike.leach@linaro.org>
+R: James Clark <james.clark@arm.com>
R: Leo Yan <leo.yan@linaro.org>
L: coresight@lists.linaro.org (moderated for non-subscribers)
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -3384,6 +3309,16 @@ F: include/uapi/linux/audit.h
F: kernel/audit*
F: lib/*audit.c
+AUXILIARY BUS DRIVER
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+R: Dave Ertman <david.m.ertman@intel.com>
+R: Ira Weiny <ira.weiny@intel.com>
+S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+F: Documentation/driver-api/auxiliary_bus.rst
+F: drivers/base/auxiliary.c
+F: include/linux/auxiliary_bus.h
+
AUXILIARY DISPLAY DRIVERS
M: Miguel Ojeda <ojeda@kernel.org>
S: Maintained
@@ -3564,18 +3499,24 @@ M: Yury Norov <yury.norov@gmail.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
S: Maintained
+F: include/linux/bitfield.h
F: include/linux/bitmap.h
+F: include/linux/bits.h
F: include/linux/cpumask.h
F: include/linux/find.h
F: include/linux/nodemask.h
+F: include/vdso/bits.h
F: lib/bitmap.c
F: lib/cpumask.c
F: lib/cpumask_kunit.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
F: lib/test_bitmap.c
+F: tools/include/linux/bitfield.h
F: tools/include/linux/bitmap.h
+F: tools/include/linux/bits.h
F: tools/include/linux/find.h
+F: tools/include/vdso/bits.h
F: tools/lib/bitmap.c
F: tools/lib/find_bit.c
@@ -4529,6 +4470,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
F: drivers/usb/cdns3/
X: drivers/usb/cdns3/cdns3*
+CADENCE USBHS DRIVER
+M: Pawel Laszczak <pawell@cadence.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/usb/gadget/udc/cdns2
+
CADET FM/AM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -5203,6 +5150,13 @@ S: Maintained
F: drivers/cxl/
F: include/uapi/linux/cxl_mem.h
+COMPUTE EXPRESS LINK PMU (CPMU)
+M: Jonathan Cameron <jonathan.cameron@huawei.com>
+L: linux-cxl@vger.kernel.org
+S: Maintained
+F: Documentation/admin-guide/perf/cxl.rst
+F: drivers/perf/cxl_pmu.c
+
CONEXANT ACCESSRUNNER USB DRIVER
L: accessrunner-general@lists.sourceforge.net
S: Orphan
@@ -5908,7 +5862,9 @@ S: Orphan
F: drivers/mtd/nand/raw/denali*
DESIGNWARE EDMA CORE IP DRIVER
-M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+R: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+R: Serge Semin <fancer.lancer@gmail.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/dw-edma/
@@ -9483,6 +9439,13 @@ F: lib/test_hmm*
F: mm/hmm*
F: tools/testing/selftests/mm/*hmm*
+HONEYWELL MPRLS0025PA PRESSURE SENSOR SERIES IIO DRIVER
+M: Andreas Klinger <ak@it-klinger.de>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml
+F: drivers/iio/pressure/mprls0025pa.c
+
HOST AP DRIVER
M: Jouni Malinen <j@w1.fi>
L: linux-wireless@vger.kernel.org
@@ -10327,6 +10290,13 @@ L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/i810/
+INTEL 8254 COUNTER DRIVER
+M: William Breathitt Gray <william.gray@linaro.org>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/counter/i8254.c
+F: include/linux/i8254.h
+
INTEL 8255 GPIO DRIVER
M: William Breathitt Gray <william.gray@linaro.org>
L: linux-gpio@vger.kernel.org
@@ -10532,7 +10502,7 @@ L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/admin-guide/media/ipu3.rst
F: Documentation/admin-guide/media/ipu3_rcb.svg
-F: Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst
+F: Documentation/userspace-api/media/v4l/metafmt-intel-ipu3.rst
F: drivers/staging/media/ipu3/
INTEL ISHTP ECLITE DRIVER
@@ -10876,7 +10846,6 @@ S: Maintained
F: drivers/net/ethernet/sgi/ioc3-eth.c
IOMAP FILESYSTEM LIBRARY
-M: Christoph Hellwig <hch@infradead.org>
M: Darrick J. Wong <djwong@kernel.org>
L: linux-xfs@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
@@ -11509,6 +11478,7 @@ M: Sean Christopherson <seanjc@google.com>
M: Paolo Bonzini <pbonzini@redhat.com>
L: kvm@vger.kernel.org
S: Supported
+P: Documentation/process/maintainer-kvm-x86.rst
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
F: arch/x86/include/asm/kvm*
F: arch/x86/include/asm/svm.h
@@ -12099,6 +12069,13 @@ F: Documentation/litmus-tests/
F: Documentation/memory-barriers.txt
F: tools/memory-model/
+LINUX-NEXT TREE
+M: Stephen Rothwell <sfr@canb.auug.org.au>
+L: linux-next@vger.kernel.org
+S: Supported
+B: mailto:linux-next@vger.kernel.org and the appropriate development tree
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/
+
LIS3LV02D ACCELEROMETER DRIVER
M: Eric Piel <eric.piel@tremplin-utc.net>
S: Maintained
@@ -13981,6 +13958,7 @@ F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h
MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER
M: Claudiu Beznea <claudiu.beznea@microchip.com>
S: Supported
+F: Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml
F: drivers/power/reset/at91-sama5d2_shdwc.c
MICROCHIP SOC DRIVERS
@@ -15032,7 +15010,7 @@ M: Allen Hubbe <allenbh@gmail.com>
L: ntb@lists.linux.dev
S: Supported
W: https://github.com/jonmason/ntb/wiki
-T: git git://github.com/jonmason/ntb.git
+T: git https://github.com/jonmason/ntb.git
F: drivers/net/ntb_netdev.c
F: drivers/ntb/
F: drivers/pci/endpoint/functions/pci-epf-*ntb.c
@@ -15568,6 +15546,13 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/og01a1b.c
+OMNIVISION OV01A10 SENSOR DRIVER
+M: Bingbu Cao <bingbu.cao@intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: drivers/media/i2c/ov01a10.c
+
OMNIVISION OV02A10 SENSOR DRIVER
M: Dongchun Zhu <dongchun.zhu@mediatek.com>
L: linux-media@vger.kernel.org
@@ -15916,7 +15901,7 @@ ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M: Mark Fasheh <mark@fasheh.com>
M: Joel Becker <jlbec@evilplan.org>
M: Joseph Qi <joseph.qi@linux.alibaba.com>
-L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
+L: ocfs2-devel@lists.linux.dev
S: Supported
W: http://ocfs2.wiki.kernel.org
F: Documentation/filesystems/dlmfs.rst
@@ -17663,9 +17648,18 @@ S: Maintained
F: Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
F: drivers/thermal/qcom/
+QUALCOMM TYPEC PORT MANAGER DRIVER
+M: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+L: linux-arm-msm@vger.kernel.org
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/usb/qcom,pmic-*.yaml
+F: drivers/usb/typec/tcpm/qcom/
+
QUALCOMM VENUS VIDEO ACCELERATOR DRIVER
M: Stanimir Varbanov <stanimir.k.varbanov@gmail.com>
M: Vikash Garodia <quic_vgarodia@quicinc.com>
+R: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
L: linux-media@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -18180,6 +18174,13 @@ S: Maintained
F: Documentation/devicetree/bindings/clock/renesas,versaclock7.yaml
F: drivers/clk/clk-versaclock7.c
+RENESAS X9250 DIGITAL POTENTIOMETERS DRIVER
+M: Herve Codina <herve.codina@bootlin.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/potentiometer/renesas,x9250.yaml
+F: drivers/iio/potentiometer/x9250.c
+
RESET CONTROLLER FRAMEWORK
M: Philipp Zabel <p.zabel@pengutronix.de>
S: Maintained
@@ -18275,6 +18276,7 @@ F: drivers/clk/microchip/clk-mpfs*.c
F: drivers/i2c/busses/i2c-microchip-corei2c.c
F: drivers/mailbox/mailbox-mpfs.c
F: drivers/pci/controller/pcie-microchip-host.c
+F: drivers/pwm/pwm-microchip-core.c
F: drivers/reset/reset-mpfs.c
F: drivers/rtc/rtc-mpfs.c
F: drivers/soc/microchip/mpfs-sys-controller.c
@@ -18347,7 +18349,7 @@ L: linux-rockchip@lists.infradead.org
S: Maintained
F: Documentation/admin-guide/media/rkisp1.rst
F: Documentation/devicetree/bindings/media/rockchip-isp1.yaml
-F: Documentation/userspace-api/media/v4l/pixfmt-meta-rkisp1.rst
+F: Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
F: drivers/media/platform/rockchip/rkisp1
F: include/uapi/linux/rkisp1-config.h
@@ -18392,10 +18394,11 @@ S: Maintained
F: Documentation/devicetree/bindings/iio/light/bh1750.yaml
F: drivers/iio/light/bh1750.c
-ROHM BU27034 AMBIENT LIGHT SENSOR DRIVER
+ROHM BU270xx LIGHT SENSOR DRIVERS
M: Matti Vaittinen <mazziesaccount@gmail.com>
L: linux-iio@vger.kernel.org
S: Supported
+F: drivers/iio/light/rohm-bu27008.c
F: drivers/iio/light/rohm-bu27034.c
ROHM MULTIFUNCTION BD9571MWV-M PMIC DEVICE DRIVERS
@@ -20290,6 +20293,12 @@ F: Documentation/devicetree/bindings/reset/starfive,jh7100-reset.yaml
F: drivers/reset/starfive/reset-starfive-jh71*
F: include/dt-bindings/reset/starfive?jh71*.h
+STARFIVE JH71X0 USB DRIVERS
+M: Minda Chen <minda.chen@starfivetech.com>
+S: Maintained
+F: Documentation/devicetree/bindings/usb/starfive,jh7110-usb.yaml
+F: drivers/usb/cdns3/cdns3-starfive.c
+
STARFIVE JH71XX PMU CONTROLLER DRIVER
M: Walker Chen <walker.chen@starfivetech.com>
S: Supported
@@ -20989,10 +20998,13 @@ TEGRA VIDEO DRIVER
M: Thierry Reding <thierry.reding@gmail.com>
M: Jonathan Hunter <jonathanh@nvidia.com>
M: Sowjanya Komatineni <skomatineni@nvidia.com>
+M: Luca Ceresoli <luca.ceresoli@bootlin.com>
L: linux-media@vger.kernel.org
L: linux-tegra@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.yaml
+F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vi.yaml
+F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-vip.yaml
F: drivers/staging/media/tegra-video/
TEGRA XUSB PADCTL DRIVER
@@ -22087,6 +22099,7 @@ F: drivers/usb/
F: include/dt-bindings/usb/
F: include/linux/usb.h
F: include/linux/usb/
+F: include/uapi/linux/usb/
USB TYPEC BUS FOR ALTERNATE MODES
M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
@@ -22474,6 +22487,10 @@ F: include/linux/vringh.h
F: include/uapi/linux/virtio_*.h
F: tools/virtio/
+PDS DSC VIRTIO DATA PATH ACCELERATOR
+R: Shannon Nelson <shannon.nelson@amd.com>
+F: drivers/vdpa/pds/
+
VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
L: virtualization@lists.linux-foundation.org
@@ -23382,8 +23399,10 @@ M: Srinivas Neeli <srinivas.neeli@amd.com>
R: Shubhrajyoti Datta <shubhrajyoti.datta@amd.com>
R: Michal Simek <michal.simek@amd.com>
S: Maintained
+F: Documentation/devicetree/bindings/watchdog/xlnx,versal-wwdt.yaml
F: Documentation/devicetree/bindings/watchdog/xlnx,xps-timebase-wdt.yaml
F: drivers/watchdog/of_xilinx_wdt.c
+F: drivers/watchdog/xilinx_wwdt.c
XILINX XDMA DRIVER
M: Lizhi Hou <lizhi.hou@amd.com>
diff --git a/Makefile b/Makefile
index 48a044bfe062..12579666581f 100644
--- a/Makefile
+++ b/Makefile
@@ -38,6 +38,10 @@ __all:
# descending is started. They are now explicitly listed as the
# prepare rule.
+this-makefile := $(lastword $(MAKEFILE_LIST))
+export abs_srctree := $(realpath $(dir $(this-makefile)))
+export abs_objtree := $(CURDIR)
+
ifneq ($(sub_make_done),1)
# Do not use make's built-in rules and variables
@@ -185,20 +189,8 @@ $(if $(abs_objtree),, \
# $(realpath ...) resolves symlinks
abs_objtree := $(realpath $(abs_objtree))
-else
-abs_objtree := $(CURDIR)
endif # ifneq ($(KBUILD_OUTPUT),)
-ifeq ($(abs_objtree),$(CURDIR))
-# Suppress "Entering directory ..." unless we are changing the work directory.
-MAKEFLAGS += --no-print-directory
-else
-need-sub-make := 1
-endif
-
-this-makefile := $(lastword $(MAKEFILE_LIST))
-abs_srctree := $(realpath $(dir $(this-makefile)))
-
ifneq ($(words $(subst :, ,$(abs_srctree))), 1)
$(error source directory cannot contain spaces or colons)
endif
@@ -211,9 +203,25 @@ need-sub-make := 1
$(this-makefile): ;
endif
-export abs_srctree abs_objtree
export sub_make_done := 1
+endif # sub_make_done
+
+ifeq ($(abs_objtree),$(CURDIR))
+# Suppress "Entering directory ..." if we are at the final work directory.
+no-print-directory := --no-print-directory
+else
+# Recursion to show "Entering directory ..."
+need-sub-make := 1
+endif
+
+ifeq ($(filter --no-print-directory, $(MAKEFLAGS)),)
+# If --no-print-directory is unset, recurse once again to set it.
+# You may end up recursing into __sub-make twice. This is needed due to the
+# behavior change in GNU Make 4.4.1.
+need-sub-make := 1
+endif
+
ifeq ($(need-sub-make),1)
PHONY += $(MAKECMDGOALS) __sub-make
@@ -223,18 +231,12 @@ $(filter-out $(this-makefile), $(MAKECMDGOALS)) __all: __sub-make
# Invoke a second make in the output directory, passing relevant variables
__sub-make:
- $(Q)$(MAKE) -C $(abs_objtree) -f $(abs_srctree)/Makefile $(MAKECMDGOALS)
+ $(Q)$(MAKE) $(no-print-directory) -C $(abs_objtree) \
+ -f $(abs_srctree)/Makefile $(MAKECMDGOALS)
-endif # need-sub-make
-endif # sub_make_done
+else # need-sub-make
# We process the rest of the Makefile if this is the final invocation of make
-ifeq ($(need-sub-make),)
-
-# Do not print "Entering directory ...",
-# but we want to display it when entering to the output directory
-# so that IDEs/editors are able to understand relative filenames.
-MAKEFLAGS += --no-print-directory
ifeq ($(abs_srctree),$(abs_objtree))
# building in the source tree
@@ -447,8 +449,7 @@ HOSTRUSTC = rustc
HOSTPKG_CONFIG = pkg-config
KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
- -O2 -fomit-frame-pointer -std=gnu11 \
- -Wdeclaration-after-statement
+ -O2 -fomit-frame-pointer -std=gnu11
KBUILD_USERCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
KBUILD_USERLDFLAGS := $(USERLDFLAGS)
@@ -1012,9 +1013,6 @@ endif
# arch Makefile may override CC so keep this after arch Makefile is included
NOSTDINC_FLAGS += -nostdinc
-# warn about C99 declaration after statement
-KBUILD_CFLAGS += -Wdeclaration-after-statement
-
# Variable Length Arrays (VLAs) should not be used anywhere in the kernel
KBUILD_CFLAGS += -Wvla
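The -Wdeclaration-after-statement removals just above (from KBUILD_USERHOSTCFLAGS and KBUILD_CFLAGS) mean the build no longer warns about mixing declarations and code, the C99 style the flag used to reject. A minimal stand-alone illustration of what the warning applied to:

#include <stdio.h>

int main(void)
{
	int a = 1;

	printf("a = %d\n", a);

	/*
	 * A declaration after a statement: -Wdeclaration-after-statement
	 * warned about this line; without the flag it is plain C99.
	 */
	int b = a + 1;

	printf("b = %d\n", b);
	return 0;
}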
@@ -1199,28 +1197,12 @@ endif
export KBUILD_VMLINUX_LIBS
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
-# Recurse until adjust_autoksyms.sh is satisfied
-PHONY += autoksyms_recursive
ifdef CONFIG_TRIM_UNUSED_KSYMS
# For the kernel to actually contain only the needed exported symbols,
# we have to build modules as well to determine what those symbols are.
-# (this can be evaluated only once include/config/auto.conf has been included)
KBUILD_MODULES := 1
-
-autoksyms_recursive: $(build-dir) modules.order
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
- "$(MAKE) -f $(srctree)/Makefile autoksyms_recursive"
endif
-autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h)
-
-quiet_cmd_autoksyms_h = GEN $@
- cmd_autoksyms_h = mkdir -p $(dir $@); \
- $(CONFIG_SHELL) $(srctree)/scripts/gen_autoksyms.sh $@
-
-$(autoksyms_h):
- $(call cmd,autoksyms_h)
-
# '$(AR) mPi' needs 'T' to workaround the bug of llvm-ar <= 14
quiet_cmd_ar_vmlinux.a = AR $@
cmd_ar_vmlinux.a = \
@@ -1229,7 +1211,7 @@ quiet_cmd_ar_vmlinux.a = AR $@
$(AR) mPiT $$($(AR) t $@ | sed -n 1p) $@ $$($(AR) t $@ | grep -F -f $(srctree)/scripts/head-object-list.txt)
targets += vmlinux.a
-vmlinux.a: $(KBUILD_VMLINUX_OBJS) scripts/head-object-list.txt autoksyms_recursive FORCE
+vmlinux.a: $(KBUILD_VMLINUX_OBJS) scripts/head-object-list.txt FORCE
$(call if_changed,ar_vmlinux.a)
PHONY += vmlinux_o
@@ -1285,7 +1267,7 @@ scripts: scripts_basic scripts_dtc
PHONY += prepare archprepare
archprepare: outputmakefile archheaders archscripts scripts include/config/kernel.release \
- asm-generic $(version_h) $(autoksyms_h) include/generated/utsrelease.h \
+ asm-generic $(version_h) include/generated/utsrelease.h \
include/generated/compile.h include/generated/autoconf.h remove-stale-files
prepare0: archprepare
@@ -1567,6 +1549,8 @@ modules_sign_only := y
endif
endif
+endif # CONFIG_MODULES
+
modinst_pre :=
ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
modinst_pre := __modinst_pre
@@ -1577,18 +1561,18 @@ PHONY += __modinst_pre
__modinst_pre:
@rm -rf $(MODLIB)/kernel
@rm -f $(MODLIB)/source
- @mkdir -p $(MODLIB)/kernel
+ @mkdir -p $(MODLIB)
+ifdef CONFIG_MODULES
@ln -s $(abspath $(srctree)) $(MODLIB)/source
@if [ ! $(objtree) -ef $(MODLIB)/build ]; then \
rm -f $(MODLIB)/build ; \
ln -s $(CURDIR) $(MODLIB)/build ; \
fi
@sed 's:^\(.*\)\.o$$:kernel/\1.ko:' modules.order > $(MODLIB)/modules.order
+endif
@cp -f modules.builtin $(MODLIB)/
@cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
-endif # CONFIG_MODULES
-
###
# Cleaning is done on three levels.
# make clean Delete most generated files
@@ -1930,6 +1914,13 @@ help:
@echo ' clean - remove generated files in module directory only'
@echo ''
+__external_modules_error:
+ @echo >&2 '***'
+	@echo >&2 '*** The present kernel has CONFIG_MODULES disabled.'
+ @echo >&2 '*** You cannot build or install external modules.'
+ @echo >&2 '***'
+ @false
+
endif # KBUILD_EXTMOD
# ---------------------------------------------------------------------------
@@ -1966,13 +1957,10 @@ else # CONFIG_MODULES
# Modules not configured
# ---------------------------------------------------------------------------
-modules modules_install:
- @echo >&2 '***'
- @echo >&2 '*** The present kernel configuration has modules disabled.'
- @echo >&2 '*** To use the module feature, please run "make menuconfig" etc.'
- @echo >&2 '*** to enable CONFIG_MODULES.'
- @echo >&2 '***'
- @exit 1
+PHONY += __external_modules_error
+
+modules modules_install: __external_modules_error
+ @:
KBUILD_MODULES :=
@@ -2045,7 +2033,7 @@ clean: $(clean-dirs)
-o -name '*.dtb.S' -o -name '*.dtbo.S' \
-o -name '*.dt.yaml' \
-o -name '*.dwo' -o -name '*.lst' \
- -o -name '*.su' -o -name '*.mod' -o -name '*.usyms' \
+ -o -name '*.su' -o -name '*.mod' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.lex.c' -o -name '*.tab.[ch]' \
-o -name '*.asn1.[ch]' \
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index c9434ff3aa4c..8a3fb71e9cfa 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -8,6 +8,10 @@
#include <asm/dwarf.h>
+#define ASM_NL ` /* use '`' to mark new line in macro */
+#define __ALIGN .align 4
+#define __ALIGN_STR __stringify(__ALIGN)
+
#ifdef __ASSEMBLY__
.macro ST2 e, o, off
@@ -28,10 +32,6 @@
#endif
.endm
-#define ASM_NL ` /* use '`' to mark new line in macro */
-#define __ALIGN .align 4
-#define __ALIGN_STR __stringify(__ALIGN)
-
/* annotation for data we want in DCCM - if enabled in .config */
.macro ARCFP_DATA nm
#ifdef CONFIG_ARC_HAS_DCCM
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 2b89b6c53801..9f64d729c9f8 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -87,7 +87,7 @@ void __init setup_arch_memory(void)
setup_initial_init_mm(_text, _etext, _edata, _end);
/* first page of system - kernel .vector starts here */
- min_low_pfn = virt_to_pfn(CONFIG_LINUX_RAM_BASE);
+ min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE);
/* Last usable page of low mem */
max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
diff --git a/arch/arm/boot/dts/st/spear1310.dtsi b/arch/arm/boot/dts/st/spear1310.dtsi
index 2f746a9428a7..ba827d60bf07 100644
--- a/arch/arm/boot/dts/st/spear1310.dtsi
+++ b/arch/arm/boot/dts/st/spear1310.dtsi
@@ -11,7 +11,7 @@
compatible = "st,spear1310";
ahb {
- spics: spics@e0700000{
+ spics: spics@e0700000 {
compatible = "st,spear-spics-gpio";
reg = <0xe0700000 0x1000>;
st-spics,peripcfg-reg = <0x3b0>;
diff --git a/arch/arm/boot/dts/st/spear1340.dtsi b/arch/arm/boot/dts/st/spear1340.dtsi
index 818886e11713..d54e10629a7d 100644
--- a/arch/arm/boot/dts/st/spear1340.dtsi
+++ b/arch/arm/boot/dts/st/spear1340.dtsi
@@ -12,7 +12,7 @@
ahb {
- spics: spics@e0700000{
+ spics: spics@e0700000 {
compatible = "st,spear-spics-gpio";
reg = <0xe0700000 0x1000>;
st-spics,peripcfg-reg = <0x42c>;
diff --git a/arch/arm/boot/dts/st/stih407-family.dtsi b/arch/arm/boot/dts/st/stih407-family.dtsi
index 5ebb77947fd9..3f58383a7b59 100644
--- a/arch/arm/boot/dts/st/stih407-family.dtsi
+++ b/arch/arm/boot/dts/st/stih407-family.dtsi
@@ -645,7 +645,7 @@
st,lpc-mode = <ST_LPC_MODE_CLKSRC>;
};
- spifsm: spifsm@9022000{
+ spifsm: spifsm@9022000 {
compatible = "st,spi-fsm";
reg = <0x9022000 0x1000>;
reg-names = "spi-fsm";
diff --git a/arch/arm/boot/dts/st/stih407-pinctrl.dtsi b/arch/arm/boot/dts/st/stih407-pinctrl.dtsi
index 2cf335714ca2..7815669fe813 100644
--- a/arch/arm/boot/dts/st/stih407-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stih407-pinctrl.dtsi
@@ -1090,7 +1090,7 @@
};
i2s_out {
- pinctrl_i2s_8ch_out: i2s_8ch_out{
+ pinctrl_i2s_8ch_out: i2s_8ch_out {
st,pins {
mclk = <&pio33 5 ALT1 OUT>;
lrclk = <&pio33 7 ALT1 OUT>;
@@ -1102,7 +1102,7 @@
};
};
- pinctrl_i2s_2ch_out: i2s_2ch_out{
+ pinctrl_i2s_2ch_out: i2s_2ch_out {
st,pins {
mclk = <&pio33 5 ALT1 OUT>;
lrclk = <&pio33 7 ALT1 OUT>;
@@ -1113,7 +1113,7 @@
};
i2s_in {
- pinctrl_i2s_8ch_in: i2s_8ch_in{
+ pinctrl_i2s_8ch_in: i2s_8ch_in {
st,pins {
mclk = <&pio32 5 ALT1 IN>;
lrclk = <&pio32 7 ALT1 IN>;
@@ -1126,7 +1126,7 @@
};
};
- pinctrl_i2s_2ch_in: i2s_2ch_in{
+ pinctrl_i2s_2ch_in: i2s_2ch_in {
st,pins {
mclk = <&pio32 5 ALT1 IN>;
lrclk = <&pio32 7 ALT1 IN>;
@@ -1137,7 +1137,7 @@
};
spdif_out {
- pinctrl_spdif_out: spdif_out{
+ pinctrl_spdif_out: spdif_out {
st,pins {
spdif_out = <&pio34 7 ALT1 OUT>;
};
diff --git a/arch/arm/boot/dts/st/stm32f429-disco.dts b/arch/arm/boot/dts/st/stm32f429-disco.dts
index 3b81228d46a2..a3cb4aabdd5a 100644
--- a/arch/arm/boot/dts/st/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/st/stm32f429-disco.dts
@@ -190,7 +190,7 @@
status = "okay";
};
- display: display@1{
+ display: display@1 {
/* Connect panel-ilitek-9341 to ltdc */
compatible = "st,sf-tc240t-9370-t", "ilitek,ili9341";
reg = <1>;
diff --git a/arch/arm/boot/dts/st/stm32f746-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32f746-pinctrl.dtsi
index fcfd2ac7239b..781197ef42d6 100644
--- a/arch/arm/boot/dts/st/stm32f746-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32f746-pinctrl.dtsi
@@ -6,6 +6,6 @@
#include "stm32f7-pinctrl.dtsi"
-&pinctrl{
+&pinctrl {
compatible = "st,stm32f746-pinctrl";
};
diff --git a/arch/arm/boot/dts/st/stm32f769-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32f769-pinctrl.dtsi
index 31005dd9929c..c26abc04e2ce 100644
--- a/arch/arm/boot/dts/st/stm32f769-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32f769-pinctrl.dtsi
@@ -6,6 +6,6 @@
#include "stm32f7-pinctrl.dtsi"
-&pinctrl{
+&pinctrl {
compatible = "st,stm32f769-pinctrl";
};
diff --git a/arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi
index aa1bc3e10a49..7f1d234e1024 100644
--- a/arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32h7-pinctrl.dtsi
@@ -94,7 +94,7 @@
drive-push-pull;
bias-disable;
};
- pins2{
+ pins2 {
pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
slew-rate = <3>;
drive-open-drain;
@@ -122,7 +122,7 @@
drive-push-pull;
bias-pull-up;
};
- pins2{
+ pins2 {
pinmux = <STM32_PINMUX('B', 8, AF7)>; /* SDMMC1_CKIN */
bias-pull-up;
};
@@ -162,7 +162,7 @@
drive-push-pull;
bias-disable;
};
- pins2{
+ pins2 {
pinmux = <STM32_PINMUX('D', 7, AF11)>; /* SDMMC1_CMD */
slew-rate = <3>;
drive-open-drain;
diff --git a/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi
index 06e969aa5fdb..05c9c4f8064c 100644
--- a/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi
@@ -1659,7 +1659,7 @@
drive-push-pull;
bias-pull-up;
};
- pins2{
+ pins2 {
pinmux = <STM32_PINMUX('E', 4, AF8)>; /* SDMMC1_CKIN */
bias-pull-up;
};
@@ -1694,7 +1694,7 @@
drive-push-pull;
bias-pull-up;
};
- pins2{
+ pins2 {
pinmux = <STM32_PINMUX('E', 4, AF8)>; /* SDMMC1_CKIN */
bias-pull-up;
};
diff --git a/arch/arm/boot/dts/st/stm32mp157a-icore-stm32mp1.dtsi b/arch/arm/boot/dts/st/stm32mp157a-icore-stm32mp1.dtsi
index 9de893101b40..569a7e940ecc 100644
--- a/arch/arm/boot/dts/st/stm32mp157a-icore-stm32mp1.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp157a-icore-stm32mp1.dtsi
@@ -165,12 +165,12 @@
status = "okay";
};
-&iwdg2{
+&iwdg2 {
timeout-sec = <32>;
status = "okay";
};
-&m4_rproc{
+&m4_rproc {
memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
<&vdev0vring1>, <&vdev0buffer>;
mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
@@ -184,7 +184,7 @@
status = "okay";
};
-&rtc{
+&rtc {
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/stm32mp157a-microgea-stm32mp1.dtsi b/arch/arm/boot/dts/st/stm32mp157a-microgea-stm32mp1.dtsi
index fb4600a59869..a75f50cf7123 100644
--- a/arch/arm/boot/dts/st/stm32mp157a-microgea-stm32mp1.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp157a-microgea-stm32mp1.dtsi
@@ -117,12 +117,12 @@
status = "okay";
};
-&iwdg2{
+&iwdg2 {
timeout-sec = <32>;
status = "okay";
};
-&m4_rproc{
+&m4_rproc {
memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
<&vdev0vring1>, <&vdev0buffer>;
mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
@@ -136,7 +136,7 @@
status = "okay";
};
-&rtc{
+&rtc {
status = "okay";
};
diff --git a/arch/arm/common/sharpsl_param.c b/arch/arm/common/sharpsl_param.c
index 6237ede2f0c7..1ca26c063f80 100644
--- a/arch/arm/common/sharpsl_param.c
+++ b/arch/arm/common/sharpsl_param.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mach/sharpsl_param.h>
-#include <asm/memory.h>
+#include <asm/page.h>
/*
* Certain hardware parameters determined at the time of device manufacture,
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index 4f80b72372b4..1d069e558d8d 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -7,7 +7,7 @@
#ifndef __ASM_ARM_DELAY_H
#define __ASM_ARM_DELAY_H
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/param.h> /* HZ */
/*
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 7fcdc785366c..56b08ed6cc3b 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -23,7 +23,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm-generic/pci_iomap.h>
/*
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 62e9df024445..ef2aa79ece5a 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -5,11 +5,16 @@
* Copyright (C) 2000-2002 Russell King
* modification for nommu, Hyok S. Choi, 2004
*
- * Note: this file should not be included by non-asm/.h files
+ * Note: this file should not be included explicitly; include <asm/page.h>
+ * to get access to these definitions.
*/
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H
+#ifndef _ASMARM_PAGE_H
+#error "Do not include <asm/memory.h> directly"
+#endif
+
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
@@ -288,10 +293,12 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#endif
-#define virt_to_pfn(kaddr) \
- ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
- PHYS_PFN_OFFSET)
-
+static inline unsigned long virt_to_pfn(const void *p)
+{
+ unsigned long kaddr = (unsigned long)p;
+ return (((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) +
+ PHYS_PFN_OFFSET);
+}
#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
#ifdef CONFIG_DEBUG_VIRTUAL
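With virt_to_pfn() now a static inline taking a const void *, callers get real type checking instead of silent macro arithmetic, which is why the arch/arc hunk earlier adds an explicit (void *) cast around CONFIG_LINUX_RAM_BASE. A minimal user-space sketch of the same conversion, using placeholder values for PAGE_OFFSET, PAGE_SHIFT and PHYS_PFN_OFFSET:

#include <stdio.h>

/* Illustrative placeholder values only -- not a real kernel configuration. */
#define PAGE_OFFSET	0xc0000000UL
#define PAGE_SHIFT	12
#define PHYS_PFN_OFFSET	0x60000UL	/* RAM starting at PA 0x60000000 */

/* Same shape as the new arch/arm static inline: typed pointer in, PFN out. */
static inline unsigned long virt_to_pfn(const void *p)
{
	unsigned long kaddr = (unsigned long)p;

	return ((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET;
}

int main(void)
{
	/* A raw address now needs an explicit (void *) cast, as in the ARC hunk. */
	unsigned long ram_base = 0xc0100000UL;

	printf("pfn = 0x%lx\n", virt_to_pfn((void *)ram_base));
	return 0;
}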
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 28c63d172a96..119aa85d1feb 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -183,10 +183,10 @@ extern int pfn_valid(unsigned long);
#define pfn_valid pfn_valid
#endif
-#include <asm/memory.h>
-
#endif /* !__ASSEMBLY__ */
+#include <asm/memory.h>
+
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
#include <asm-generic/getorder.h>
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a58ccbb406ad..34662a9d4cab 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -27,7 +27,7 @@ extern struct page *empty_zero_page;
#else
#include <asm-generic/pgtable-nopud.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index c82f7a29ec4a..280396483f5d 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -147,8 +147,6 @@ static inline void init_proc_vtable(const struct processor *p)
extern void cpu_resume(void);
-#include <asm/memory.h>
-
#ifdef CONFIG_MMU
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h
index d362233856a5..421e3415338a 100644
--- a/arch/arm/include/asm/sparsemem.h
+++ b/arch/arm/include/asm/sparsemem.h
@@ -2,7 +2,7 @@
#ifndef ASMARM_SPARSEMEM_H
#define ASMARM_SPARSEMEM_H
-#include <asm/memory.h>
+#include <asm/page.h>
/*
* Two definitions are required for sparsemem:
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
index 6451a433912c..65da32e1f1c1 100644
--- a/arch/arm/include/asm/uaccess-asm.h
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -5,7 +5,7 @@
#include <asm/asm-offsets.h>
#include <asm/domain.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/thread_info.h>
.macro csdb
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 2fcbec9c306c..bb5c81823117 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -9,7 +9,7 @@
* User space memory access functions
*/
#include <linux/string.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/domain.h>
#include <asm/unaligned.h>
#include <asm/unified.h>
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 38121c59cbc2..6a80d4be743b 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -17,7 +17,7 @@
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>
#include <asm/suspend.h>
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 291dc48d6bed..76e8125d05d2 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -15,7 +15,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 03d4c5578c5c..bcc4c9ec3aa4 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -9,7 +9,7 @@
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index de8a60363c85..52bacf07ba16 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -6,7 +6,7 @@
*
* Low-level vector interface routines for the ARMv7-M architecture
*/
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/glue.h>
#include <asm/thread_notify.h>
#include <asm/v7m.h>
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 950bef83339f..b9d6818f1ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -14,12 +14,11 @@
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/cp15.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>
#include <asm/mpu.h>
-#include <asm/page.h>
/*
* Kernel startup entry point.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 656991055bc1..1ec35f065617 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -17,7 +17,7 @@
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/thread_info.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index 2373020af965..38a90a3d12b2 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -19,7 +19,7 @@
#include <asm/system_misc.h>
#include <asm/idmap.h>
#include <asm/suspend.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/sections.h>
#include "reboot.h"
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 43f0a3ebf390..c3ec3861dd07 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -8,7 +8,7 @@
#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index d3a85f01b328..f59927bcfbce 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -15,7 +15,7 @@
#include <linux/string.h> /* memcpy */
#include <asm/cputype.h>
#include <asm/mach/map.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/tcm.h>
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 76678732c60d..c16d196b5aad 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -12,9 +12,8 @@
#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
-#include <asm/memory.h>
-#include <asm/mpu.h>
#include <asm/page.h>
+#include <asm/mpu.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index aa12b65a7fd6..bd9127c4b451 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -12,9 +12,8 @@
#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
-#include <asm/memory.h>
-#include <asm/mpu.h>
#include <asm/page.h>
+#include <asm/mpu.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
diff --git a/arch/arm/mach-berlin/platsmp.c b/arch/arm/mach-berlin/platsmp.c
index 593fc4a69d84..ed94758d30ff 100644
--- a/arch/arm/mach-berlin/platsmp.c
+++ b/arch/arm/mach-berlin/platsmp.c
@@ -12,7 +12,7 @@
#include <asm/cacheflush.h>
#include <asm/cp15.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index aa352c2de313..68039aad3014 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -18,7 +18,7 @@
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include "memory.h"
diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S
index ac3d0b363c51..3bfd8b5e03ed 100644
--- a/arch/arm/mach-omap2/sleep33xx.S
+++ b/arch/arm/mach-omap2/sleep33xx.S
@@ -10,7 +10,7 @@
#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include "iomap.h"
#include "cm33xx.h"
diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S
index 832c91327945..ec0972a48f08 100644
--- a/arch/arm/mach-omap2/sleep43xx.S
+++ b/arch/arm/mach-omap2/sleep43xx.S
@@ -11,7 +11,7 @@
#include <linux/platform_data/pm33xx.h>
#include <asm/assembler.h>
#include <asm/hardware/cache-l2x0.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include "cm33xx.h"
#include "common.h"
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index f60f6a9aed73..f09c9197808b 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -9,7 +9,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/hardware/cache-l2x0.h>
#include "omap-secure.h"
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index 6b7197ae3c72..c9f0f62187bd 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -26,7 +26,7 @@
#include <linux/clk.h>
#include <asm/setup.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <linux/sizes.h>
diff --git a/arch/arm/mach-rockchip/sleep.S b/arch/arm/mach-rockchip/sleep.S
index 3eca3922c944..38b6c5186c3c 100644
--- a/arch/arm/mach-rockchip/sleep.S
+++ b/arch/arm/mach-rockchip/sleep.S
@@ -6,7 +6,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
.data
/*
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 0c21ef45db68..9495fc109baa 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -29,7 +29,7 @@
#include <linux/time.h>
#include <mach/hardware.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/suspend.h>
#include <asm/mach/time.h>
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index d0234296ae62..e892ee794d64 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -7,7 +7,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
-#include <asm/memory.h>
+#include <asm/page.h>
/*
* Boot code for secondary CPUs.
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 9466ae61f56a..a956b489b6ea 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -11,7 +11,7 @@
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#define SCTLR_MMU 0x01
#define BOOTROM_ADDRESS 0xE6340000
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
index 54f1844eac03..f7e91a772428 100644
--- a/arch/arm/mach-socfpga/headsmp.S
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -6,7 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/assembler.h>
.arch armv7-a
diff --git a/arch/arm/mach-spear/spear.h b/arch/arm/mach-spear/spear.h
index 432efd407c76..f23eaf1e522f 100644
--- a/arch/arm/mach-spear/spear.h
+++ b/arch/arm/mach-spear/spear.h
@@ -10,7 +10,7 @@
#ifndef __MACH_SPEAR_H
#define __MACH_SPEAR_H
-#include <asm/memory.h>
+#include <asm/page.h>
#if defined(CONFIG_ARCH_SPEAR3XX) || defined (CONFIG_ARCH_SPEAR6XX)
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 3a464d1649b4..71c64e92dead 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 905ac2fa2b1e..ad382cee0fdb 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bc4ed5ce3e00..033a1bce2b17 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -25,7 +25,7 @@
#include <linux/sizes.h>
#include <linux/cma.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 059eb4cdc9c2..a9381095ab36 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -15,7 +15,7 @@
#include <asm/domain.h>
#include <asm/fixmap.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/ptdump.h>
static struct addr_marker address_markers[] = {
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ce64bdb55a16..a42e4cd11db2 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -26,7 +26,7 @@
#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
index 46d9f4a622cb..24d71b5db62d 100644
--- a/arch/arm/mm/kasan_init.c
+++ b/arch/arm/mm/kasan_init.c
@@ -17,7 +17,6 @@
#include <asm/cputype.h>
#include <asm/highmem.h>
#include <asm/mach/map.h>
-#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f3a52c08a200..13fc4bb5f792 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -27,7 +27,7 @@
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/procinfo.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/kasan_def.h>
diff --git a/arch/arm/mm/physaddr.c b/arch/arm/mm/physaddr.c
index cf75819e4c13..3f263c840ebc 100644
--- a/arch/arm/mm/physaddr.c
+++ b/arch/arm/mm/physaddr.c
@@ -6,7 +6,7 @@
#include <linux/mm.h>
#include <asm/sections.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/dma.h>
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
index 8359748a19a1..28cdc5468406 100644
--- a/arch/arm/mm/pmsa-v8.c
+++ b/arch/arm/mm/pmsa-v8.c
@@ -11,7 +11,7 @@
#include <asm/cputype.h>
#include <asm/mpu.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/sections.h>
#include "mm.h"
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6b4ef9539b68..193c7aeb6703 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -14,7 +14,7 @@
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 335144d50134..d65a12f851a9 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -9,7 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/v7m.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S
index f8e11f7c7880..1d9f52c71ad0 100644
--- a/arch/arm/mm/pv-fixup-asm.S
+++ b/arch/arm/mm/pv-fixup-asm.S
@@ -9,7 +9,7 @@
#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/cp15.h>
-#include <asm/memory.h>
+#include <asm/page.h>
.section ".idmap.text", "ax"
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 891ab530a665..7856c3a3e35a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -414,6 +414,25 @@ menu "Kernel Features"
menu "ARM errata workarounds via the alternatives framework"
+config AMPERE_ERRATUM_AC03_CPU_38
+ bool "AmpereOne: AC03_CPU_38: Certain bits in the Virtualization Translation Control Register and Translation Control Registers do not follow RES0 semantics"
+ default y
+ help
+ This option adds an alternative code sequence to work around Ampere
+ erratum AC03_CPU_38 on AmpereOne.
+
+ The affected design reports FEAT_HAFDBS as not implemented in
+ ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0
+ as required by the architecture. The unadvertised HAFDBS
+ implementation suffers from an additional erratum where hardware
+ A/D updates can occur after a PTE has been marked invalid.
+
+ The workaround forces KVM to explicitly set VTCR_EL2.HA to 0,
+ which avoids enabling unadvertised hardware Access Flag management
+ at stage-2.
+
+ If unsure, say Y.
+
config ARM64_WORKAROUND_CLEAN_CACHE
bool
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 7a95c324e52a..96e50227f940 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -15,6 +15,9 @@
#define MAX_CPU_FEATURES 128
#define cpu_feature(x) KERNEL_HWCAP_ ## x
+#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR 0
+#define ARM64_SW_FEATURE_OVERRIDE_HVHE 4
+
#ifndef __ASSEMBLY__
#include <linux/bug.h>
@@ -905,6 +908,7 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
return 8;
}
+s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
extern struct arm64_ftr_override id_aa64mmfr1_override;
@@ -915,6 +919,8 @@ extern struct arm64_ftr_override id_aa64smfr0_override;
extern struct arm64_ftr_override id_aa64isar1_override;
extern struct arm64_ftr_override id_aa64isar2_override;
+extern struct arm64_ftr_override arm64_sw_feature_override;
+
u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index f4c3d30bf746..8e5ffb58f83e 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -43,6 +43,11 @@
*/
.macro __init_el2_timers
mov x0, #3 // Enable EL1 physical timers
+ mrs x1, hcr_el2
+ and x1, x1, #HCR_E2H
+ cbz x1, .LnVHE_\@
+ lsl x0, x0, #10
+.LnVHE_\@:
msr cnthctl_el2, x0
msr cntvoff_el2, xzr // Clear virtual offset
.endm
@@ -133,8 +138,15 @@
.endm
/* Coprocessor traps */
-.macro __init_el2_nvhe_cptr
+.macro __init_el2_cptr
+ mrs x1, hcr_el2
+ and x1, x1, #HCR_E2H
+ cbz x1, .LnVHE_\@
+ mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
+ b .Lset_cptr_\@
+.LnVHE_\@:
mov x0, #0x33ff
+.Lset_cptr_\@:
msr cptr_el2, x0 // Disable copro. traps to EL2
.endm
@@ -210,9 +222,8 @@
__init_el2_gicv3
__init_el2_hstr
__init_el2_nvhe_idregs
- __init_el2_nvhe_cptr
+ __init_el2_cptr
__init_el2_fgt
- __init_el2_nvhe_prepare_eret
.endm
#ifndef __KVM_NVHE_HYPERVISOR__
@@ -258,7 +269,17 @@
.Linit_sve_\@: /* SVE register access */
mrs x0, cptr_el2 // Disable SVE traps
+ mrs x1, hcr_el2
+ and x1, x1, #HCR_E2H
+ cbz x1, .Lcptr_nvhe_\@
+
+ // VHE case
+ orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+ b .Lset_cptr_\@
+
+.Lcptr_nvhe_\@: // nVHE case
bic x0, x0, #CPTR_EL2_TZ
+.Lset_cptr_\@:
msr cptr_el2, x0
isb
mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
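The new hcr_el2/E2H checks in __init_el2_timers and __init_el2_cptr exist because the relevant enable bits sit at different positions depending on E2H; for the timers, the lsl #10 moves the same two EL1 physical timer enable bits from CNTHCTL_EL2[1:0] up to [11:10] in the (h)VHE case. A small C sketch of that repositioning (bit layout assumed from the shift in the macro):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the __init_el2_timers logic: the same "enable EL1 physical
 * timer/counter" value lands in different CNTHCTL_EL2 bit positions
 * depending on HCR_EL2.E2H (assumed: bits [1:0] without E2H, bits
 * [11:10] with E2H, as the lsl #10 in the macro implies).
 */
static uint64_t cnthctl_enable_bits(int e2h)
{
	uint64_t val = 3;		/* the two EL1 physical timer enables */

	if (e2h)
		val <<= 10;		/* VHE layout moves them up */
	return val;
}

int main(void)
{
	printf("nVHE: %#llx\n", (unsigned long long)cnthctl_enable_bits(0));
	printf("VHE:  %#llx\n", (unsigned long long)cnthctl_enable_bits(1));
	return 0;
}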
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index cd03819a3b68..cdf6a35e3994 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -316,12 +316,12 @@
_for n, 0, 15, _sve_str_p \n, \nxbase, \n - 16
cbz \save_ffr, 921f
_sve_rdffr 0
- _sve_str_p 0, \nxbase
- _sve_ldr_p 0, \nxbase, -16
b 922f
921:
- str xzr, [x\nxbase] // Zero out FFR
+ _sve_pfalse 0 // Zero out FFR
922:
+ _sve_str_p 0, \nxbase
+ _sve_ldr_p 0, \nxbase, -16
mrs x\nxtmp, fpsr
str w\nxtmp, [\xpfpsr]
mrs x\nxtmp, fpcr
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c6e12e8f2751..58e5eb27da68 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -19,6 +19,7 @@
#define HCR_ATA_SHIFT 56
#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
#define HCR_AMVOFFEN (UL(1) << 51)
+#define HCR_TID4 (UL(1) << 49)
#define HCR_FIEN (UL(1) << 47)
#define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41)
@@ -87,7 +88,7 @@
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TACR | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
- HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID2)
+ HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
@@ -289,7 +290,6 @@
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ (1 << 8)
#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
-#define CPTR_EL2_DEFAULT CPTR_NVHE_EL2_RES1
#define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \
GENMASK(29, 21) | \
GENMASK(19, 14) | \
@@ -351,8 +351,7 @@
ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET)
-#define CPACR_EL1_DEFAULT (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |\
- CPACR_EL1_ZEN_EL1EN)
+#define CPACR_EL1_TTA (1 << 28)
#define kvm_mode_names \
{ PSR_MODE_EL0t, "EL0t" }, \
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 86042afa86c3..7d170aaa2db4 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -68,6 +68,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
+ __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
@@ -225,6 +226,9 @@ extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
int level);
+extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
+ phys_addr_t ipa,
+ int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index b31b32ecbe2d..efc0b45d79c3 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -62,19 +62,14 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = vcpu->kvm;
-
- WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
- &kvm->arch.flags));
-
- return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
+ return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
}
#endif
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
- if (is_kernel_in_hyp_mode())
+ if (has_vhe() || has_hvhe())
vcpu->arch.hcr_el2 |= HCR_E2H;
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
/* route synchronous external abort exceptions to EL2 */
@@ -95,6 +90,12 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_TVM;
}
+ if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+ !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+ vcpu->arch.hcr_el2 |= HCR_TID4;
+ else
+ vcpu->arch.hcr_el2 |= HCR_TID2;
+
if (vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 &= ~HCR_RW;
@@ -570,4 +571,35 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
return test_bit(feature, vcpu->arch.features);
}
+static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
+{
+ u64 val;
+
+ if (has_vhe()) {
+ val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
+ CPACR_EL1_ZEN_EL1EN);
+ } else if (has_hvhe()) {
+ val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+ } else {
+ val = CPTR_NVHE_EL2_RES1;
+
+ if (vcpu_has_sve(vcpu) &&
+ (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
+ val |= CPTR_EL2_TZ;
+ if (cpus_have_final_cap(ARM64_SME))
+ val &= ~CPTR_EL2_TSM;
+ }
+
+ return val;
+}
+
+static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
+{
+ u64 val = kvm_get_reset_cptr_el2(vcpu);
+
+ if (has_vhe() || has_hvhe())
+ write_sysreg(val, cpacr_el1);
+ else
+ write_sysreg(val, cptr_el2);
+}
#endif /* __ARM64_KVM_EMULATE_H__ */
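kvm_get_reset_cptr_el2() above picks the reset value for the FP/SVE trap controls from three layouts: the VHE CPACR_EL1 format, the hVHE variant without the SVE enable, and the nVHE CPTR_EL2 format built on its RES1 bits. A simplified user-space sketch of just that selection (SME handling omitted; the CPACR bit positions are stand-ins, while CPTR_NVHE_EL2_RES1 and CPTR_EL2_TZ match the kvm_arm.h hunk above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* CPACR bit positions are stand-ins; the CPTR values match the patch. */
#define CPACR_EL1_FPEN_EL1EN	(1u << 20)
#define CPACR_EL1_FPEN_EL0EN	(1u << 21)
#define CPACR_EL1_ZEN_EL1EN	(1u << 16)
#define CPTR_NVHE_EL2_RES1	0x000032ffu
#define CPTR_EL2_TZ		(1u << 8)

/* Mirrors the decision tree of kvm_get_reset_cptr_el2(), minus SME. */
static uint64_t reset_cptr(bool vhe, bool hvhe, bool sve_guest_owns_fp)
{
	if (vhe)
		return CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
		       CPACR_EL1_ZEN_EL1EN;
	if (hvhe)
		return CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;

	/* nVHE: keep RES1 bits; trap SVE while an SVE guest owns the FP state. */
	return CPTR_NVHE_EL2_RES1 |
	       (sve_guest_owns_fp ? CPTR_EL2_TZ : 0);
}

int main(void)
{
	printf("VHE:  %#llx\n", (unsigned long long)reset_cptr(true, false, false));
	printf("hVHE: %#llx\n", (unsigned long long)reset_cptr(false, true, false));
	printf("nVHE: %#llx\n", (unsigned long long)reset_cptr(false, false, true));
	return 0;
}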
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d48609d95423..8b6096753740 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -39,6 +39,7 @@
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
#define KVM_VCPU_MAX_FEATURES 7
+#define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
@@ -159,6 +160,21 @@ struct kvm_s2_mmu {
/* The last vcpu id that ran on each physical CPU */
int __percpu *last_vcpu_ran;
+#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
+ /*
+ * Memory cache used to split
+ * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
+ * is used to allocate stage2 page tables while splitting huge
+ * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+	 * influences both the capacity of the split page cache and
+ * how often KVM reschedules. Be wary of raising CHUNK_SIZE
+ * too high.
+ *
+ * Protected by kvm->slots_lock.
+ */
+ struct kvm_mmu_memory_cache split_page_cache;
+ uint64_t split_page_chunk_size;
+
struct kvm_arch *arch;
};
@@ -214,25 +230,23 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_MTE_ENABLED 1
/* At least one vCPU has ran in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
- /*
- * The following two bits are used to indicate the guest's EL1
- * register width configuration. A value of KVM_ARCH_FLAG_EL1_32BIT
- * bit is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
- * Otherwise, the guest's EL1 register width has not yet been
- * determined yet.
- */
-#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED 3
-#define KVM_ARCH_FLAG_EL1_32BIT 4
+ /* The vCPU feature set for the VM is configured */
+#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED 3
/* PSCI SYSTEM_SUSPEND enabled for the guest */
-#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5
+#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 4
/* VM counter offset */
-#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 6
+#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5
/* Timer PPIs made immutable */
-#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 7
+#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
/* SMCCC filter initialized for the VM */
-#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED 8
+#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED 7
+ /* Initial ID reg values loaded */
+#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 8
unsigned long flags;
+ /* VM-wide vCPU feature set */
+ DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);
+
/*
* VM-wide PMU filter, implemented as a bitmap and big enough for
* up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
@@ -242,18 +256,24 @@ struct kvm_arch {
cpumask_var_t supported_cpus;
- u8 pfr0_csv2;
- u8 pfr0_csv3;
- struct {
- u8 imp:4;
- u8 unimp:4;
- } dfr0_pmuver;
-
/* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat;
struct maple_tree smccc_filter;
/*
+ * Emulated CPU ID registers per VM
+ * (Op0, Op1, CRn, CRm, Op2) of the ID registers to be saved in it
+ * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
+ *
+ * These emulated idregs are VM-wide, but accessed from the context of a vCPU.
+	 * Atomic access to multiple idregs is guarded by kvm_arch.config_lock.
+ */
+#define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
+#define IDREG(kvm, id) ((kvm)->arch.id_regs[IDREG_IDX(id)])
+#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
+ u64 id_regs[KVM_ARM_ID_REG_NUM];
+
+ /*
* For an untrusted host VM, 'pkvm.handle' is used to lookup
* the associated pKVM instance in the hypervisor.
*/
@@ -410,6 +430,7 @@ struct kvm_host_data {
struct kvm_host_psci_config {
/* PSCI version used by host. */
u32 version;
+ u32 smccc_version;
/* Function IDs used by host if version is v0.1. */
struct psci_0_1_function_ids function_ids_0_1;
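The per-VM id_regs[] array introduced above is indexed by folding CRm and Op2 of the (3, 0, 0, CRm, Op2) encoding into a flat index, which is how KVM_ARM_ID_REG_NUM works out to 56. A worked stand-alone version of the same packing, with simplified stand-ins for the kernel's sys_reg() encoding helpers:

#include <stdio.h>

/* Minimal stand-ins for the kernel's sys_reg()/sys_reg_CRm()/sys_reg_Op2(). */
#define SYS_REG(op0, op1, crn, crm, op2) \
	(((op0) << 14) | ((op1) << 11) | ((crn) << 7) | ((crm) << 3) | (op2))
#define SYS_REG_CRM(id)	(((id) >> 3) & 0xf)
#define SYS_REG_OP2(id)	((id) & 0x7)

/* Same packing as the new IDREG_IDX(): 1 <= CRm < 8, 0 <= Op2 < 8. */
#define IDREG_IDX(id)	(((SYS_REG_CRM(id) - 1) << 3) | SYS_REG_OP2(id))

int main(void)
{
	unsigned int first = SYS_REG(3, 0, 0, 1, 0);	/* e.g. ID_PFR0_EL1 */
	unsigned int last  = SYS_REG(3, 0, 0, 7, 7);

	printf("first index: %u\n", IDREG_IDX(first));	/* 0  */
	printf("last index:  %u\n", IDREG_IDX(last));	/* 55 */
	printf("array size:  %u\n", IDREG_IDX(last) + 1); /* 56 -> KVM_ARM_ID_REG_NUM */
	return 0;
}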
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index bdd9cf546d95..b7238c72a04c 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -16,12 +16,35 @@ DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
+/*
+ * Unified accessors for registers that have a different encoding
+ * between VHE and non-VHE. They must be specified without their "ELx"
+ * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
+ */
+
+#if defined(__KVM_VHE_HYPERVISOR__)
+
+#define read_sysreg_el0(r) read_sysreg_s(r##_EL02)
+#define write_sysreg_el0(v,r) write_sysreg_s(v, r##_EL02)
+#define read_sysreg_el1(r) read_sysreg_s(r##_EL12)
+#define write_sysreg_el1(v,r) write_sysreg_s(v, r##_EL12)
+#define read_sysreg_el2(r) read_sysreg_s(r##_EL1)
+#define write_sysreg_el2(v,r) write_sysreg_s(v, r##_EL1)
+
+#else // !__KVM_VHE_HYPERVISOR__
+
+#if defined(__KVM_NVHE_HYPERVISOR__)
+#define VHE_ALT_KEY ARM64_KVM_HVHE
+#else
+#define VHE_ALT_KEY ARM64_HAS_VIRT_HOST_EXTN
+#endif
+
#define read_sysreg_elx(r,nvh,vh) \
({ \
u64 reg; \
- asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh), \
+ asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh), \
__mrs_s("%0", r##vh), \
- ARM64_HAS_VIRT_HOST_EXTN) \
+ VHE_ALT_KEY) \
: "=r" (reg)); \
reg; \
})
@@ -31,16 +54,10 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
u64 __val = (u64)(v); \
asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"), \
__msr_s(r##vh, "%x0"), \
- ARM64_HAS_VIRT_HOST_EXTN) \
+ VHE_ALT_KEY) \
: : "rZ" (__val)); \
} while (0)
-/*
- * Unified accessors for registers that have a different encoding
- * between VHE and non-VHE. They must be specified without their "ELx"
- * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
- */
-
#define read_sysreg_el0(r) read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r) write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r) read_sysreg_elx(r, _EL1, _EL12)
@@ -48,6 +65,8 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
#define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1)
+#endif // __KVM_VHE_HYPERVISOR__
+
/*
* Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
* static inline can allow the compiler to out-of-line this. KVM always wants
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 27e63c111f78..0e1e1ab17b4d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -172,6 +172,7 @@ void __init free_hyp_pgds(void);
void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
+void kvm_uninit_stage2_mmu(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
phys_addr_t pa, unsigned long size, bool writable);
@@ -227,7 +228,8 @@ static inline void __invalidate_icache_guest_page(void *va, size_t size)
if (icache_is_aliasing()) {
/* any kind of VIPT cache */
icache_inval_all_pou();
- } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
+ } else if (read_sysreg(CurrentEL) != CurrentEL_EL1 ||
+ !icache_is_vpipt()) {
/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
icache_inval_pou((unsigned long)va, (unsigned long)va + size);
}
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 93bd0975b15f..8294a9a7e566 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -92,6 +92,24 @@ static inline bool kvm_level_supports_block_mapping(u32 level)
return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}
+static inline u32 kvm_supported_block_sizes(void)
+{
+ u32 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
+ u32 r = 0;
+
+ for (; level < KVM_PGTABLE_MAX_LEVELS; level++)
+ r |= BIT(kvm_granule_shift(level));
+
+ return r;
+}
+
+static inline bool kvm_is_block_size_supported(u64 size)
+{
+ bool is_power_of_two = IS_ALIGNED(size, size);
+
+ return is_power_of_two && (size & kvm_supported_block_sizes());
+}
+
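kvm_is_block_size_supported() leans on IS_ALIGNED(size, size), the kernel's usual "(x & (a - 1)) == 0" test applied to a value against itself, which holds exactly for zero and powers of two; the AND with kvm_supported_block_sizes() then rejects zero and any power of two that is not a supported granule. A stand-alone check of that logic with an assumed block-size mask:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local copy of the kernel's IS_ALIGNED() idiom. */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Assumed mask for illustration: 4KiB pages with 2MiB and 1GiB blocks. */
#define SUPPORTED_BLOCK_SIZES	((1ull << 12) | (1ull << 21) | (1ull << 30))

static bool is_block_size_supported(uint64_t size)
{
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & SUPPORTED_BLOCK_SIZES);
}

int main(void)
{
	uint64_t sizes[] = { 0, 4096, 8192, 2ull << 20, 3ull << 20, 1ull << 30 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%#llx -> %d\n", (unsigned long long)sizes[i],
		       is_block_size_supported(sizes[i]));
	return 0;
}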
/**
* struct kvm_pgtable_mm_ops - Memory management callbacks.
* @zalloc_page: Allocate a single zeroed memory page.
@@ -104,7 +122,7 @@ static inline bool kvm_level_supports_block_mapping(u32 level)
* allocation is physically contiguous.
* @free_pages_exact: Free an exact number of memory pages previously
* allocated by zalloc_pages_exact.
- * @free_removed_table: Free a removed paging structure by unlinking and
+ * @free_unlinked_table: Free an unlinked paging structure by unlinking and
* dropping references.
* @get_page: Increment the refcount on a page.
* @put_page: Decrement the refcount on a page. When the
@@ -124,7 +142,7 @@ struct kvm_pgtable_mm_ops {
void* (*zalloc_page)(void *arg);
void* (*zalloc_pages_exact)(size_t size);
void (*free_pages_exact)(void *addr, size_t size);
- void (*free_removed_table)(void *addr, u32 level);
+ void (*free_unlinked_table)(void *addr, u32 level);
void (*get_page)(void *addr);
void (*put_page)(void *addr);
int (*page_count)(void *addr);
@@ -195,6 +213,12 @@ typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
* with other software walkers.
* @KVM_PGTABLE_WALK_HANDLE_FAULT: Indicates the page-table walk was
* invoked from a fault handler.
+ * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI: Visit and update table entries
+ * without Break-before-make's
+ * TLB invalidation.
+ * @KVM_PGTABLE_WALK_SKIP_CMO: Visit and update table entries
+ * without performing cache
+ * maintenance operations (CMOs).
*/
enum kvm_pgtable_walk_flags {
KVM_PGTABLE_WALK_LEAF = BIT(0),
@@ -202,6 +226,8 @@ enum kvm_pgtable_walk_flags {
KVM_PGTABLE_WALK_TABLE_POST = BIT(2),
KVM_PGTABLE_WALK_SHARED = BIT(3),
KVM_PGTABLE_WALK_HANDLE_FAULT = BIT(4),
+ KVM_PGTABLE_WALK_SKIP_BBM_TLBI = BIT(5),
+ KVM_PGTABLE_WALK_SKIP_CMO = BIT(6),
};
struct kvm_pgtable_visit_ctx {
@@ -441,7 +467,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
/**
- * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure.
+ * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
* @mm_ops: Memory management callbacks.
* @pgtable: Unlinked stage-2 paging structure to be freed.
* @level: Level of the stage-2 paging structure to be freed.
@@ -449,7 +475,33 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
* The page-table is assumed to be unreachable by any hardware walkers prior to
* freeing and therefore no TLB invalidation is performed.
*/
-void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
+
+/**
+ * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
+ * @phys: Physical address of the memory to map.
+ * @level: Starting level of the stage-2 paging structure to be created.
+ * @prot: Permissions and attributes for the mapping.
+ * @mc: Cache of pre-allocated and zeroed memory from which to allocate
+ * page-table pages.
+ * @force_pte: Force mappings to PAGE_SIZE granularity.
+ *
+ * Returns an unlinked page-table tree. This new page-table tree is
+ * not reachable (i.e., it is unlinked) from the root pgd and it's
+ * therefore unreachable by the hardware page-table walker. No TLB
+ * invalidation or CMOs are performed.
+ *
+ * If device attributes are not explicitly requested in @prot, then the
+ * mapping will be normal, cacheable.
+ *
+ * Return: The fully populated (unlinked) stage-2 paging structure, or
+ * an ERR_PTR(error) on failure.
+ */
+kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
+ u64 phys, u32 level,
+ enum kvm_pgtable_prot prot,
+ void *mc, bool force_pte);
/**
* kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
@@ -621,6 +673,25 @@ bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
+ * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
+ * to PAGE_SIZE guest pages.
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @addr: Intermediate physical address from which to split.
+ * @size: Size of the range.
+ * @mc: Cache of pre-allocated and zeroed memory from which to allocate
+ * page-table pages.
+ *
+ * The function tries to split any level 1 or 2 entry that overlaps
+ * with the input range (given by @addr and @size).
+ *
+ * Return: 0 on success, negative error code on failure. Note that
+ * kvm_pgtable_stage2_split() is best effort: it tries to break as many
+ * blocks in the input range as allowed by the capacity of @mc.
+ */
+int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ struct kvm_mmu_memory_cache *mc);
+
+/**
* kvm_pgtable_walk() - Walk a page-table.
* @pgt: Page-table structure initialised by kvm_pgtable_*_init().
* @addr: Input address for the start of the walk.
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index 01129b0d4c68..e46250a02017 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -6,7 +6,9 @@
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__
+#include <linux/arm_ffa.h>
#include <linux/memblock.h>
+#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>
/* Maximum number of VMs that can co-exist under pKVM. */
@@ -106,4 +108,23 @@ static inline unsigned long host_s2_pgtable_pages(void)
return res;
}
+#define KVM_FFA_MBOX_NR_PAGES 1
+
+static inline unsigned long hyp_ffa_proxy_pages(void)
+{
+ size_t desc_max;
+
+ /*
+ * The hypervisor FFA proxy needs enough memory to buffer a fragmented
+ * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
+ */
+ desc_max = sizeof(struct ffa_mem_region) +
+ sizeof(struct ffa_mem_region_attributes) +
+ sizeof(struct ffa_composite_mem_region) +
+ SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);
+
+ /* Plus a page each for the hypervisor's RX and TX mailboxes. */
+ return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
+}
+
#endif /* __ARM64_KVM_PKVM_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 6e0e5722f229..fde4186cc387 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -333,6 +333,14 @@ static inline void *phys_to_virt(phys_addr_t x)
return (void *)(__phys_to_virt(x));
}
+/* Needed already here for resolving __phys_to_pfn() in virt_to_pfn() */
+#include <asm-generic/memory_model.h>
+
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __phys_to_pfn(virt_to_phys(kaddr));
+}
+
/*
* Drivers should NOT use these either.
*/
@@ -341,7 +349,6 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
/*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7a1e62631814..b481935e9314 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -510,6 +510,7 @@
(BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
(BIT(29)))
+#define SCTLR_EL2_BT (BIT(36))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
#else
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 4eb601e7de50..5227db7640c8 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -110,8 +110,10 @@ static inline bool is_hyp_mode_mismatched(void)
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
-static inline bool is_kernel_in_hyp_mode(void)
+static __always_inline bool is_kernel_in_hyp_mode(void)
{
+ BUILD_BUG_ON(__is_defined(__KVM_NVHE_HYPERVISOR__) ||
+ __is_defined(__KVM_VHE_HYPERVISOR__));
return read_sysreg(CurrentEL) == CurrentEL_EL2;
}
@@ -140,6 +142,14 @@ static __always_inline bool is_protected_kvm_enabled(void)
return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE);
}
+static __always_inline bool has_hvhe(void)
+{
+ if (is_vhe_hyp_code())
+ return false;
+
+ return cpus_have_final_cap(ARM64_KVM_HVHE);
+}
+
static inline bool is_hyp_nvhe(void)
{
return is_hyp_mode_available() && !is_kernel_in_hyp_mode();
diff --git a/arch/arm64/include/uapi/asm/bitsperlong.h b/arch/arm64/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 485d60bee26c..000000000000
--- a/arch/arm64/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_BITSPERLONG_H
-#define __ASM_BITSPERLONG_H
-
-#define __BITS_PER_LONG 64
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* __ASM_BITSPERLONG_H */
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 757d01a68ffd..5ff1942b04fc 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -213,9 +213,9 @@ int main(void)
DEFINE(FGRET_REGS_X7, offsetof(struct fgraph_ret_regs, regs[7]));
DEFINE(FGRET_REGS_FP, offsetof(struct fgraph_ret_regs, fp));
DEFINE(FGRET_REGS_SIZE, sizeof(struct fgraph_ret_regs));
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
DEFINE(FTRACE_OPS_DIRECT_CALL, offsetof(struct ftrace_ops, direct_call));
#endif
-#endif
return 0;
}
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 307faa2b4395..be66e94a21bd 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -730,6 +730,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.cpu_enable = cpu_clear_bf16_from_user_emulation,
},
#endif
+#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
+ {
+ .desc = "AmpereOne erratum AC03_CPU_38",
+ .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6ea7f23b1287..f9d456fe132d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -672,6 +672,8 @@ struct arm64_ftr_override __ro_after_init id_aa64smfr0_override;
struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
struct arm64_ftr_override __ro_after_init id_aa64isar2_override;
+struct arm64_ftr_override arm64_sw_feature_override;
+
static const struct __ftr_reg_entry {
u32 sys_id;
struct arm64_ftr_reg *reg;
@@ -807,7 +809,7 @@ static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
return reg;
}
-static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
+s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
s64 cur)
{
s64 ret = 0;
@@ -2009,6 +2011,19 @@ static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap,
return true;
}
+static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ u64 val;
+
+ val = read_sysreg(id_aa64mmfr1_el1);
+ if (!cpuid_feature_extract_unsigned_field(val, ID_AA64MMFR1_EL1_VH_SHIFT))
+ return false;
+
+ val = arm64_sw_feature_override.val & arm64_sw_feature_override.mask;
+ return cpuid_feature_extract_unsigned_field(val, ARM64_SW_FEATURE_OVERRIDE_HVHE);
+}
+
#ifdef CONFIG_ARM64_PAN
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
@@ -2683,6 +2698,23 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, S1PIE, IMP)
},
+ {
+ .desc = "VHE for hypervisor only",
+ .capability = ARM64_KVM_HVHE,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = hvhe_possible,
+ },
+ {
+ .desc = "Enhanced Virtualization Traps",
+ .capability = ARM64_HAS_EVT,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_EL1_EVT_SHIFT,
+ .field_width = 4,
+ .min_field_value = ID_AA64MMFR2_EL1_EVT_IMP,
+ .matches = has_cpuid_feature,
+ },
{},
};
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0f5a30f109d9..757a0de07f91 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -603,6 +603,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr sctlr_el1, x1
mov x2, xzr
2:
+ __init_el2_nvhe_prepare_eret
+
mov w0, #BOOT_CPU_MODE_EL2
orr x0, x0, x2
eret
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index d63de1973ddb..65f76064c86b 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -82,7 +82,15 @@ SYM_CODE_START_LOCAL(__finalise_el2)
tbnz x1, #0, 1f
// Needs to be VHE capable, obviously
- check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f x1 x2
+ check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 0f 1f x1 x2
+
+0: // Check whether we only want the hypervisor to run VHE, not the kernel
+ adr_l x1, arm64_sw_feature_override
+ ldr x2, [x1, FTR_OVR_VAL_OFFSET]
+ ldr x1, [x1, FTR_OVR_MASK_OFFSET]
+ and x2, x2, x1
+ ubfx x2, x2, #ARM64_SW_FEATURE_OVERRIDE_HVHE, #4
+ cbz x2, 2f
1: mov_q x0, HVC_STUB_ERR
eret
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 8439248c21d3..2fe2491b692c 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -139,15 +139,22 @@ static const struct ftr_set_desc smfr0 __initconst = {
},
};
-extern struct arm64_ftr_override kaslr_feature_override;
+static bool __init hvhe_filter(u64 val)
+{
+ u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1);
+
+ return (val == 1 &&
+ lower_32_bits(__boot_status) == BOOT_CPU_MODE_EL2 &&
+ cpuid_feature_extract_unsigned_field(mmfr1,
+ ID_AA64MMFR1_EL1_VH_SHIFT));
+}
-static const struct ftr_set_desc kaslr __initconst = {
- .name = "kaslr",
-#ifdef CONFIG_RANDOMIZE_BASE
- .override = &kaslr_feature_override,
-#endif
+static const struct ftr_set_desc sw_features __initconst = {
+ .name = "arm64_sw",
+ .override = &arm64_sw_feature_override,
.fields = {
- FIELD("disabled", 0, NULL),
+ FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
+ FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
{}
},
};
@@ -159,7 +166,7 @@ static const struct ftr_set_desc * const regs[] __initconst = {
&isar1,
&isar2,
&smfr0,
- &kaslr,
+ &sw_features,
};
static const struct {
@@ -177,7 +184,7 @@ static const struct {
"id_aa64isar2.gpa3=0 id_aa64isar2.apa3=0" },
{ "arm64.nomops", "id_aa64isar2.mops=0" },
{ "arm64.nomte", "id_aa64pfr1.mte=0" },
- { "nokaslr", "kaslr.disabled=1" },
+ { "nokaslr", "arm64_sw.nokaslr=1" },
};
static int __init parse_nokaslr(char *unused)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 17f96a19781d..94a269cd1f07 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -12,13 +12,13 @@
u16 __initdata memstart_offset_seed;
-struct arm64_ftr_override kaslr_feature_override __initdata;
-
bool __ro_after_init __kaslr_is_enabled = false;
void __init kaslr_init(void)
{
- if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
+ if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val &
+ arm64_sw_feature_override.mask,
+ ARM64_SW_FEATURE_OVERRIDE_NOKASLR)) {
pr_info("KASLR disabled on command line\n");
return;
}
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index d014162c5c71..2f73e5bca213 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -65,11 +65,9 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common \
-Werror-implicit-function-declaration \
-Wno-format-security \
- -Wdeclaration-after-statement \
-std=gnu11
VDSO_CFLAGS += -O2
# Some useful compiler-dependent flags from top-level Makefile
-VDSO_CFLAGS += $(call cc32-option,-Wdeclaration-after-statement,)
VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
VDSO_CFLAGS += -fno-strict-overflow
VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes)
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 05b022be885b..0696732fa38c 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -1406,7 +1406,7 @@ int __init kvm_timer_hyp_init(bool has_gic)
kvm_get_running_vcpus());
if (err) {
kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
- goto out_free_irq;
+ goto out_free_vtimer_irq;
}
static_branch_enable(&has_gic_active_state);
@@ -1422,7 +1422,7 @@ int __init kvm_timer_hyp_init(bool has_gic)
if (err) {
kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
host_ptimer_irq, err);
- return err;
+ goto out_free_vtimer_irq;
}
if (has_gic) {
@@ -1430,7 +1430,7 @@ int __init kvm_timer_hyp_init(bool has_gic)
kvm_get_running_vcpus());
if (err) {
kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
- goto out_free_irq;
+ goto out_free_ptimer_irq;
}
}
@@ -1439,11 +1439,15 @@ int __init kvm_timer_hyp_init(bool has_gic)
kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
info->physical_irq);
err = -ENODEV;
- goto out_free_irq;
+ goto out_free_vtimer_irq;
}
return 0;
-out_free_irq:
+
+out_free_ptimer_irq:
+ if (info->physical_irq > 0)
+ free_percpu_irq(host_ptimer_irq, kvm_get_running_vcpus());
+out_free_vtimer_irq:
free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
return err;
}
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 14391826241c..c2c14059f6a8 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -51,6 +51,8 @@ DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
+DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+
static bool vgic_present;
static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
@@ -65,6 +67,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
int r;
+ u64 new_cap;
if (cap->flags)
return -EINVAL;
@@ -89,6 +92,24 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
break;
+ case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
+ new_cap = cap->args[0];
+
+ mutex_lock(&kvm->slots_lock);
+ /*
+ * To keep things simple, allow changing the chunk
+ * size only when no memory slots have been created.
+ */
+ if (!kvm_are_all_memslots_empty(kvm)) {
+ r = -EINVAL;
+ } else if (new_cap && !kvm_is_block_size_supported(new_cap)) {
+ r = -EINVAL;
+ } else {
+ r = 0;
+ kvm->arch.mmu.split_page_chunk_size = new_cap;
+ }
+ mutex_unlock(&kvm->slots_lock);
+ break;
default:
r = -EINVAL;
break;
@@ -102,22 +123,6 @@ static int kvm_arm_default_max_vcpus(void)
return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}
-static void set_default_spectre(struct kvm *kvm)
-{
- /*
- * The default is to expose CSV2 == 1 if the HW isn't affected.
- * Although this is a per-CPU feature, we make it global because
- * asymmetric systems are just a nuisance.
- *
- * Userspace can override this as long as it doesn't promise
- * the impossible.
- */
- if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
- kvm->arch.pfr0_csv2 = 1;
- if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
- kvm->arch.pfr0_csv3 = 1;
-}
-
/**
* kvm_arch_init_vm - initializes a VM data structure
* @kvm: pointer to the KVM struct
@@ -161,14 +166,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* The maximum number of VCPUs is limited by the host's GIC model */
kvm->max_vcpus = kvm_arm_default_max_vcpus();
- set_default_spectre(kvm);
kvm_arm_init_hypercalls(kvm);
- /*
- * Initialise the default PMUver before there is a chance to
- * create an actual PMU.
- */
- kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
+ bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
return 0;
@@ -302,6 +302,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_PTRAUTH_GENERIC:
r = system_has_full_ptr_auth();
break;
+ case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
+ if (kvm)
+ r = kvm->arch.mmu.split_page_chunk_size;
+ else
+ r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
+ break;
+ case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
+ r = kvm_supported_block_sizes();
+ break;
default:
r = 0;
}
@@ -1167,58 +1176,115 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
return -EINVAL;
}
-static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init)
+static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
{
- unsigned int i, ret;
- u32 phys_target = kvm_target_cpu();
+ unsigned long features = init->features[0];
+ int i;
+
+ if (features & ~KVM_VCPU_VALID_FEATURES)
+ return -ENOENT;
+
+ for (i = 1; i < ARRAY_SIZE(init->features); i++) {
+ if (init->features[i])
+ return -ENOENT;
+ }
+
+ if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
+ return 0;
- if (init->target != phys_target)
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
return -EINVAL;
- /*
- * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
- * use the same target.
- */
- if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
+ /* MTE is incompatible with AArch32 */
+ if (kvm_has_mte(vcpu->kvm))
return -EINVAL;
- /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
- for (i = 0; i < sizeof(init->features) * 8; i++) {
- bool set = (init->features[i / 32] & (1 << (i % 32)));
+ /* NV is incompatible with AArch32 */
+ if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
+ return -EINVAL;
- if (set && i >= KVM_VCPU_MAX_FEATURES)
- return -ENOENT;
+ return 0;
+}
- /*
- * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
- * use the same feature set.
- */
- if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
- test_bit(i, vcpu->arch.features) != set)
- return -EINVAL;
+static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned long features = init->features[0];
- if (set)
- set_bit(i, vcpu->arch.features);
- }
+ return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) ||
+ vcpu->arch.target != init->target;
+}
+
+static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned long features = init->features[0];
+ struct kvm *kvm = vcpu->kvm;
+ int ret = -EINVAL;
+
+ mutex_lock(&kvm->arch.config_lock);
- vcpu->arch.target = phys_target;
+ if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
+ !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
+ goto out_unlock;
+
+ vcpu->arch.target = init->target;
+ bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
/* Now we know what it is, we can reset it. */
ret = kvm_reset_vcpu(vcpu);
if (ret) {
vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+ goto out_unlock;
}
+ bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
+ set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
+
+out_unlock:
+ mutex_unlock(&kvm->arch.config_lock);
return ret;
}
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ int ret;
+
+ if (init->target != kvm_target_cpu())
+ return -EINVAL;
+
+ ret = kvm_vcpu_init_check_features(vcpu, init);
+ if (ret)
+ return ret;
+
+ if (vcpu->arch.target == -1)
+ return __kvm_vcpu_set_target(vcpu, init);
+
+ if (kvm_vcpu_init_changed(vcpu, init))
+ return -EINVAL;
+
+ return kvm_reset_vcpu(vcpu);
+}
+
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
struct kvm_vcpu_init *init)
{
+ bool power_off = false;
int ret;
+ /*
+ * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid
+ * reflecting it in the finalized feature set, thus limiting its scope
+ * to a single KVM_ARM_VCPU_INIT call.
+ */
+ if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
+ init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
+ power_off = true;
+ }
+
ret = kvm_vcpu_set_target(vcpu, init);
if (ret)
return ret;
@@ -1240,14 +1306,14 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
}
vcpu_reset_hcr(vcpu);
- vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
+ vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
/*
* Handle the "start in power-off" case.
*/
spin_lock(&vcpu->arch.mp_state_lock);
- if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+ if (power_off)
__kvm_arm_vcpu_power_off(vcpu);
else
WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
@@ -1666,7 +1732,13 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
params->mair_el2 = read_sysreg(mair_el1);
- tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
+ tcr = read_sysreg(tcr_el1);
+ if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
+ tcr |= TCR_EPD1_MASK;
+ } else {
+ tcr &= TCR_EL2_MASK;
+ tcr |= TCR_EL2_RES1;
+ }
tcr &= ~TCR_T0SZ_MASK;
tcr |= TCR_T0SZ(hyp_va_bits);
params->tcr_el2 = tcr;
@@ -1676,6 +1748,8 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
else
params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
+ if (cpus_have_final_cap(ARM64_KVM_HVHE))
+ params->hcr_el2 |= HCR_E2H;
params->vttbr = params->vtcr = 0;
/*
@@ -1910,6 +1984,7 @@ static bool __init init_psci_relay(void)
}
kvm_host_psci_config.version = psci_ops.get_version();
+ kvm_host_psci_config.smccc_version = arm_smccc_get_version();
if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
@@ -2067,6 +2142,26 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
return 0;
}
+static void pkvm_hyp_init_ptrauth(void)
+{
+ struct kvm_cpu_context *hyp_ctxt;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
+ hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
+ hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
+ hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
+ hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
+ hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
+ hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
+ hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
+ hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
+ hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
+ hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
+ }
+}
+
/* Inits Hyp-mode on all online CPUs */
static int __init init_hyp_mode(void)
{
@@ -2228,6 +2323,10 @@ static int __init init_hyp_mode(void)
kvm_hyp_init_symbols();
if (is_protected_kvm_enabled()) {
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
+ cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH))
+ pkvm_hyp_init_ptrauth();
+
init_cpu_logical_map();
if (!init_psci_relay()) {
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 4c9dcd8fc939..8c1d0d4853df 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -180,7 +180,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
/*
* If we have VHE then the Hyp code will reset CPACR_EL1 to
- * CPACR_EL1_DEFAULT and we need to reenable SME.
+ * the default value and we need to reenable SME.
*/
if (has_vhe() && system_supports_sme()) {
/* Also restore EL0 state seen on entry */
@@ -210,7 +210,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
/*
* The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
- * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+ * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
* for EL0. To avoid spurious traps, restore the trap state
* seen by kvm_arch_vcpu_load_fp():
*/
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 2f6e0b3e4a75..4bddb8541bec 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -70,6 +70,56 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
}
}
+static inline bool __hfgxtr_traps_required(void)
+{
+ if (cpus_have_final_cap(ARM64_SME))
+ return true;
+
+ if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+ return true;
+
+ return false;
+}
+
+static inline void __activate_traps_hfgxtr(void)
+{
+ u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
+
+ if (cpus_have_final_cap(ARM64_SME)) {
+ tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
+
+ r_clr |= tmp;
+ w_clr |= tmp;
+ }
+
+ /*
+ * Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
+ */
+ if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+ w_set |= HFGxTR_EL2_TCR_EL1_MASK;
+
+ sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
+ sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
+}
+
+static inline void __deactivate_traps_hfgxtr(void)
+{
+ u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
+
+ if (cpus_have_final_cap(ARM64_SME)) {
+ tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
+
+ r_set |= tmp;
+ w_set |= tmp;
+ }
+
+ if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+ w_clr |= HFGxTR_EL2_TCR_EL1_MASK;
+
+ sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
+ sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
+}
+
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
@@ -95,16 +145,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
- if (cpus_have_final_cap(ARM64_SME)) {
- sysreg_clear_set_s(SYS_HFGRTR_EL2,
- HFGxTR_EL2_nSMPRI_EL1_MASK |
- HFGxTR_EL2_nTPIDR2_EL0_MASK,
- 0);
- sysreg_clear_set_s(SYS_HFGWTR_EL2,
- HFGxTR_EL2_nSMPRI_EL1_MASK |
- HFGxTR_EL2_nTPIDR2_EL0_MASK,
- 0);
- }
+ if (__hfgxtr_traps_required())
+ __activate_traps_hfgxtr();
}
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -120,14 +162,8 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}
- if (cpus_have_final_cap(ARM64_SME)) {
- sysreg_clear_set_s(SYS_HFGRTR_EL2, 0,
- HFGxTR_EL2_nSMPRI_EL1_MASK |
- HFGxTR_EL2_nTPIDR2_EL0_MASK);
- sysreg_clear_set_s(SYS_HFGWTR_EL2, 0,
- HFGxTR_EL2_nSMPRI_EL1_MASK |
- HFGxTR_EL2_nTPIDR2_EL0_MASK);
- }
+ if (__hfgxtr_traps_required())
+ __deactivate_traps_hfgxtr();
}
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
@@ -209,7 +245,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
/* Valid trap. Switch the context: */
/* First disable enough traps to allow us to update the registers */
- if (has_vhe()) {
+ if (has_vhe() || has_hvhe()) {
reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
if (sve_guest)
reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
@@ -401,12 +437,39 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
return true;
}
+static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
+{
+ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
+ u64 val = vcpu_get_reg(vcpu, rt);
+
+ if (sysreg != SYS_TCR_EL1)
+ return false;
+
+ /*
+ * Affected parts do not advertise support for hardware Access Flag /
+ * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
+ * control bits are still functional. The architecture requires these be
+ * RES0 on systems that do not implement FEAT_HAFDBS.
+ *
+ * Uphold the requirements of the architecture by masking guest writes
+ * to TCR_EL1.{HA,HD} here.
+ */
+ val &= ~(TCR_HD | TCR_HA);
+ write_sysreg_el1(val, SYS_TCR);
+ return true;
+}
+
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
handle_tx2_tvm(vcpu))
return true;
+ if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
+ handle_ampere1_tcr(vcpu))
+ return true;
+
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
__vgic_v3_perform_cpuif_access(vcpu) == 1)
return true;
diff --git a/arch/arm64/kvm/hyp/include/nvhe/ffa.h b/arch/arm64/kvm/hyp/include/nvhe/ffa.h
new file mode 100644
index 000000000000..1becb10ecd80
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/ffa.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 - Google LLC
+ * Author: Andrew Walbran <qwandor@google.com>
+ */
+#ifndef __KVM_HYP_FFA_H
+#define __KVM_HYP_FFA_H
+
+#include <asm/kvm_host.h>
+
+#define FFA_MIN_FUNC_NUM 0x60
+#define FFA_MAX_FUNC_NUM 0x7F
+
+int hyp_ffa_init(void *pages);
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
+
+#endif /* __KVM_HYP_FFA_H */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index b7bdbe63deed..0972faccc2af 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -57,6 +57,7 @@ extern struct host_mmu host_mmu;
enum pkvm_component_id {
PKVM_ID_HOST,
PKVM_ID_HYP,
+ PKVM_ID_FFA,
};
extern unsigned long hyp_nr_cpus;
@@ -66,6 +67,8 @@ int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
+int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
+int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 530347cdebe3..9ddc025e4b86 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -22,7 +22,7 @@ lib-objs := $(addprefix ../../../lib/, $(lib-objs))
hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \
- cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o
+ cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
new file mode 100644
index 000000000000..58dcd92bf346
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -0,0 +1,762 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
+ * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
+ * Framework for Arm A-profile", which is specified by Arm in document
+ * number DEN0077.
+ *
+ * Copyright (C) 2022 - Google LLC
+ * Author: Andrew Walbran <qwandor@google.com>
+ *
+ * This driver hooks into the SMC trapping logic for the host and intercepts
+ * all calls falling within the FF-A range. Each call is either:
+ *
+ * - Forwarded on unmodified to the SPMD at EL3
+ * - Rejected as "unsupported"
+ * - Accompanied by a host stage-2 page-table check/update and reissued
+ *
+ * Consequently, any attempts by the host to make guest memory pages
+ * accessible to the secure world using FF-A will be detected either here
+ * (in the case that the memory is already owned by the guest) or during
+ * donation to the guest (in the case that the memory was previously shared
+ * with the secure world).
+ *
+ * To allow the rolling-back of page-table updates and FF-A calls in the
+ * event of failure, operations involving the RXTX buffers are locked for
+ * the duration and are therefore serialised.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/arm_ffa.h>
+#include <asm/kvm_pkvm.h>
+
+#include <nvhe/ffa.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/memory.h>
+#include <nvhe/trap_handler.h>
+#include <nvhe/spinlock.h>
+
+/*
+ * "ID value 0 must be returned at the Non-secure physical FF-A instance"
+ * We share this ID with the host.
+ */
+#define HOST_FFA_ID 0
+
+/*
+ * A buffer to hold the maximum descriptor size we can see from the host,
+ * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
+ * when resolving the handle on the reclaim path.
+ */
+struct kvm_ffa_descriptor_buffer {
+ void *buf;
+ size_t len;
+};
+
+static struct kvm_ffa_descriptor_buffer ffa_desc_buf;
+
+struct kvm_ffa_buffers {
+ hyp_spinlock_t lock;
+ void *tx;
+ void *rx;
+};
+
+/*
+ * Note that we don't currently lock these buffers explicitly, instead
+ * relying on the locking of the host FFA buffers as we only have one
+ * client.
+ */
+static struct kvm_ffa_buffers hyp_buffers;
+static struct kvm_ffa_buffers host_buffers;
+
+static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
+{
+ *res = (struct arm_smccc_res) {
+ .a0 = FFA_ERROR,
+ .a2 = ffa_errno,
+ };
+}
+
+static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
+{
+ if (ret == FFA_RET_SUCCESS) {
+ *res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
+ .a2 = prop };
+ } else {
+ ffa_to_smccc_error(res, ret);
+ }
+}
+
+static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
+{
+ ffa_to_smccc_res_prop(res, ret, 0);
+}
+
+static void ffa_set_retval(struct kvm_cpu_context *ctxt,
+ struct arm_smccc_res *res)
+{
+ cpu_reg(ctxt, 0) = res->a0;
+ cpu_reg(ctxt, 1) = res->a1;
+ cpu_reg(ctxt, 2) = res->a2;
+ cpu_reg(ctxt, 3) = res->a3;
+}
+
+static bool is_ffa_call(u64 func_id)
+{
+ return ARM_SMCCC_IS_FAST_CALL(func_id) &&
+ ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
+ ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
+ ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
+}
+
+static int ffa_map_hyp_buffers(u64 ffa_page_count)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
+ hyp_virt_to_phys(hyp_buffers.tx),
+ hyp_virt_to_phys(hyp_buffers.rx),
+ ffa_page_count,
+ 0, 0, 0, 0,
+ &res);
+
+ return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
+}
+
+static int ffa_unmap_hyp_buffers(void)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
+ HOST_FFA_ID,
+ 0, 0, 0, 0, 0, 0,
+ &res);
+
+ return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
+}
+
+static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
+ u32 handle_hi, u32 fraglen, u32 endpoint_id)
+{
+ arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
+ handle_lo, handle_hi, fraglen, endpoint_id,
+ 0, 0, 0,
+ res);
+}
+
+static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
+ u32 handle_hi, u32 fragoff)
+{
+ arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
+ handle_lo, handle_hi, fragoff, HOST_FFA_ID,
+ 0, 0, 0,
+ res);
+}
+
+static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
+ u32 fraglen)
+{
+ arm_smccc_1_1_smc(func_id, len, fraglen,
+ 0, 0, 0, 0, 0,
+ res);
+}
+
+static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
+ u32 handle_hi, u32 flags)
+{
+ arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
+ handle_lo, handle_hi, flags,
+ 0, 0, 0, 0,
+ res);
+}
+
+static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
+{
+ arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
+ len, len,
+ 0, 0, 0, 0, 0,
+ res);
+}
+
+static void do_ffa_rxtx_map(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(phys_addr_t, tx, ctxt, 1);
+ DECLARE_REG(phys_addr_t, rx, ctxt, 2);
+ DECLARE_REG(u32, npages, ctxt, 3);
+ int ret = 0;
+ void *rx_virt, *tx_virt;
+
+ if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ hyp_spin_lock(&host_buffers.lock);
+ if (host_buffers.tx) {
+ ret = FFA_RET_DENIED;
+ goto out_unlock;
+ }
+
+ /*
+ * Map our hypervisor buffers into the SPMD before mapping and
+ * pinning the host buffers in our own address space.
+ */
+ ret = ffa_map_hyp_buffers(npages);
+ if (ret)
+ goto out_unlock;
+
+ ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
+ if (ret) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto err_unmap;
+ }
+
+ ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
+ if (ret) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto err_unshare_tx;
+ }
+
+ tx_virt = hyp_phys_to_virt(tx);
+ ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
+ if (ret) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto err_unshare_rx;
+ }
+
+ rx_virt = hyp_phys_to_virt(rx);
+ ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
+ if (ret) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto err_unpin_tx;
+ }
+
+ host_buffers.tx = tx_virt;
+ host_buffers.rx = rx_virt;
+
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+out:
+ ffa_to_smccc_res(res, ret);
+ return;
+
+err_unpin_tx:
+ hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
+err_unshare_rx:
+ __pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
+err_unshare_tx:
+ __pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
+err_unmap:
+ ffa_unmap_hyp_buffers();
+ goto out_unlock;
+}
+
+static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, id, ctxt, 1);
+ int ret = 0;
+
+ if (id != HOST_FFA_ID) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ hyp_spin_lock(&host_buffers.lock);
+ if (!host_buffers.tx) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
+ WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
+ host_buffers.tx = NULL;
+
+ hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
+ WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
+ host_buffers.rx = NULL;
+
+ ffa_unmap_hyp_buffers();
+
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+out:
+ ffa_to_smccc_res(res, ret);
+}
+
+static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
+ u32 nranges)
+{
+ u32 i;
+
+ for (i = 0; i < nranges; ++i) {
+ struct ffa_mem_region_addr_range *range = &ranges[i];
+ u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
+ u64 pfn = hyp_phys_to_pfn(range->address);
+
+ if (!PAGE_ALIGNED(sz))
+ break;
+
+ if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
+ break;
+ }
+
+ return i;
+}
+
+static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
+ u32 nranges)
+{
+ u32 i;
+
+ for (i = 0; i < nranges; ++i) {
+ struct ffa_mem_region_addr_range *range = &ranges[i];
+ u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
+ u64 pfn = hyp_phys_to_pfn(range->address);
+
+ if (!PAGE_ALIGNED(sz))
+ break;
+
+ if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
+ break;
+ }
+
+ return i;
+}
+
+static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
+ u32 nranges)
+{
+ u32 nshared = __ffa_host_share_ranges(ranges, nranges);
+ int ret = 0;
+
+ if (nshared != nranges) {
+ WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
+ ret = FFA_RET_DENIED;
+ }
+
+ return ret;
+}
+
+static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
+ u32 nranges)
+{
+ u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
+ int ret = 0;
+
+ if (nunshared != nranges) {
+ WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
+ ret = FFA_RET_DENIED;
+ }
+
+ return ret;
+}
+
+static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, handle_lo, ctxt, 1);
+ DECLARE_REG(u32, handle_hi, ctxt, 2);
+ DECLARE_REG(u32, fraglen, ctxt, 3);
+ DECLARE_REG(u32, endpoint_id, ctxt, 4);
+ struct ffa_mem_region_addr_range *buf;
+ int ret = FFA_RET_INVALID_PARAMETERS;
+ u32 nr_ranges;
+
+ if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
+ goto out;
+
+ if (fraglen % sizeof(*buf))
+ goto out;
+
+ hyp_spin_lock(&host_buffers.lock);
+ if (!host_buffers.tx)
+ goto out_unlock;
+
+ buf = hyp_buffers.tx;
+ memcpy(buf, host_buffers.tx, fraglen);
+ nr_ranges = fraglen / sizeof(*buf);
+
+ ret = ffa_host_share_ranges(buf, nr_ranges);
+ if (ret) {
+ /*
+ * We're effectively aborting the transaction, so we need
+ * to restore the global state back to what it was prior to
+ * transmission of the first fragment.
+ */
+ ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
+ WARN_ON(res->a0 != FFA_SUCCESS);
+ goto out_unlock;
+ }
+
+ ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
+ if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
+ WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));
+
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+out:
+ if (ret)
+ ffa_to_smccc_res(res, ret);
+
+ /*
+ * If for any reason this did not succeed, we're in trouble as we have
+ * now lost the content of the previous fragments and we can't roll back
+ * the host stage-2 changes. The pages previously marked as shared will
+ * remain stuck in that state forever, preventing the host from
+ * sharing/donating them again and possibly leading to subsequent
+ * failures, but this will not compromise confidentiality.
+ */
+ return;
+}
+
+static __always_inline void do_ffa_mem_xfer(const u64 func_id,
+ struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, len, ctxt, 1);
+ DECLARE_REG(u32, fraglen, ctxt, 2);
+ DECLARE_REG(u64, addr_mbz, ctxt, 3);
+ DECLARE_REG(u32, npages_mbz, ctxt, 4);
+ struct ffa_composite_mem_region *reg;
+ struct ffa_mem_region *buf;
+ u32 offset, nr_ranges;
+ int ret = 0;
+
+ BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
+ func_id != FFA_FN64_MEM_LEND);
+
+ if (addr_mbz || npages_mbz || fraglen > len ||
+ fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ if (fraglen < sizeof(struct ffa_mem_region) +
+ sizeof(struct ffa_mem_region_attributes)) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ hyp_spin_lock(&host_buffers.lock);
+ if (!host_buffers.tx) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ buf = hyp_buffers.tx;
+ memcpy(buf, host_buffers.tx, fraglen);
+
+ offset = buf->ep_mem_access[0].composite_off;
+ if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ reg = (void *)buf + offset;
+ nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
+ if (nr_ranges % sizeof(reg->constituents[0])) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ nr_ranges /= sizeof(reg->constituents[0]);
+ ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
+ if (ret)
+ goto out_unlock;
+
+ ffa_mem_xfer(res, func_id, len, fraglen);
+ if (fraglen != len) {
+ if (res->a0 != FFA_MEM_FRAG_RX)
+ goto err_unshare;
+
+ if (res->a3 != fraglen)
+ goto err_unshare;
+ } else if (res->a0 != FFA_SUCCESS) {
+ goto err_unshare;
+ }
+
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+out:
+ if (ret)
+ ffa_to_smccc_res(res, ret);
+ return;
+
+err_unshare:
+ WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
+ goto out_unlock;
+}
+
+static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, handle_lo, ctxt, 1);
+ DECLARE_REG(u32, handle_hi, ctxt, 2);
+ DECLARE_REG(u32, flags, ctxt, 3);
+ struct ffa_composite_mem_region *reg;
+ u32 offset, len, fraglen, fragoff;
+ struct ffa_mem_region *buf;
+ int ret = 0;
+ u64 handle;
+
+ handle = PACK_HANDLE(handle_lo, handle_hi);
+
+ hyp_spin_lock(&host_buffers.lock);
+
+ buf = hyp_buffers.tx;
+ *buf = (struct ffa_mem_region) {
+ .sender_id = HOST_FFA_ID,
+ .handle = handle,
+ };
+
+ ffa_retrieve_req(res, sizeof(*buf));
+ buf = hyp_buffers.rx;
+ if (res->a0 != FFA_MEM_RETRIEVE_RESP)
+ goto out_unlock;
+
+ len = res->a1;
+ fraglen = res->a2;
+
+ offset = buf->ep_mem_access[0].composite_off;
+ /*
+ * We can trust the SPMD to get this right, but let's at least
+ * check that we end up with something that doesn't look _completely_
+ * bogus.
+ */
+ if (WARN_ON(offset > len ||
+ fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
+ ret = FFA_RET_ABORTED;
+ goto out_unlock;
+ }
+
+ if (len > ffa_desc_buf.len) {
+ ret = FFA_RET_NO_MEMORY;
+ goto out_unlock;
+ }
+
+ buf = ffa_desc_buf.buf;
+ memcpy(buf, hyp_buffers.rx, fraglen);
+
+ for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
+ ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
+ if (res->a0 != FFA_MEM_FRAG_TX) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ fraglen = res->a3;
+ memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
+ }
+
+ ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
+ if (res->a0 != FFA_SUCCESS)
+ goto out_unlock;
+
+ reg = (void *)buf + offset;
+ /* If the SPMD was happy, then we should be too. */
+ WARN_ON(ffa_host_unshare_ranges(reg->constituents,
+ reg->addr_range_cnt));
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+
+ if (ret)
+ ffa_to_smccc_res(res, ret);
+}
+
+/*
+ * Is a given FFA function supported, either by forwarding on directly
+ * or by handling at EL2?
+ */
+static bool ffa_call_supported(u64 func_id)
+{
+ switch (func_id) {
+ /* Unsupported memory management calls */
+ case FFA_FN64_MEM_RETRIEVE_REQ:
+ case FFA_MEM_RETRIEVE_RESP:
+ case FFA_MEM_RELINQUISH:
+ case FFA_MEM_OP_PAUSE:
+ case FFA_MEM_OP_RESUME:
+ case FFA_MEM_FRAG_RX:
+ case FFA_FN64_MEM_DONATE:
+ /* Indirect message passing via RX/TX buffers */
+ case FFA_MSG_SEND:
+ case FFA_MSG_POLL:
+ case FFA_MSG_WAIT:
+ /* 32-bit variants of 64-bit calls */
+ case FFA_MSG_SEND_DIRECT_REQ:
+ case FFA_MSG_SEND_DIRECT_RESP:
+ case FFA_RXTX_MAP:
+ case FFA_MEM_DONATE:
+ case FFA_MEM_RETRIEVE_REQ:
+ return false;
+ }
+
+ return true;
+}
+
+static bool do_ffa_features(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, id, ctxt, 1);
+ u64 prop = 0;
+ int ret = 0;
+
+ if (!ffa_call_supported(id)) {
+ ret = FFA_RET_NOT_SUPPORTED;
+ goto out_handled;
+ }
+
+ switch (id) {
+ case FFA_MEM_SHARE:
+ case FFA_FN64_MEM_SHARE:
+ case FFA_MEM_LEND:
+ case FFA_FN64_MEM_LEND:
+ ret = FFA_RET_SUCCESS;
+ prop = 0; /* No support for dynamic buffers */
+ goto out_handled;
+ default:
+ return false;
+ }
+
+out_handled:
+ ffa_to_smccc_res_prop(res, ret, prop);
+ return true;
+}
+
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u64, func_id, host_ctxt, 0);
+ struct arm_smccc_res res;
+
+ /*
+ * There's no way we can tell what a non-standard SMC call might
+ * be up to. Ideally, we would terminate these here and return
+ * an error to the host, but sadly devices make use of custom
+ * firmware calls for things like power management, debugging,
+ * RNG access and crash reporting.
+ *
+ * Given that the architecture requires us to trust EL3 anyway,
+ * we forward unrecognised calls on under the assumption that
+ * the firmware doesn't expose a mechanism to access arbitrary
+ * non-secure memory. Short of a per-device table of SMCs, this
+ * is the best we can do.
+ */
+ if (!is_ffa_call(func_id))
+ return false;
+
+ switch (func_id) {
+ case FFA_FEATURES:
+ if (!do_ffa_features(&res, host_ctxt))
+ return false;
+ goto out_handled;
+ /* Memory management */
+ case FFA_FN64_RXTX_MAP:
+ do_ffa_rxtx_map(&res, host_ctxt);
+ goto out_handled;
+ case FFA_RXTX_UNMAP:
+ do_ffa_rxtx_unmap(&res, host_ctxt);
+ goto out_handled;
+ case FFA_MEM_SHARE:
+ case FFA_FN64_MEM_SHARE:
+ do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
+ goto out_handled;
+ case FFA_MEM_RECLAIM:
+ do_ffa_mem_reclaim(&res, host_ctxt);
+ goto out_handled;
+ case FFA_MEM_LEND:
+ case FFA_FN64_MEM_LEND:
+ do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
+ goto out_handled;
+ case FFA_MEM_FRAG_TX:
+ do_ffa_mem_frag_tx(&res, host_ctxt);
+ goto out_handled;
+ }
+
+ if (ffa_call_supported(func_id))
+ return false; /* Pass through */
+
+ ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
+out_handled:
+ ffa_set_retval(host_ctxt, &res);
+ return true;
+}
+
+int hyp_ffa_init(void *pages)
+{
+ struct arm_smccc_res res;
+ size_t min_rxtx_sz;
+ void *tx, *rx;
+
+ if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
+ return 0;
+
+ arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == FFA_RET_NOT_SUPPORTED)
+ return 0;
+
+ if (res.a0 != FFA_VERSION_1_0)
+ return -EOPNOTSUPP;
+
+ arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != FFA_SUCCESS)
+ return -EOPNOTSUPP;
+
+ if (res.a2 != HOST_FFA_ID)
+ return -EINVAL;
+
+ arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != FFA_SUCCESS)
+ return -EOPNOTSUPP;
+
+ switch (res.a2) {
+ case FFA_FEAT_RXTX_MIN_SZ_4K:
+ min_rxtx_sz = SZ_4K;
+ break;
+ case FFA_FEAT_RXTX_MIN_SZ_16K:
+ min_rxtx_sz = SZ_16K;
+ break;
+ case FFA_FEAT_RXTX_MIN_SZ_64K:
+ min_rxtx_sz = SZ_64K;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (min_rxtx_sz > PAGE_SIZE)
+ return -EOPNOTSUPP;
+
+ tx = pages;
+ pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
+ rx = pages;
+ pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
+
+ ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
+ .buf = pages,
+ .len = PAGE_SIZE *
+ (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
+ };
+
+ hyp_buffers = (struct kvm_ffa_buffers) {
+ .lock = __HYP_SPIN_LOCK_UNLOCKED,
+ .tx = tx,
+ .rx = rx,
+ };
+
+ host_buffers = (struct kvm_ffa_buffers) {
+ .lock = __HYP_SPIN_LOCK_UNLOCKED,
+ };
+
+ return 0;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index b6c0188c4b35..c87c63133e10 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -10,6 +10,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_ptrauth.h>
.text
@@ -37,10 +38,43 @@ SYM_FUNC_START(__host_exit)
/* Save the host context pointer in x29 across the function call */
mov x29, x0
+
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+b __skip_pauth_save
+alternative_else_nop_endif
+
+alternative_if ARM64_KVM_PROTECTED_MODE
+ /* Save kernel ptrauth keys. */
+ add x18, x29, #CPU_APIAKEYLO_EL1
+ ptrauth_save_state x18, x19, x20
+
+ /* Use hyp keys. */
+ adr_this_cpu x18, kvm_hyp_ctxt, x19
+ add x18, x18, #CPU_APIAKEYLO_EL1
+ ptrauth_restore_state x18, x19, x20
+ isb
+alternative_else_nop_endif
+__skip_pauth_save:
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
bl handle_trap
- /* Restore host regs x0-x17 */
__host_enter_restore_full:
+ /* Restore kernel keys. */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+b __skip_pauth_restore
+alternative_else_nop_endif
+
+alternative_if ARM64_KVM_PROTECTED_MODE
+ add x18, x29, #CPU_APIAKEYLO_EL1
+ ptrauth_restore_state x18, x19, x20
+alternative_else_nop_endif
+__skip_pauth_restore:
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+ /* Restore host regs x0-x17 */
ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)]
ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)]
ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)]
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index a6d67c2bb5ae..90fade1b032e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -83,9 +83,6 @@ SYM_CODE_END(__kvm_hyp_init)
* x0: struct kvm_nvhe_init_params PA
*/
SYM_CODE_START_LOCAL(___kvm_hyp_init)
- ldr x1, [x0, #NVHE_INIT_TPIDR_EL2]
- msr tpidr_el2, x1
-
ldr x1, [x0, #NVHE_INIT_STACK_HYP_VA]
mov sp, x1
@@ -95,6 +92,22 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init)
ldr x1, [x0, #NVHE_INIT_HCR_EL2]
msr hcr_el2, x1
+ mov x2, #HCR_E2H
+ and x2, x1, x2
+ cbz x2, 1f
+
+ // hVHE: Replay the EL2 setup to account for the E2H bit
+ // TPIDR_EL2 is used to preserve x0 across the macro maze...
+ isb
+ msr tpidr_el2, x0
+ init_el2_state
+ finalise_el2_state
+ mrs x0, tpidr_el2
+
+1:
+ ldr x1, [x0, #NVHE_INIT_TPIDR_EL2]
+ msr tpidr_el2, x1
+
ldr x1, [x0, #NVHE_INIT_VTTBR]
msr vttbr_el2, x1
@@ -128,6 +141,13 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
orr x0, x0, x1
alternative_else_nop_endif
+
+#ifdef CONFIG_ARM64_BTI_KERNEL
+alternative_if ARM64_BTI
+ orr x0, x0, #SCTLR_EL2_BT
+alternative_else_nop_endif
+#endif /* CONFIG_ARM64_BTI_KERNEL */
+
msr sctlr_el2, x0
isb
@@ -184,6 +204,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
/* Initialize EL2 CPU state to sane values. */
init_el2_state // Clobbers x0..x2
finalise_el2_state
+ __init_el2_nvhe_prepare_eret
/* Enable MMU, set vectors and stack. */
mov x0, x28
@@ -196,6 +217,11 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
SYM_CODE_END(__kvm_hyp_init_cpu)
SYM_CODE_START(__kvm_handle_stub_hvc)
+ /*
+ * __kvm_handle_stub_hvc is called from __host_hvc through a branch
+ * instruction (br), so we need a "bti j" at the beginning.
+ */
+ bti j
cmp x0, #HVC_SOFT_RESTART
b.ne 1f
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 728e01d4536b..a169c619db60 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -13,6 +13,7 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
@@ -125,6 +126,15 @@ static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}
+static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+ DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
+ DECLARE_REG(int, level, host_ctxt, 3);
+
+ __kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
+}
+
static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
@@ -315,6 +325,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
+ HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
@@ -374,6 +385,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
handled = kvm_host_psci_handler(host_ctxt);
if (!handled)
+ handled = kvm_host_ffa_handler(host_ctxt);
+ if (!handled)
default_host_smc_handler(host_ctxt);
/* SMC was trapped, move ELR past the current PC. */
@@ -392,7 +405,11 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_SVE:
- sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
+ if (has_hvhe())
+ sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
+ CPACR_EL1_ZEN_EL0EN));
+ else
+ sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
isb();
sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
break;
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index a8813b212996..9d703441278b 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -91,9 +91,9 @@ static void host_s2_put_page(void *addr)
hyp_put_page(&host_s2_pool, addr);
}
-static void host_s2_free_removed_table(void *addr, u32 level)
+static void host_s2_free_unlinked_table(void *addr, u32 level)
{
- kvm_pgtable_stage2_free_removed(&host_mmu.mm_ops, addr, level);
+ kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
}
static int prepare_s2_pool(void *pgt_pool_base)
@@ -110,7 +110,7 @@ static int prepare_s2_pool(void *pgt_pool_base)
host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
.zalloc_pages_exact = host_s2_zalloc_pages_exact,
.zalloc_page = host_s2_zalloc_page,
- .free_removed_table = host_s2_free_removed_table,
+ .free_unlinked_table = host_s2_free_unlinked_table,
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
.page_count = hyp_page_count,
@@ -842,6 +842,13 @@ static int check_share(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
break;
+ case PKVM_ID_FFA:
+ /*
+ * We only check the host; the secure side will check the other
+ * end when we forward the FFA call.
+ */
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
@@ -870,6 +877,13 @@ static int __do_share(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
break;
+ case PKVM_ID_FFA:
+ /*
+ * We're not responsible for any secure page-tables, so there's
+ * nothing to do here.
+ */
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
@@ -918,6 +932,10 @@ static int check_unshare(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_ack_unshare(completer_addr, tx);
break;
+ case PKVM_ID_FFA:
+ /* See check_share() */
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
@@ -946,6 +964,10 @@ static int __do_unshare(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_complete_unshare(completer_addr, tx);
break;
+ case PKVM_ID_FFA:
+ /* See __do_share() */
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
@@ -1235,3 +1257,49 @@ void hyp_unpin_shared_mem(void *from, void *to)
hyp_unlock_component();
host_unlock_component();
}
+
+int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
+{
+ int ret;
+ struct pkvm_mem_share share = {
+ .tx = {
+ .nr_pages = nr_pages,
+ .initiator = {
+ .id = PKVM_ID_HOST,
+ .addr = hyp_pfn_to_phys(pfn),
+ },
+ .completer = {
+ .id = PKVM_ID_FFA,
+ },
+ },
+ };
+
+ host_lock_component();
+ ret = do_share(&share);
+ host_unlock_component();
+
+ return ret;
+}
+
+int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
+{
+ int ret;
+ struct pkvm_mem_share share = {
+ .tx = {
+ .nr_pages = nr_pages,
+ .initiator = {
+ .id = PKVM_ID_HOST,
+ .addr = hyp_pfn_to_phys(pfn),
+ },
+ .completer = {
+ .id = PKVM_ID_FFA,
+ },
+ },
+ };
+
+ host_lock_component();
+ ret = do_unshare(&share);
+ host_unlock_component();
+
+ return ret;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index a06ece14a6d8..8033ef353a5d 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -27,6 +27,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
u64 hcr_set = HCR_RW;
u64 hcr_clear = 0;
u64 cptr_set = 0;
+ u64 cptr_clear = 0;
/* Protected KVM does not support AArch32 guests. */
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
@@ -43,6 +44,9 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
PVM_ID_AA64PFR0_ALLOW));
+ if (has_hvhe())
+ hcr_set |= HCR_E2H;
+
/* Trap RAS unless all current versions are supported */
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
ID_AA64PFR0_EL1_RAS_V1P1) {
@@ -57,12 +61,17 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
}
/* Trap SVE */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
- cptr_set |= CPTR_EL2_TZ;
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
+ if (has_hvhe())
+ cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+ else
+ cptr_set |= CPTR_EL2_TZ;
+ }
vcpu->arch.hcr_el2 |= hcr_set;
vcpu->arch.hcr_el2 &= ~hcr_clear;
vcpu->arch.cptr_el2 |= cptr_set;
+ vcpu->arch.cptr_el2 &= ~cptr_clear;
}
/*
@@ -120,8 +129,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
mdcr_set |= MDCR_EL2_TTRF;
/* Trap Trace */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
- cptr_set |= CPTR_EL2_TTA;
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
+ if (has_hvhe())
+ cptr_set |= CPACR_EL1_TTA;
+ else
+ cptr_set |= CPTR_EL2_TTA;
+ }
vcpu->arch.mdcr_el2 |= mdcr_set;
vcpu->arch.mdcr_el2 &= ~mdcr_clear;
@@ -176,8 +189,10 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
/* Clear res0 and set res1 bits to trap potential new features. */
vcpu->arch.hcr_el2 &= ~(HCR_RES0);
vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
- vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
- vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+ if (!has_hvhe()) {
+ vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
+ vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+ }
}
/*
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 110f04627785..bb98630dfeaf 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -11,6 +11,7 @@
#include <asm/kvm_pkvm.h>
#include <nvhe/early_alloc.h>
+#include <nvhe/ffa.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
@@ -28,6 +29,7 @@ static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
+static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
@@ -57,6 +59,11 @@ static int divide_memory_pool(void *virt, unsigned long size)
if (!host_s2_pgt_base)
return -ENOMEM;
+ nr_pages = hyp_ffa_proxy_pages();
+ ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
+ if (!ffa_proxy_pages)
+ return -ENOMEM;
+
return 0;
}
@@ -314,6 +321,10 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
+ ret = hyp_ffa_init(ffa_proxy_pages);
+ if (ret)
+ goto out;
+
pkvm_hyp_vm_table_init(vm_table_base);
out:
/*
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 77791495c995..0a6271052def 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -44,13 +44,24 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_common(vcpu);
val = vcpu->arch.cptr_el2;
- val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
+ val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
+ val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
+ if (cpus_have_final_cap(ARM64_SME)) {
+ if (has_hvhe())
+ val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
+ else
+ val |= CPTR_EL2_TSM;
+ }
+
if (!guest_owns_fp_regs(vcpu)) {
- val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+ if (has_hvhe())
+ val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
+ CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+ else
+ val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+
__activate_traps_fpsimd32(vcpu);
}
- if (cpus_have_final_cap(ARM64_SME))
- val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
@@ -73,7 +84,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
extern char __kvm_hyp_host_vector[];
- u64 cptr;
___deactivate_traps(vcpu);
@@ -98,13 +108,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
- cptr = CPTR_EL2_DEFAULT;
- if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
- cptr |= CPTR_EL2_TZ;
- if (cpus_have_final_cap(ARM64_SME))
- cptr &= ~CPTR_EL2_TSM;
-
- write_sysreg(cptr, cptr_el2);
+ kvm_reset_cptr_el2(vcpu);
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/timer-sr.c b/arch/arm64/kvm/hyp/nvhe/timer-sr.c
index b185ac0dbd47..3aaab20ae5b4 100644
--- a/arch/arm64/kvm/hyp/nvhe/timer-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/timer-sr.c
@@ -17,21 +17,24 @@ void __kvm_timer_set_cntvoff(u64 cntvoff)
}
/*
- * Should only be called on non-VHE systems.
+ * Should only be called on non-VHE or hVHE setups.
* VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe().
*/
void __timer_disable_traps(struct kvm_vcpu *vcpu)
{
- u64 val;
+ u64 val, shift = 0;
+
+ if (has_hvhe())
+ shift = 10;
/* Allow physical timer/counter access for the host */
val = read_sysreg(cnthctl_el2);
- val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+ val |= (CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) << shift;
write_sysreg(val, cnthctl_el2);
}
/*
- * Should only be called on non-VHE systems.
+ * Should only be called on non-VHE or hVHE setups.
* VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe().
*/
void __timer_enable_traps(struct kvm_vcpu *vcpu)
@@ -50,5 +53,10 @@ void __timer_enable_traps(struct kvm_vcpu *vcpu)
else
clr |= CNTHCTL_EL1PCTEN;
+ if (has_hvhe()) {
+ clr <<= 10;
+ set <<= 10;
+ }
+
sysreg_clear_set(cnthctl_el2, clr, set);
}
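
As an aside on the shift used above: when HCR_EL2.E2H is set (hVHE), the EL1 physical counter/timer enable controls sit at CNTHCTL_EL2 bits 10 and 11 rather than bits 0 and 1, so the same masks are simply moved up by 10. A minimal userspace sketch of that computation, assuming the usual bit-0/bit-1 definitions of CNTHCTL_EL1PCTEN and CNTHCTL_EL1PCEN (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* nVHE bit positions of the EL1 physical counter/timer enables (assumed). */
#define CNTHCTL_EL1PCTEN (1ULL << 0)
#define CNTHCTL_EL1PCEN  (1ULL << 1)

int main(void)
{
        int hvhe = 1;                   /* stand-in for has_hvhe() == true */
        unsigned int shift = hvhe ? 10 : 0;
        uint64_t set = (CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) << shift;

        /* With E2H=1 the enables land at bits 10 and 11, i.e. 0xc00. */
        printf("cnthctl_el2 |= 0x%llx\n", (unsigned long long)set);
        return 0;
}
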
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 978179133f4b..b9991bbd8e3f 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -130,6 +130,58 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
__tlb_switch_to_host(&cxt);
}
+void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
+ phys_addr_t ipa, int level)
+{
+ struct tlb_inv_context cxt;
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt, true);
+
+ /*
+ * We could do so much better if we had the VA as well.
+ * Instead, we invalidate Stage-2 for this IPA, and the
+ * whole of Stage-1. Weep...
+ */
+ ipa >>= 12;
+ __tlbi_level(ipas2e1, ipa, level);
+
+ /*
+ * We have to ensure completion of the invalidation at Stage-2,
+ * since a table walk on another CPU could refill a TLB with a
+ * complete (S1 + S2) walk based on the old Stage-2 mapping if
+ * the Stage-1 invalidation happened first.
+ */
+ dsb(nsh);
+ __tlbi(vmalle1);
+ dsb(nsh);
+ isb();
+
+ /*
+ * If the host is running at EL1 and we have a VPIPT I-cache,
+ * then we must perform I-cache maintenance at EL2 in order for
+ * it to have an effect on the guest. Since the guest cannot hit
+ * I-cache lines allocated with a different VMID, we don't need
+ * to worry about junk out of guest reset (we nuke the I-cache on
+ * VMID rollover), but we do need to be careful when remapping
+ * executable pages for the same guest. This can happen when KSM
+ * takes a CoW fault on an executable page, copies the page into
+ * a page that was previously mapped in the guest and then needs
+ * to invalidate the guest view of the I-cache for that page
+ * from EL1. To solve this, we invalidate the entire I-cache when
+ * unmapping a page from a guest if we have a VPIPT I-cache but
+ * the host is running at EL1. As above, we could do better if
+ * we had the VA.
+ *
+ * The moral of this story is: if you have a VPIPT I-cache, then
+ * you should be running with VHE enabled.
+ */
+ if (icache_is_vpipt())
+ icache_inval_all_pou();
+
+ __tlb_switch_to_host(&cxt);
+}
+
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 95dae02ccc2e..aa740a974e02 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -21,8 +21,10 @@
#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP GENMASK(7, 6)
-#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO 3
-#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW 1
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO \
+ ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW \
+ ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
#define KVM_PTE_LEAF_ATTR_LO_S1_SH GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS 3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF BIT(10)
@@ -34,7 +36,7 @@
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS 3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF BIT(10)
-#define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 51)
+#define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 50)
#define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55)
@@ -42,6 +44,8 @@
#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
+#define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50)
+
#define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
KVM_PTE_LEAF_ATTR_HI_S2_XN)
@@ -63,6 +67,16 @@ struct kvm_pgtable_walk_data {
const u64 end;
};
+static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
+{
+ return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
+}
+
+static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
+{
+ return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
+}
+
static bool kvm_phys_is_valid(u64 phys)
{
return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
@@ -386,6 +400,9 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
if (device)
return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
+ attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
} else {
attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
}
@@ -623,10 +640,18 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
#ifdef CONFIG_ARM64_HW_AFDBM
/*
* Enable the Hardware Access Flag management, unconditionally
- * on all CPUs. The features is RES0 on CPUs without the support
- * and must be ignored by the CPUs.
+ * on all CPUs. In systems that have asymmetric support for the feature
+ * this allows KVM to leverage hardware support on the subset of cores
+ * that implement the feature.
+ *
+ * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
+ * hardware) on implementations that do not advertise support for the
+ * feature. As such, setting HA unconditionally is safe, unless you
+ * happen to be running on a design that has unadvertised support for
+ * HAFDBS. Here be dragons.
*/
- vtcr |= VTCR_EL2_HA;
+ if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+ vtcr |= VTCR_EL2_HA;
#endif /* CONFIG_ARM64_HW_AFDBM */
/* Set the vmid bits */
@@ -755,14 +780,17 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
return false;
- /*
- * Perform the appropriate TLB invalidation based on the evicted pte
- * value (if any).
- */
- if (kvm_pte_table(ctx->old, ctx->level))
- kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
- else if (kvm_pte_valid(ctx->old))
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
+ if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
+ /*
+ * Perform the appropriate TLB invalidation based on the
+ * evicted pte value (if any).
+ */
+ if (kvm_pte_table(ctx->old, ctx->level))
+ kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+ else if (kvm_pte_valid(ctx->old))
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
+ ctx->addr, ctx->level);
+ }
if (stage2_pte_is_counted(ctx->old))
mm_ops->put_page(ctx->ptep);
@@ -869,11 +897,13 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
return -EAGAIN;
/* Perform CMOs before installation of the guest stage-2 PTE */
- if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
+ if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
+ stage2_pte_cacheable(pgt, new))
mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
- granule);
+ granule);
- if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
+ if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
+ stage2_pte_executable(new))
mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
stage2_make_pte(ctx, new);
@@ -895,7 +925,7 @@ static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
if (ret)
return ret;
- mm_ops->free_removed_table(childp, ctx->level);
+ mm_ops->free_unlinked_table(childp, ctx->level);
return 0;
}
@@ -940,7 +970,7 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
* The TABLE_PRE callback runs for table entries on the way down, looking
* for table entries which we could conceivably replace with a block entry
* for this mapping. If it finds one it replaces the entry and calls
- * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table.
+ * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table.
*
* Otherwise, the LEAF callback performs the mapping at the existing leaves
* instead.
@@ -1209,7 +1239,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
KVM_PGTABLE_WALK_HANDLE_FAULT |
KVM_PGTABLE_WALK_SHARED);
if (!ret)
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
return ret;
}
@@ -1242,6 +1272,162 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
+kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
+ u64 phys, u32 level,
+ enum kvm_pgtable_prot prot,
+ void *mc, bool force_pte)
+{
+ struct stage2_map_data map_data = {
+ .phys = phys,
+ .mmu = pgt->mmu,
+ .memcache = mc,
+ .force_pte = force_pte,
+ };
+ struct kvm_pgtable_walker walker = {
+ .cb = stage2_map_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF |
+ KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
+ KVM_PGTABLE_WALK_SKIP_CMO,
+ .arg = &map_data,
+ };
+ /*
+ * The input address (.addr) is irrelevant for walking an
+ * unlinked table. Construct an ambiguous IA range to map
+ * kvm_granule_size(level) worth of memory.
+ */
+ struct kvm_pgtable_walk_data data = {
+ .walker = &walker,
+ .addr = 0,
+ .end = kvm_granule_size(level),
+ };
+ struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
+ kvm_pte_t *pgtable;
+ int ret;
+
+ if (!IS_ALIGNED(phys, kvm_granule_size(level)))
+ return ERR_PTR(-EINVAL);
+
+ ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pgtable = mm_ops->zalloc_page(mc);
+ if (!pgtable)
+ return ERR_PTR(-ENOMEM);
+
+ ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable,
+ level + 1);
+ if (ret) {
+ kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
+ mm_ops->put_page(pgtable);
+ return ERR_PTR(ret);
+ }
+
+ return pgtable;
+}
+
+/*
+ * Get the number of page-tables needed to replace a block with a
+ * fully populated tree up to the PTE entries. Note that @level is
+ * interpreted as in "level @level entry".
+ */
+static int stage2_block_get_nr_page_tables(u32 level)
+{
+ switch (level) {
+ case 1:
+ return PTRS_PER_PTE + 1;
+ case 2:
+ return 1;
+ case 3:
+ return 0;
+ default:
+ WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
+ level >= KVM_PGTABLE_MAX_LEVELS);
+ return -EINVAL;
+ };
+}
+
+static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
+{
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+ struct kvm_mmu_memory_cache *mc = ctx->arg;
+ struct kvm_s2_mmu *mmu;
+ kvm_pte_t pte = ctx->old, new, *childp;
+ enum kvm_pgtable_prot prot;
+ u32 level = ctx->level;
+ bool force_pte;
+ int nr_pages;
+ u64 phys;
+
+ /* No huge-pages exist at the last level */
+ if (level == KVM_PGTABLE_MAX_LEVELS - 1)
+ return 0;
+
+ /* We only split valid block mappings */
+ if (!kvm_pte_valid(pte))
+ return 0;
+
+ nr_pages = stage2_block_get_nr_page_tables(level);
+ if (nr_pages < 0)
+ return nr_pages;
+
+ if (mc->nobjs >= nr_pages) {
+ /* Build a tree mapped down to the PTE granularity. */
+ force_pte = true;
+ } else {
+ /*
+ * Don't force PTEs, so create_unlinked() below does
+ * not populate the tree up to the PTE level. The
+ * consequence is that the call will require a single
+ * page of level 2 entries at level 1, or a single
+ * page of PTEs at level 2. If we are at level 1, the
+ * PTEs will be created recursively.
+ */
+ force_pte = false;
+ nr_pages = 1;
+ }
+
+ if (mc->nobjs < nr_pages)
+ return -ENOMEM;
+
+ mmu = container_of(mc, struct kvm_s2_mmu, split_page_cache);
+ phys = kvm_pte_to_phys(pte);
+ prot = kvm_pgtable_stage2_pte_prot(pte);
+
+ childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys,
+ level, prot, mc, force_pte);
+ if (IS_ERR(childp))
+ return PTR_ERR(childp);
+
+ if (!stage2_try_break_pte(ctx, mmu)) {
+ kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
+ mm_ops->put_page(childp);
+ return -EAGAIN;
+ }
+
+ /*
+ * Note, the contents of the page table are guaranteed to be made
+ * visible before the new PTE is assigned because stage2_make_pte()
+ * writes the PTE using smp_store_release().
+ */
+ new = kvm_init_table_pte(childp, mm_ops);
+ stage2_make_pte(ctx, new);
+ dsb(ishst);
+ return 0;
+}
+
+int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ struct kvm_mmu_memory_cache *mc)
+{
+ struct kvm_pgtable_walker walker = {
+ .cb = stage2_split_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ .arg = mc,
+ };
+
+ return kvm_pgtable_walk(pgt, addr, size, &walker);
+}
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops,
@@ -1311,7 +1497,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
pgt->pgd = NULL;
}
-void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
{
kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
struct kvm_pgtable_walker walker = {
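
To make the page-table accounting in stage2_block_get_nr_page_tables() above concrete: with a 4 KiB granule (PTRS_PER_PTE == 512), replacing a level-1 (1 GiB) block with a tree fully populated down to PTEs needs one level-2 table plus 512 level-3 tables, i.e. 513 pages; a level-2 (2 MiB) block needs a single level-3 table; a level-3 entry is already a PTE. A small standalone sketch of that arithmetic, under the 4 KiB granule assumption:

#include <stdio.h>

#define PTRS_PER_PTE 512        /* 4 KiB granule assumed */

/* Mirrors the level -> page-table count mapping described above. */
static int nr_tables_for_level(int level)
{
        switch (level) {
        case 1: return PTRS_PER_PTE + 1;  /* one level-2 table + 512 level-3 tables */
        case 2: return 1;                 /* one level-3 table */
        case 3: return 0;                 /* already a PTE */
        default: return -1;
        }
}

int main(void)
{
        for (int level = 1; level <= 3; level++)
                printf("level %d block -> %d page-table pages\n",
                       level, nr_tables_for_level(level));
        return 0;
}
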
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index b37e7c96efea..6537f58b1a8c 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -84,7 +84,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
- write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
+ kvm_reset_cptr_el2(vcpu);
if (!arm64_kernel_unmapped_at_el0())
host_vectors = __this_cpu_read(this_cpu_vector);
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 24cef9b87f9e..e69da550cdc5 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -111,6 +111,38 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
__tlb_switch_to_host(&cxt);
}
+void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
+ phys_addr_t ipa, int level)
+{
+ struct tlb_inv_context cxt;
+
+ dsb(nshst);
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt);
+
+ /*
+ * We could do so much better if we had the VA as well.
+ * Instead, we invalidate Stage-2 for this IPA, and the
+ * whole of Stage-1. Weep...
+ */
+ ipa >>= 12;
+ __tlbi_level(ipas2e1, ipa, level);
+
+ /*
+ * We have to ensure completion of the invalidation at Stage-2,
+ * since a table walk on another CPU could refill a TLB with a
+ * complete (S1 + S2) walk based on the old Stage-2 mapping if
+ * the Stage-1 invalidation happened first.
+ */
+ dsb(nsh);
+ __tlbi(vmalle1);
+ dsb(nsh);
+ isb();
+
+ __tlb_switch_to_host(&cxt);
+}
+
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 3b9d4d24c361..6db9ef288ec3 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -31,14 +31,21 @@ static phys_addr_t __ro_after_init hyp_idmap_vector;
static unsigned long __ro_after_init io_map_base;
-static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
+static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
+ phys_addr_t size)
{
- phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
return (boundary - 1 < end - 1) ? boundary : end;
}
+static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+ phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
+
+ return __stage2_range_addr_end(addr, end, size);
+}
+
/*
* Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
* we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
@@ -75,6 +82,79 @@ static int stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr,
#define stage2_apply_range_resched(mmu, addr, end, fn) \
stage2_apply_range(mmu, addr, end, fn, true)
+/*
+ * Get the maximum number of page-table pages needed to split a range
+ * of blocks into PAGE_SIZE PTEs. It assumes the range is already
+ * mapped at level 2, or at level 1 if allowed.
+ */
+static int kvm_mmu_split_nr_page_tables(u64 range)
+{
+ int n = 0;
+
+ if (KVM_PGTABLE_MIN_BLOCK_LEVEL < 2)
+ n += DIV_ROUND_UP(range, PUD_SIZE);
+ n += DIV_ROUND_UP(range, PMD_SIZE);
+ return n;
+}
+
+static bool need_split_memcache_topup_or_resched(struct kvm *kvm)
+{
+ struct kvm_mmu_memory_cache *cache;
+ u64 chunk_size, min;
+
+ if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
+ return true;
+
+ chunk_size = kvm->arch.mmu.split_page_chunk_size;
+ min = kvm_mmu_split_nr_page_tables(chunk_size);
+ cache = &kvm->arch.mmu.split_page_cache;
+ return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
+}
+
+static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
+ phys_addr_t end)
+{
+ struct kvm_mmu_memory_cache *cache;
+ struct kvm_pgtable *pgt;
+ int ret, cache_capacity;
+ u64 next, chunk_size;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ chunk_size = kvm->arch.mmu.split_page_chunk_size;
+ cache_capacity = kvm_mmu_split_nr_page_tables(chunk_size);
+
+ if (chunk_size == 0)
+ return 0;
+
+ cache = &kvm->arch.mmu.split_page_cache;
+
+ do {
+ if (need_split_memcache_topup_or_resched(kvm)) {
+ write_unlock(&kvm->mmu_lock);
+ cond_resched();
+ /* Eager page splitting is best-effort. */
+ ret = __kvm_mmu_topup_memory_cache(cache,
+ cache_capacity,
+ cache_capacity);
+ write_lock(&kvm->mmu_lock);
+ if (ret)
+ break;
+ }
+
+ pgt = kvm->arch.mmu.pgt;
+ if (!pgt)
+ return -EINVAL;
+
+ next = __stage2_range_addr_end(addr, end, chunk_size);
+ ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
+ if (ret)
+ break;
+ } while (addr = next, addr != end);
+
+ return ret;
+}
+
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
@@ -131,21 +211,21 @@ static void kvm_s2_free_pages_exact(void *virt, size_t size)
static struct kvm_pgtable_mm_ops kvm_s2_mm_ops;
-static void stage2_free_removed_table_rcu_cb(struct rcu_head *head)
+static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
{
struct page *page = container_of(head, struct page, rcu_head);
void *pgtable = page_to_virt(page);
u32 level = page_private(page);
- kvm_pgtable_stage2_free_removed(&kvm_s2_mm_ops, pgtable, level);
+ kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
}
-static void stage2_free_removed_table(void *addr, u32 level)
+static void stage2_free_unlinked_table(void *addr, u32 level)
{
struct page *page = virt_to_page(addr);
set_page_private(page, (unsigned long)level);
- call_rcu(&page->rcu_head, stage2_free_removed_table_rcu_cb);
+ call_rcu(&page->rcu_head, stage2_free_unlinked_table_rcu_cb);
}
static void kvm_host_get_page(void *addr)
@@ -701,7 +781,7 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
.zalloc_page = stage2_memcache_zalloc_page,
.zalloc_pages_exact = kvm_s2_zalloc_pages_exact,
.free_pages_exact = kvm_s2_free_pages_exact,
- .free_removed_table = stage2_free_removed_table,
+ .free_unlinked_table = stage2_free_unlinked_table,
.get_page = kvm_host_get_page,
.put_page = kvm_s2_put_page,
.page_count = kvm_host_page_count,
@@ -775,6 +855,10 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
for_each_possible_cpu(cpu)
*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
+ /* Eager page splitting is disabled by default */
+ mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
+ mmu->split_page_cache.gfp_zero = __GFP_ZERO;
+
mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd);
return 0;
@@ -786,6 +870,12 @@ out_free_pgtable:
return err;
}
+void kvm_uninit_stage2_mmu(struct kvm *kvm)
+{
+ kvm_free_stage2_pgd(&kvm->arch.mmu);
+ kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
+}
+
static void stage2_unmap_memslot(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
@@ -989,39 +1079,66 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
}
/**
- * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
+ * kvm_mmu_split_memory_region() - split the stage-2 blocks of a memory slot
+ * into PAGE_SIZE pages
* @kvm: The KVM pointer
- * @slot: The memory slot associated with mask
- * @gfn_offset: The gfn offset in memory slot
- * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
- * slot to be write protected
+ * @slot: The memory slot to split
*
- * Walks bits set in mask write protects the associated pte's. Caller must
- * acquire kvm_mmu_lock.
+ * Acquires kvm->mmu_lock. Called with kvm->slots_lock mutex acquired,
+ * serializing operations for VM memory regions.
*/
-static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn_offset, unsigned long mask)
+static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
{
- phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
- phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
- phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ phys_addr_t start, end;
- stage2_wp_range(&kvm->arch.mmu, start, end);
+ lockdep_assert_held(&kvm->slots_lock);
+
+ slots = kvm_memslots(kvm);
+ memslot = id_to_memslot(slots, slot);
+
+ start = memslot->base_gfn << PAGE_SHIFT;
+ end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+ write_lock(&kvm->mmu_lock);
+ kvm_mmu_split_huge_pages(kvm, start, end);
+ write_unlock(&kvm->mmu_lock);
}
/*
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * dirty pages.
+ * kvm_arch_mmu_enable_log_dirty_pt_masked() - enable dirty logging for selected pages.
+ * @kvm: The KVM pointer
+ * @slot: The memory slot associated with mask
+ * @gfn_offset: The gfn offset in memory slot
+ * @mask: The mask of pages at offset 'gfn_offset' in this memory
+ * slot to enable dirty logging on
*
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
+ * Write-protects selected pages to enable dirty logging, and then
+ * splits them to PAGE_SIZE. Caller must acquire kvm->mmu_lock.
*/
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
- kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+ phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+ phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
+ phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ stage2_wp_range(&kvm->arch.mmu, start, end);
+
+ /*
+ * Eager-splitting is done when manual-protect is set. We
+ * also check for initially-all-set because we can avoid
+ * eager-splitting if initially-all-set is false.
+ * Initially-all-set being false implies that huge-pages were
+ * already split when enabling dirty logging: no need to do it
+ * again.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ kvm_mmu_split_huge_pages(kvm, start, end);
}
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
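
The start/end computation in kvm_arch_mmu_enable_log_dirty_pt_masked() turns a 64-bit dirty-bitmap mask into a guest-physical range: __ffs(mask) and __fls(mask) give the first and last set bit, which are page offsets from the slot's base_gfn plus gfn_offset. A hedged userspace model of that arithmetic, with __ffs()/__fls() stood in by compiler builtins and all values illustrative:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages assumed */

int main(void)
{
        uint64_t base_gfn = 0x1000;     /* example base gfn, gfn_offset already added */
        uint64_t mask = 0x00f0;         /* pages 4..7 of this 64-page chunk are selected */

        /* __ffs()/__fls() equivalents for a non-zero mask. */
        unsigned int first = __builtin_ctzll(mask);
        unsigned int last = 63 - __builtin_clzll(mask);

        uint64_t start = (base_gfn + first) << PAGE_SHIFT;
        uint64_t end = (base_gfn + last + 1) << PAGE_SHIFT;

        /* Expect [0x1004000, 0x1008000): four pages get write-protected and split. */
        printf("start=0x%llx end=0x%llx\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}
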
@@ -1790,20 +1907,42 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
+ bool log_dirty_pages = new && new->flags & KVM_MEM_LOG_DIRTY_PAGES;
+
/*
* At this point memslot has been committed and there is an
* allocated dirty_bitmap[], dirty pages will be tracked while the
* memory slot is write protected.
*/
- if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ if (log_dirty_pages) {
+
+ if (change == KVM_MR_DELETE)
+ return;
+
/*
- * If we're with initial-all-set, we don't need to write
- * protect any pages because they're all reported as dirty.
- * Huge pages and normal pages will be write protect gradually.
+ * Huge and normal pages are write-protected and split
+ * on either of these two cases:
+ *
+ * 1. with initial-all-set: gradually with CLEAR ioctls,
*/
- if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
- kvm_mmu_wp_memory_region(kvm, new->id);
- }
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ return;
+ /*
+ * or
+ * 2. without initial-all-set: all in one shot when
+ * enabling dirty logging.
+ */
+ kvm_mmu_wp_memory_region(kvm, new->id);
+ kvm_mmu_split_memory_region(kvm, new->id);
+ } else {
+ /*
+ * Free any leftovers from the eager page splitting cache. Do
+ * this when deleting, moving, disabling dirty logging, or
+ * creating the memslot (a nop). Doing it for deletes makes
+ * sure we don't leak memory, and there's no need to keep the
+ * cache around for any of the other cases.
+ */
+ kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
}
}
@@ -1877,7 +2016,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
- kvm_free_stage2_pgd(&kvm->arch.mmu);
+ kvm_uninit_stage2_mmu(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
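
For a feel of the split-cache sizing in kvm_mmu_split_nr_page_tables(): with a 4 KiB granule (PMD_SIZE = 2 MiB, PUD_SIZE = 1 GiB), a chunk of range bytes needs at most DIV_ROUND_UP(range, PUD_SIZE) level-2 tables (only when level-1 blocks are possible) plus DIV_ROUND_UP(range, PMD_SIZE) level-3 tables. A minimal sketch under those assumptions, with level-1 blocks allowed:

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE (2ULL << 20)   /* 2 MiB, 4 KiB granule assumed */
#define PUD_SIZE (1ULL << 30)   /* 1 GiB */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint64_t split_nr_page_tables(uint64_t range)
{
        uint64_t n = 0;

        /* Only needed when the range may be mapped with level-1 (1 GiB) blocks. */
        n += DIV_ROUND_UP(range, PUD_SIZE);
        n += DIV_ROUND_UP(range, PMD_SIZE);
        return n;
}

int main(void)
{
        /* A 1 GiB chunk needs 1 + 512 = 513 pages; a 2 MiB chunk needs 1 + 1 = 2. */
        printf("1 GiB chunk: %llu pages\n",
               (unsigned long long)split_nr_page_tables(1ULL << 30));
        printf("2 MiB chunk: %llu pages\n",
               (unsigned long long)split_nr_page_tables(2ULL << 20));
        return 0;
}
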
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 6e9ece1ebbe7..994a494703c3 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -78,6 +78,7 @@ void __init kvm_hyp_reserve(void)
hyp_mem_pages += host_s2_pgtable_pages();
hyp_mem_pages += hyp_vm_table_pages();
hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
+ hyp_mem_pages += hyp_ffa_proxy_pages();
/*
* Try to allocate a PMD-aligned region to reduce TLB pressure once
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index b5dee8e57e77..bc8556b6f459 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -187,57 +187,6 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
}
/**
- * kvm_set_vm_width() - set the register width for the guest
- * @vcpu: Pointer to the vcpu being configured
- *
- * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
- * in the VM flags based on the vcpu's requested register width, the HW
- * capabilities and other options (such as MTE).
- * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
- * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
- *
- * Return: 0 on success, negative error code on failure.
- */
-static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- bool is32bit;
-
- is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
-
- lockdep_assert_held(&kvm->arch.config_lock);
-
- if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
- /*
- * The guest's register width is already configured.
- * Make sure that the vcpu is consistent with it.
- */
- if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
- return 0;
-
- return -EINVAL;
- }
-
- if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
- return -EINVAL;
-
- /* MTE is incompatible with AArch32 */
- if (kvm_has_mte(kvm) && is32bit)
- return -EINVAL;
-
- /* NV is incompatible with AArch32 */
- if (vcpu_has_nv(vcpu) && is32bit)
- return -EINVAL;
-
- if (is32bit)
- set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
-
- set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
-
- return 0;
-}
-
-/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
*
@@ -262,13 +211,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
bool loaded;
u32 pstate;
- mutex_lock(&vcpu->kvm->arch.config_lock);
- ret = kvm_set_vm_width(vcpu);
- mutex_unlock(&vcpu->kvm->arch.config_lock);
-
- if (ret)
- return ret;
-
spin_lock(&vcpu->arch.mp_state_lock);
reset_state = vcpu->arch.reset_state;
vcpu->arch.reset_state.reset = false;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 5b5d5e5449dc..bd3431823ec5 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -42,6 +42,8 @@
*/
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
+static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ u64 val);
static bool read_from_write_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
@@ -553,10 +555,11 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return 0;
}
-static void reset_bvr(struct kvm_vcpu *vcpu,
+static u64 reset_bvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
+ return rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
@@ -589,10 +592,11 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return 0;
}
-static void reset_bcr(struct kvm_vcpu *vcpu,
+static u64 reset_bcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
+ return rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
@@ -626,10 +630,11 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return 0;
}
-static void reset_wvr(struct kvm_vcpu *vcpu,
+static u64 reset_wvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
+ return rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
@@ -662,25 +667,28 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return 0;
}
-static void reset_wcr(struct kvm_vcpu *vcpu,
+static u64 reset_wcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
+ return rd->val;
}
-static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 amair = read_sysreg(amair_el1);
vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
+ return amair;
}
-static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 actlr = read_sysreg(actlr_el1);
vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
+ return actlr;
}
-static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 mpidr;
@@ -694,7 +702,10 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
- vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
+ mpidr |= (1ULL << 31);
+ vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);
+
+ return mpidr;
}
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
@@ -706,13 +717,13 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
-static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);
/* No PMU available, any PMU reg may UNDEF... */
if (!kvm_arm_support_pmu_v3())
- return;
+ return 0;
n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
n &= ARMV8_PMU_PMCR_N_MASK;
@@ -721,33 +732,41 @@ static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= mask;
+
+ return __vcpu_sys_reg(vcpu, r->reg);
}
-static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
+
+ return __vcpu_sys_reg(vcpu, r->reg);
}
-static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
+
+ return __vcpu_sys_reg(vcpu, r->reg);
}
-static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
+
+ return __vcpu_sys_reg(vcpu, r->reg);
}
-static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr;
/* No PMU available, PMCR_EL0 may UNDEF... */
if (!kvm_arm_support_pmu_v3())
- return;
+ return 0;
/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
@@ -755,6 +774,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
pmcr |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, r->reg) = pmcr;
+
+ return __vcpu_sys_reg(vcpu, r->reg);
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -1187,25 +1208,89 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
return true;
}
-static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
+static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
+ s64 new, s64 cur)
{
- if (kvm_vcpu_has_pmu(vcpu))
- return vcpu->kvm->arch.dfr0_pmuver.imp;
+ struct arm64_ftr_bits kvm_ftr = *ftrp;
+
+ /* Some features have a different safe value type in KVM than in the host */
+ switch (id) {
+ case SYS_ID_AA64DFR0_EL1:
+ if (kvm_ftr.shift == ID_AA64DFR0_EL1_PMUVer_SHIFT)
+ kvm_ftr.type = FTR_LOWER_SAFE;
+ break;
+ case SYS_ID_DFR0_EL1:
+ if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
+ kvm_ftr.type = FTR_LOWER_SAFE;
+ break;
+ }
- return vcpu->kvm->arch.dfr0_pmuver.unimp;
+ return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}
-static u8 perfmon_to_pmuver(u8 perfmon)
+/**
+ * arm64_check_features() - Check if a feature register value constitutes
+ * a subset of features indicated by the idreg's KVM sanitised limit.
+ *
+ * This function checks whether each feature field of @val is the "safe" value
+ * against the idreg's KVM sanitised limit returned from the reset() callback.
+ * If a field value in @val is the same as the one in limit, it is always
+ * considered the safe value regardless. For register fields that are not
+ * writable, only the value in limit is considered the safe value.
+ *
+ * Return: 0 if all the fields are safe. Otherwise, return negative errno.
+ */
+static int arm64_check_features(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ u64 val)
{
- switch (perfmon) {
- case ID_DFR0_EL1_PerfMon_PMUv3:
- return ID_AA64DFR0_EL1_PMUVer_IMP;
- case ID_DFR0_EL1_PerfMon_IMPDEF:
- return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
- default:
- /* Anything ARMv8.1+ and NI have the same value. For now. */
- return perfmon;
+ const struct arm64_ftr_reg *ftr_reg;
+ const struct arm64_ftr_bits *ftrp = NULL;
+ u32 id = reg_to_encoding(rd);
+ u64 writable_mask = rd->val;
+ u64 limit = rd->reset(vcpu, rd);
+ u64 mask = 0;
+
+ /*
+ * Hidden and unallocated ID registers may not have a corresponding
+ * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
+ * only safe value is 0.
+ */
+ if (sysreg_visible_as_raz(vcpu, rd))
+ return val ? -E2BIG : 0;
+
+ ftr_reg = get_arm64_ftr_reg(id);
+ if (!ftr_reg)
+ return -EINVAL;
+
+ ftrp = ftr_reg->ftr_bits;
+
+ for (; ftrp && ftrp->width; ftrp++) {
+ s64 f_val, f_lim, safe_val;
+ u64 ftr_mask;
+
+ ftr_mask = arm64_ftr_mask(ftrp);
+ if ((ftr_mask & writable_mask) != ftr_mask)
+ continue;
+
+ f_val = arm64_ftr_value(ftrp, val);
+ f_lim = arm64_ftr_value(ftrp, limit);
+ mask |= ftr_mask;
+
+ if (f_val == f_lim)
+ safe_val = f_val;
+ else
+ safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);
+
+ if (safe_val != f_val)
+ return -E2BIG;
}
+
+ /* For fields that are not writable, values in limit are the safe values. */
+ if ((val & ~mask) != (limit & ~mask))
+ return -E2BIG;
+
+ return 0;
}
static u8 pmuver_to_perfmon(u8 pmuver)
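
The check in arm64_check_features() reduces to: for every feature field covered by the register's writable mask (rd->val), the user-supplied value must be "safe" relative to the limit produced by reset(); every bit outside the mask must match the limit exactly. A simplified single-field model of that rule, using a lower-is-safe field and purely illustrative names and values:

#include <stdint.h>
#include <stdio.h>

/* One 4-bit feature field at bit 0; lower values are treated as safe. */
#define FIELD_MASK 0xfULL

static int check_one_field(uint64_t val, uint64_t limit, uint64_t writable_mask)
{
        uint64_t f_val = val & FIELD_MASK;
        uint64_t f_lim = limit & FIELD_MASK;

        if ((FIELD_MASK & writable_mask) == FIELD_MASK) {
                /* Writable field: FTR_LOWER_SAFE-style check against the limit. */
                if (f_val > f_lim)
                        return -1;      /* asks for more than the limit: reject */
        } else if (f_val != f_lim) {
                return -1;              /* non-writable field must match the limit */
        }

        /* Bits outside the field must match the limit exactly. */
        if ((val & ~FIELD_MASK) != (limit & ~FIELD_MASK))
                return -1;
        return 0;
}

int main(void)
{
        uint64_t limit = 0x6;   /* e.g. the sanitised field value from reset() */

        printf("%d\n", check_one_field(0x4, limit, FIELD_MASK)); /* 0: lower, accepted */
        printf("%d\n", check_one_field(0x7, limit, FIELD_MASK)); /* -1: exceeds limit */
        printf("%d\n", check_one_field(0x4, limit, 0));          /* -1: field not writable */
        return 0;
}
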
@@ -1222,7 +1307,8 @@ static u8 pmuver_to_perfmon(u8 pmuver)
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
-static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
+static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
{
u32 id = reg_to_encoding(r);
u64 val;
@@ -1233,19 +1319,6 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
val = read_sanitised_ftr_reg(id);
switch (id) {
- case SYS_ID_AA64PFR0_EL1:
- if (!vcpu_has_sve(vcpu))
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
- if (kvm_vgic_global_state.type == VGIC_V3) {
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
- }
- break;
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
@@ -1267,22 +1340,6 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
break;
- case SYS_ID_AA64DFR0_EL1:
- /* Limit debug to ARMv8.0 */
- val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
- /* Set PMUver to the required version */
- val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
- vcpu_pmuver(vcpu));
- /* Hide SPE from guests */
- val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
- break;
- case SYS_ID_DFR0_EL1:
- val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
- pmuver_to_perfmon(vcpu_pmuver(vcpu)));
- break;
case SYS_ID_AA64MMFR2_EL1:
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
break;
@@ -1294,6 +1351,28 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
return val;
}
+static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ return __kvm_read_sanitised_id_reg(vcpu, r);
+}
+
+static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ return IDREG(vcpu->kvm, reg_to_encoding(r));
+}
+
+/*
+ * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
+ * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
+ */
+static inline bool is_id_reg(u32 id)
+{
+ return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
+ sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
+ sys_reg_CRm(id) < 8);
+}
+
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
@@ -1355,88 +1434,113 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
-static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd,
- u64 val)
+static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
{
- u8 csv2, csv3;
+ u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ if (!vcpu_has_sve(vcpu))
+ val &= ~ID_AA64PFR0_EL1_SVE_MASK;
/*
- * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
- * it doesn't promise more than what is actually provided (the
- * guest could otherwise be covered in ectoplasmic residue).
+ * The default is to expose CSV2 == 1 if the HW isn't affected.
+ * Although this is a per-CPU feature, we make it global because
+ * asymmetric systems are just a nuisance.
+ *
+ * Userspace can override this as long as it doesn't promise
+ * the impossible.
*/
- csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
- if (csv2 > 1 ||
- (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
- return -EINVAL;
+ if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
+ val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
+ }
+ if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
+ val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
+ }
- /* Same thing for CSV3 */
- csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
- if (csv3 > 1 ||
- (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
- return -EINVAL;
+ if (kvm_vgic_global_state.type == VGIC_V3) {
+ val &= ~ID_AA64PFR0_EL1_GIC_MASK;
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
+ }
- /* We can only differ with CSV[23], and anything else is an error */
- val ^= read_id_reg(vcpu, rd);
- val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
- ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
- if (val)
- return -EINVAL;
+ val &= ~ID_AA64PFR0_EL1_AMU_MASK;
- vcpu->kvm->arch.pfr0_csv2 = csv2;
- vcpu->kvm->arch.pfr0_csv3 = csv3;
+ return val;
+}
- return 0;
+static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+
+ /* Limit debug to ARMv8.0 */
+ val &= ~ID_AA64DFR0_EL1_DebugVer_MASK;
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DebugVer, IMP);
+
+ /*
+ * Only initialize the PMU version if the vCPU was configured with one.
+ */
+ val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
+ if (kvm_vcpu_has_pmu(vcpu))
+ val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
+ kvm_arm_pmu_get_pmuver_limit());
+
+ /* Hide SPE from guests */
+ val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
+
+ return val;
}
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd,
u64 val)
{
- u8 pmuver, host_pmuver;
- bool valid_pmu;
-
- host_pmuver = kvm_arm_pmu_get_pmuver_limit();
+ u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
/*
- * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
- * as it doesn't promise more than what the HW gives us. We
- * allow an IMPDEF PMU though, only if no PMU is supported
- * (KVM backward compatibility handling).
+ * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
+ * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
+ * exposed an IMP_DEF PMU to userspace and the guest on systems w/
+ * non-architectural PMUs. Of course, PMUv3 is the only game in town for
+ * PMU virtualization, so the IMP_DEF value was rather user-hostile.
+ *
+ * At minimum, we're on the hook to allow values that were given to
+ * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
+ * with a more sensible NI. The value of an ID register changing under
+ * the nose of the guest is unfortunate, but is certainly no more
+ * surprising than an ill-guided PMU driver poking at impdef system
+ * registers that end in an UNDEF...
*/
- pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
- if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
- return -EINVAL;
+ if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+ val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
- valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
-
- /* Make sure view register and PMU support do match */
- if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
- return -EINVAL;
+ return set_id_reg(vcpu, rd, val);
+}
- /* We can only differ with PMUver, and anything else is an error */
- val ^= read_id_reg(vcpu, rd);
- val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
- if (val)
- return -EINVAL;
+static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+ u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
- if (valid_pmu)
- vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
- else
- vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
+ val &= ~ID_DFR0_EL1_PerfMon_MASK;
+ if (kvm_vcpu_has_pmu(vcpu))
+ val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
- return 0;
+ return val;
}
static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd,
u64 val)
{
- u8 perfmon, host_perfmon;
- bool valid_pmu;
+ u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
- host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+ if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
+ val &= ~ID_DFR0_EL1_PerfMon_MASK;
+ perfmon = 0;
+ }
/*
* Allow DFR0_EL1.PerfMon to be set from userspace as long as
@@ -1444,29 +1548,10 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
* AArch64 side (as everything is emulated with that), and
* that this is a PMUv3.
*/
- perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
- if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
- (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
+ if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
return -EINVAL;
- valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);
-
- /* Make sure view register and PMU support do match */
- if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
- return -EINVAL;
-
- /* We can only differ with PerfMon, and anything else is an error */
- val ^= read_id_reg(vcpu, rd);
- val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
- if (val)
- return -EINVAL;
-
- if (valid_pmu)
- vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
- else
- vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
-
- return 0;
+ return set_id_reg(vcpu, rd, val);
}
/*
@@ -1479,18 +1564,60 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
u64 *val)
{
+ /*
+ * Avoid locking if the VM has already started, as the ID registers are
+ * guaranteed to be invariant at that point.
+ */
+ if (kvm_vm_has_ran_once(vcpu->kvm)) {
+ *val = read_id_reg(vcpu, rd);
+ return 0;
+ }
+
+ mutex_lock(&vcpu->kvm->arch.config_lock);
*val = read_id_reg(vcpu, rd);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
+
return 0;
}
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
u64 val)
{
- /* This is what we mean by invariant: you can't change it. */
- if (val != read_id_reg(vcpu, rd))
- return -EINVAL;
+ u32 id = reg_to_encoding(rd);
+ int ret;
- return 0;
+ mutex_lock(&vcpu->kvm->arch.config_lock);
+
+ /*
+ * Once the VM has started the ID registers are immutable. Reject any
+ * write that does not match the final register value.
+ */
+ if (kvm_vm_has_ran_once(vcpu->kvm)) {
+ if (val != read_id_reg(vcpu, rd))
+ ret = -EBUSY;
+ else
+ ret = 0;
+
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
+ return ret;
+ }
+
+ ret = arm64_check_features(vcpu, rd, val);
+ if (!ret)
+ IDREG(vcpu->kvm, id) = val;
+
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
+
+ /*
+ * arm64_check_features() returns -E2BIG to indicate the register's
+ * feature set is a superset of the maximally-allowed register value.
+ * While it would be nice to precisely describe this to userspace, the
+ * existing UAPI for KVM_SET_ONE_REG has it that invalid register
+ * writes return -EINVAL.
+ */
+ if (ret == -E2BIG)
+ ret = -EINVAL;
+ return ret;
}
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1530,7 +1657,7 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
* Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
* by the physical CPU which the vcpu currently resides in.
*/
-static void reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
u64 clidr;
@@ -1578,6 +1705,8 @@ static void reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);
__vcpu_sys_reg(vcpu, r->reg) = clidr;
+
+ return __vcpu_sys_reg(vcpu, r->reg);
}
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1677,6 +1806,17 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
.visibility = elx2_visibility, \
}
+/*
+ * The reset() callback and the val field are not needed for their usual
+ * purposes by idregs, so they are repurposed here:
+ * reset() returns the KVM-sanitised register value; if KVM applies no extra
+ * sanitisation, this is the same as the host kernel's sanitised value.
+ * val is used as a mask of the fields writable from userspace: only bits
+ * set to 1 may be changed. This mask may become unnecessary once all ID
+ * registers are made writable from userspace.
+ */
+
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
SYS_DESC(SYS_##name), \
@@ -1684,6 +1824,8 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
.get_user = get_id_reg, \
.set_user = set_id_reg, \
.visibility = id_visibility, \
+ .reset = kvm_read_sanitised_id_reg, \
+ .val = 0, \
}
/* sys_reg_desc initialiser for known cpufeature ID registers */
@@ -1693,6 +1835,8 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
.get_user = get_id_reg, \
.set_user = set_id_reg, \
.visibility = aa32_id_visibility, \
+ .reset = kvm_read_sanitised_id_reg, \
+ .val = 0, \
}
/*
@@ -1705,7 +1849,9 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
.access = access_id_reg, \
.get_user = get_id_reg, \
.set_user = set_id_reg, \
- .visibility = raz_visibility \
+ .visibility = raz_visibility, \
+ .reset = kvm_read_sanitised_id_reg, \
+ .val = 0, \
}
/*
@@ -1719,6 +1865,8 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
.get_user = get_id_reg, \
.set_user = set_id_reg, \
.visibility = raz_visibility, \
+ .reset = kvm_read_sanitised_id_reg, \
+ .val = 0, \
}
static bool access_sp_el1(struct kvm_vcpu *vcpu,
@@ -1826,9 +1974,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* CRm=1 */
AA32_ID_SANITISED(ID_PFR0_EL1),
AA32_ID_SANITISED(ID_PFR1_EL1),
- { SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
- .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
- .visibility = aa32_id_visibility, },
+ { SYS_DESC(SYS_ID_DFR0_EL1),
+ .access = access_id_reg,
+ .get_user = get_id_reg,
+ .set_user = set_id_dfr0_el1,
+ .visibility = aa32_id_visibility,
+ .reset = read_sanitised_id_dfr0_el1,
+ .val = ID_DFR0_EL1_PerfMon_MASK, },
ID_HIDDEN(ID_AFR0_EL1),
AA32_ID_SANITISED(ID_MMFR0_EL1),
AA32_ID_SANITISED(ID_MMFR1_EL1),
@@ -1857,8 +2009,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* AArch64 ID registers */
/* CRm=4 */
- { SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
- .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
+ { SYS_DESC(SYS_ID_AA64PFR0_EL1),
+ .access = access_id_reg,
+ .get_user = get_id_reg,
+ .set_user = set_id_reg,
+ .reset = read_sanitised_id_aa64pfr0_el1,
+ .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
ID_SANITISED(ID_AA64PFR1_EL1),
ID_UNALLOCATED(4,2),
ID_UNALLOCATED(4,3),
@@ -1868,8 +2024,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_UNALLOCATED(4,7),
/* CRm=5 */
- { SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
- .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
+ { SYS_DESC(SYS_ID_AA64DFR0_EL1),
+ .access = access_id_reg,
+ .get_user = get_id_reg,
+ .set_user = set_id_aa64dfr0_el1,
+ .reset = read_sanitised_id_aa64dfr0_el1,
+ .val = ID_AA64DFR0_EL1_PMUVer_MASK, },
ID_SANITISED(ID_AA64DFR1_EL1),
ID_UNALLOCATED(5,2),
ID_UNALLOCATED(5,3),
@@ -2203,7 +2363,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
EL2_REG(HCR_EL2, access_rw, reset_val, 0),
EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
- EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_EL2_DEFAULT ),
+ EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
EL2_REG(HACR_EL2, access_rw, reset_val, 0),
@@ -2260,6 +2420,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};
+static const struct sys_reg_desc *first_idreg;
+
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -2950,6 +3112,28 @@ static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
return false;
}
+static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
+{
+ const struct sys_reg_desc *idreg = first_idreg;
+ u32 id = reg_to_encoding(idreg);
+ struct kvm *kvm = vcpu->kvm;
+
+ if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
+ return;
+
+ lockdep_assert_held(&kvm->arch.config_lock);
+
+ /* Initialize all idregs */
+ while (is_id_reg(id)) {
+ IDREG(kvm, id) = idreg->reset(vcpu, idreg);
+
+ idreg++;
+ id = reg_to_encoding(idreg);
+ }
+
+ set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+}
+
/**
* kvm_reset_sys_regs - sets system registers to reset value
* @vcpu: The VCPU pointer
@@ -2961,9 +3145,17 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
unsigned long i;
- for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
- if (sys_reg_descs[i].reset)
- sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
+ kvm_reset_id_regs(vcpu);
+
+ for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
+ const struct sys_reg_desc *r = &sys_reg_descs[i];
+
+ if (is_id_reg(reg_to_encoding(r)))
+ continue;
+
+ if (r->reset)
+ r->reset(vcpu, r);
+ }
}
/**
@@ -3064,19 +3256,21 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
*/
#define FUNCTION_INVARIANT(reg) \
- static void get_##reg(struct kvm_vcpu *v, \
+ static u64 get_##reg(struct kvm_vcpu *v, \
const struct sys_reg_desc *r) \
{ \
((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
+ return ((struct sys_reg_desc *)r)->val; \
}
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)
-static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
+static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
+ return ((struct sys_reg_desc *)r)->val;
}
/* ->val is filled in by kvm_sys_reg_table_init() */
@@ -3368,6 +3562,7 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
int __init kvm_sys_reg_table_init(void)
{
+ struct sys_reg_params params;
bool valid = true;
unsigned int i;
@@ -3386,5 +3581,11 @@ int __init kvm_sys_reg_table_init(void)
for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
+ /* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
+ params = encoding_to_params(SYS_ID_PFR0_EL1);
+ first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+ if (!first_idreg)
+ return -EINVAL;
+
return 0;
}
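
For illustration, a minimal userspace sketch of what the rework above enables: tightening
ID_AA64DFR0_EL1.PMUVer through KVM_GET_ONE_REG/KVM_SET_ONE_REG before the first KVM_RUN.
It assumes an arm64 host whose <linux/kvm.h> provides the ARM64_SYS_REG() helper and a
vcpu fd obtained elsewhere; the function name is made up for this sketch and error
handling is trimmed.

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    /* Lower (or clear) the advertised PMU version on a not-yet-run vcpu. */
    static int limit_pmuver(int vcpu_fd, uint64_t pmuver)
    {
            uint64_t val;
            struct kvm_one_reg reg = {
                    .id   = ARM64_SYS_REG(3, 0, 0, 5, 0),   /* ID_AA64DFR0_EL1 */
                    .addr = (uint64_t)(unsigned long)&val,
            };

            if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                    return -1;

            val &= ~(UINT64_C(0xf) << 8);   /* PMUVer lives in bits [11:8] */
            val |= pmuver << 8;             /* e.g. 1 = PMUv3, 0 = no PMU  */

            /* Fails with EINVAL if it claims more than the host supports, and
             * with EBUSY once the VM has run (see set_id_reg() above). */
            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }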
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 6b11f2cc7146..c65c129b3500 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -27,6 +27,13 @@ struct sys_reg_params {
bool is_write;
};
+#define encoding_to_params(reg) \
+ ((struct sys_reg_params){ .Op0 = sys_reg_Op0(reg), \
+ .Op1 = sys_reg_Op1(reg), \
+ .CRn = sys_reg_CRn(reg), \
+ .CRm = sys_reg_CRm(reg), \
+ .Op2 = sys_reg_Op2(reg) })
+
#define esr_sys64_to_params(esr) \
((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3, \
.Op1 = ((esr) >> 14) & 0x7, \
@@ -64,13 +71,16 @@ struct sys_reg_desc {
struct sys_reg_params *,
const struct sys_reg_desc *);
- /* Initialization for vcpu. */
- void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);
+ /*
+ * Initialization for vcpu. Returns the initialized value, or the
+ * KVM-sanitised value for ID registers.
+ */
+ u64 (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);
/* Index into sys_reg[], or 0 if we don't need to save it. */
int reg;
- /* Value (usually reset value) */
+ /* Value (usually reset value), or write mask for idregs */
u64 val;
/* Custom get/set_user functions, fallback to generic if NULL */
@@ -123,19 +133,21 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
}
/* Reset functions */
-static inline void reset_unknown(struct kvm_vcpu *vcpu,
+static inline u64 reset_unknown(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= NR_SYS_REGS);
__vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
+ return __vcpu_sys_reg(vcpu, r->reg);
}
-static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+static inline u64 reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= NR_SYS_REGS);
__vcpu_sys_reg(vcpu, r->reg) = r->val;
+ return __vcpu_sys_reg(vcpu, r->reg);
}
static inline unsigned int sysreg_visibility(const struct kvm_vcpu *vcpu,
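
The change of reset() from a void, side-effecting callback to one that returns a u64 is
what lets kvm_reset_id_regs() in sys_regs.c store the result straight into the per-VM
IDREG() array. A small standalone C sketch of that pattern; the names and values below
are illustrative only, not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    struct reg_desc {
            const char *name;
            uint64_t (*reset)(const struct reg_desc *d); /* new style: returns the value   */
            uint64_t val;                                /* repurposed as writable-bit mask */
    };

    static uint64_t reset_sanitised(const struct reg_desc *d)
    {
            return 0x0000000000000010ULL;                /* stand-in "sanitised" value */
    }

    int main(void)
    {
            struct reg_desc descs[] = { { "ID_X_EL1", reset_sanitised, 0xf0 } };
            uint64_t idregs[1];

            /* The caller decides where the value lands; no side effects needed. */
            for (unsigned int i = 0; i < 1; i++)
                    idregs[i] = descs[i].reset(&descs[i]);

            printf("%s = %#llx (writable mask %#llx)\n", descs[0].name,
                   (unsigned long long)idregs[0],
                   (unsigned long long)descs[0].val);
            return 0;
    }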
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 935f0a8911f9..3fe516b32577 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -536,9 +536,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
unsigned long vm_flags;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
-#ifdef CONFIG_PER_VMA_LOCK
struct vm_area_struct *vma;
-#endif
if (kprobe_page_fault(regs, esr))
return 0;
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 19c23c4fa2da..c80ed4f3cbce 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -25,6 +25,7 @@ HAS_E0PD
HAS_ECV
HAS_ECV_CNTPOFF
HAS_EPAN
+HAS_EVT
HAS_GENERIC_AUTH
HAS_GENERIC_AUTH_ARCH_QARMA3
HAS_GENERIC_AUTH_ARCH_QARMA5
@@ -51,6 +52,7 @@ HAS_TLB_RANGE
HAS_VIRT_HOST_EXTN
HAS_WFXT
HW_DBM
+KVM_HVHE
KVM_PROTECTED_MODE
MISMATCHED_CACHE_TYPE
MTE
@@ -81,6 +83,7 @@ WORKAROUND_2077057
WORKAROUND_2457168
WORKAROUND_2645198
WORKAROUND_2658417
+WORKAROUND_AMPERE_AC03_CPU_38
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
diff --git a/arch/csky/kernel/probes/uprobes.c b/arch/csky/kernel/probes/uprobes.c
index 2d31a12e46cf..936bea6fd32d 100644
--- a/arch/csky/kernel/probes/uprobes.c
+++ b/arch/csky/kernel/probes/uprobes.c
@@ -64,6 +64,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
struct uprobe_task *utask = current->utask;
WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
+ current->thread.trap_no = utask->autask.saved_trap_no;
instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
@@ -101,6 +102,8 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
+ current->thread.trap_no = utask->autask.saved_trap_no;
+
/*
* Task has received a fatal signal, so reset back to probed
* address.
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index aefae2efde9f..33733245f42b 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += agp.h
+generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += vtime.h
diff --git a/arch/ia64/include/asm/export.h b/arch/ia64/include/asm/export.h
deleted file mode 100644
index ad18c6583252..000000000000
--- a/arch/ia64/include/asm/export.h
+++ /dev/null
@@ -1,3 +0,0 @@
-/* EXPORT_DATA_SYMBOL != EXPORT_SYMBOL here */
-#define KSYM_FUNC(name) @fptr(name)
-#include <asm-generic/export.h>
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index f22469f1c1fc..c096500590e9 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -170,7 +170,7 @@ RestRR: \
__PAGE_ALIGNED_DATA
.global empty_zero_page
-EXPORT_DATA_SYMBOL_GPL(empty_zero_page)
+EXPORT_SYMBOL_GPL(empty_zero_page)
empty_zero_page:
.skip PAGE_SIZE
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index d6d4229b28db..7a418e324d30 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -87,7 +87,7 @@
.align 32768 // align on 32KB boundary
.global ia64_ivt
- EXPORT_DATA_SYMBOL(ia64_ivt)
+ EXPORT_SYMBOL(ia64_ivt)
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
diff --git a/arch/loongarch/include/uapi/asm/bitsperlong.h b/arch/loongarch/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 00b4ba1e5cdf..000000000000
--- a/arch/loongarch/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ASM_LOONGARCH_BITSPERLONG_H
-#define __ASM_LOONGARCH_BITSPERLONG_H
-
-#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* __ASM_LOONGARCH_BITSPERLONG_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index d97fbb812f63..43e8da8465f9 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -115,7 +115,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
pgd_val(*pgdp) = virt_to_phys(pmdp);
}
-#define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK))
+#define __pte_page(pte) ((void *) (pte_val(pte) & PAGE_MASK))
#define pmd_page_vaddr(pmd) ((unsigned long) (pmd_val(pmd)))
static inline int pte_none(pte_t pte)
@@ -134,7 +134,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_val(*ptep) = 0;
}
-#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
#define pte_page(pte) virt_to_page(__pte_page(pte))
static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index 3903db2e8da7..363aa0f9ba8a 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -121,8 +121,15 @@ static inline void *__va(unsigned long x)
* TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
* of the shifts unnecessary.
*/
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+ return __va(pfn << PAGE_SHIFT);
+}
extern int m68k_virt_to_node_shift;
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 060e4c0e7605..af3a10973233 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -19,8 +19,15 @@ extern unsigned long memory_end;
#define __pa(vaddr) ((unsigned long)(vaddr))
#define __va(paddr) ((void *)((unsigned long)(paddr)))
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+ return __va(pfn << PAGE_SHIFT);
+}
#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
#define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
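
The macro to static-inline conversions in page_mm.h and page_no.h above exist mainly for
type checking: virt_to_pfn() now only accepts a pointer, which is why later hunks in this
series add (void *) casts in mcfmmu.c, sun3mmu.c and the dvma code. A self-contained C
sketch of the same pattern; the flat __pa()/__va() stand-ins are illustrative only.

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Stand-ins for the kernel's __pa()/__va() on a flat 1:1 layout. */
    static inline unsigned long __pa(const void *vaddr) { return (unsigned long)vaddr; }
    static inline void *__va(unsigned long paddr) { return (void *)paddr; }

    static inline unsigned long virt_to_pfn(const void *kaddr)
    {
            return __pa(kaddr) >> PAGE_SHIFT;
    }

    static inline void *pfn_to_virt(unsigned long pfn)
    {
            return __va(pfn << PAGE_SHIFT);
    }

    int main(void)
    {
            static char buf[1];
            unsigned long pfn = virt_to_pfn(buf);   /* pointer argument: OK */
            /* virt_to_pfn(0x1000UL) would now draw an int-to-pointer warning, */
            /* catching the kind of misuse the old macros silently accepted.   */
            printf("pfn=%lu back=%p\n", pfn, pfn_to_virt(pfn));
            return 0;
    }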
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index e582b0484a55..9e7bf8a5f8f8 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -91,7 +91,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pmd_set(pmdp,ptep) do {} while (0)
#define __pte_page(pte) \
-((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
+(__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
@@ -111,7 +111,7 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
#define pte_page(pte) virt_to_page(__pte_page(pte))
#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
-#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
+#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 42f45abea37a..a6efaa7cacde 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -69,7 +69,8 @@ void __init paging_init(void)
/* now change pg_table to kernel virtual addresses */
for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
- pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ pte_t pte = pfn_pte(virt_to_pfn((void *)address),
+ PAGE_INIT);
if (address >= (unsigned long) high_memory)
pte_val(pte) = 0;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 911301224078..c75984e2d86b 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -102,7 +102,7 @@ static struct list_head ptable_list[2] = {
LIST_HEAD_INIT(ptable_list[1]),
};
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
@@ -201,7 +201,7 @@ int free_pointer_table(void *table, int type)
list_del(dp);
mmu_page_dtor((void *)page);
if (type == TABLE_PTE)
- pgtable_pte_page_dtor(virt_to_page(page));
+ pgtable_pte_page_dtor(virt_to_page((void *)page));
free_page (page);
return 1;
} else if (ptable_list[type].next != dp) {
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index b619d0d4319c..c5e6a23e0262 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -75,7 +75,7 @@ void __init paging_init(void)
/* now change pg_table to kernel virtual addresses */
pg_table = (pte_t *) __va ((unsigned long) pg_table);
for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
- pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ pte_t pte = pfn_pte(virt_to_pfn((void *)address), PAGE_INIT);
if (address >= (unsigned long)high_memory)
pte_val (pte) = 0;
set_pte (pg_table, pte);
diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c
index f15ff16b9997..83fcae6a0e79 100644
--- a/arch/m68k/sun3/dvma.c
+++ b/arch/m68k/sun3/dvma.c
@@ -29,7 +29,7 @@ static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
j = *(volatile unsigned long *)kaddr;
*(volatile unsigned long *)kaddr = j;
- ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
+ ptep = pfn_pte(virt_to_pfn((void *)kaddr), PAGE_KERNEL);
pte = pte_val(ptep);
// pr_info("dvma_remap: addr %lx -> %lx pte %08lx\n", kaddr, vaddr, pte);
if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c
index 08bb92113026..a6034ba05845 100644
--- a/arch/m68k/sun3x/dvma.c
+++ b/arch/m68k/sun3x/dvma.c
@@ -125,7 +125,7 @@ inline int dvma_map_cpu(unsigned long kaddr,
do {
pr_debug("mapping %08lx phys to %08lx\n",
__pa(kaddr), vaddr);
- set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
+ set_pte(pte, pfn_pte(virt_to_pfn((void *)kaddr),
PAGE_KERNEL));
pte++;
kaddr += PAGE_SIZE;
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index a7a4ee66a9d3..ef7b05ae92ce 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -346,7 +346,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_LDFLAGS += -m $(ld-emul)
ifdef CONFIG_MIPS
-CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+CHECKFLAGS += $(shell $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
grep -E -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
endif
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index b8f3397c59c9..d4ab34b3b404 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -51,9 +51,9 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
#define PORT(_base, _irq) \
{ \
.mapbase = _base, \
+ .mapsize = 0x1000, \
.irq = _irq, \
.regshift = 2, \
- .iotype = UPIO_AU, \
.flags = UPF_SKIP_TEST | UPF_IOREMAP | \
UPF_FIXED_TYPE, \
.type = PORT_16550A, \
@@ -124,8 +124,14 @@ static void __init alchemy_setup_uarts(int ctype)
au1xx0_uart_device.dev.platform_data = ports;
/* Fill up uartclk. */
- for (s = 0; s < c; s++)
+ for (s = 0; s < c; s++) {
ports[s].uartclk = uartclk;
+ if (au_platform_setup(&ports[s]) < 0) {
+ kfree(ports);
+ printk(KERN_INFO "Alchemy: missing support for UARTs\n");
+ return;
+ }
+ }
if (platform_device_register(&au1xx0_uart_device))
printk(KERN_INFO "Alchemy: failed to register UARTs\n");
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index c0b4b1c253d1..4cb46d5c64a2 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -304,16 +304,6 @@ config IRQSTACKS
for handling hard and soft interrupts. This can help avoid
overflowing the process kernel stacks.
-config TLB_PTLOCK
- bool "Use page table locks in TLB fault handler"
- depends on SMP
- default n
- help
- Select this option to enable page table locking in the TLB
- fault handler. This ensures that page table entries are
- updated consistently on SMP machines at the expense of some
- loss in performance.
-
config HOTPLUG_CPU
bool
default y if SMP
@@ -346,7 +336,7 @@ config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
- default "4" if 64BIT
+ default "8" if 64BIT
default "16"
config KEXEC
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index 3a059cb5e112..1401e4c5fe5f 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -10,3 +10,14 @@ config LIGHTWEIGHT_SPINLOCK_CHECK
spinlock debugging you should choose the DEBUG_SPINLOCK option
which will detect uninitialized spinlocks too.
If unsure say Y here.
+
+config TLB_PTLOCK
+ bool "Use page table locks in TLB fault handler"
+ depends on SMP
+ default n
+ help
+ Select this option to enable page table locking in the TLB
+ fault handler. This ensures that page table entries are
+ updated consistently on SMP machines at the expense of some
+ loss in performance.
+
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 8c4d4844321f..9651f4390029 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -82,7 +82,6 @@ CONFIG_TUN=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
CONFIG_NET_TULIP=y
@@ -97,6 +96,7 @@ CONFIG_LASI_82596=y
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
@@ -129,17 +129,53 @@ CONFIG_PRINTER=m
CONFIG_PPDEV=m
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
-# CONFIG_HWMON is not set
+CONFIG_HWMON=m
+CONFIG_DRM=m
+CONFIG_DRM_DP_CEC=y
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_NOUVEAU=m
+# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set
+CONFIG_DRM_VGEM=m
+CONFIG_DRM_UDL=m
+CONFIG_DRM_MGAG200=m
CONFIG_FB=y
CONFIG_FB_FOREIGN_ENDIAN=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_PM2=m
+CONFIG_FB_PM2_FIFO_DISCONNECT=y
+CONFIG_FB_NVIDIA=m
+CONFIG_FB_NVIDIA_I2C=y
+# CONFIG_FB_NVIDIA_BACKLIGHT is not set
+CONFIG_FB_RIVA=m
+CONFIG_FB_RIVA_I2C=y
+# CONFIG_FB_RIVA_BACKLIGHT is not set
CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
CONFIG_FB_MATROX_G=y
+CONFIG_FB_MATROX_I2C=m
+CONFIG_FB_MATROX_MAVEN=m
+CONFIG_FB_ATY128=m
+# CONFIG_FB_ATY128_BACKLIGHT is not set
+CONFIG_FB_ATY=m
+CONFIG_FB_ATY_CT=y
+CONFIG_FB_ATY_GX=y
+# CONFIG_FB_ATY_BACKLIGHT is not set
+CONFIG_FB_S3=m
+CONFIG_FB_SAVAGE=m
+CONFIG_FB_SAVAGE_I2C=y
+CONFIG_FB_SAVAGE_ACCEL=y
+CONFIG_FB_SIS=m
+CONFIG_FB_SIS_300=y
+CONFIG_FB_SIS_315=y
CONFIG_FB_VOODOO1=m
+CONFIG_FB_TRIDENT=m
+CONFIG_FB_SMSCUFX=m
+CONFIG_FB_UDL=m
CONFIG_DUMMY_CONSOLE_COLUMNS=128
CONFIG_DUMMY_CONSOLE_ROWS=48
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -216,17 +252,17 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
# CONFIG_CIFS_DEBUG is not set
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 57501b0aed92..6758c030b09d 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -20,9 +20,6 @@ CONFIG_USER_NS=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_PA8X00=y
-CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_HPPB=y
CONFIG_IOMMU_CCIO=y
@@ -37,6 +34,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPAT_BRK is not set
# CONFIG_COMPACTION is not set
CONFIG_MEMORY_FAILURE=y
CONFIG_NET=y
@@ -103,7 +101,6 @@ CONFIG_TUN=y
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
CONFIG_NET_TULIP=y
@@ -121,6 +118,7 @@ CONFIG_E1000=y
# CONFIG_NET_VENDOR_OKI is not set
CONFIG_QLA3XXX=m
CONFIG_QLCNIC=m
+# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
@@ -186,7 +184,6 @@ CONFIG_WATCHDOG=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_SSB=m
CONFIG_SSB_DRIVER_PCICORE=y
-CONFIG_HTC_PASIC3=m
CONFIG_LPC_SCH=m
CONFIG_MFD_SM501=m
CONFIG_REGULATOR=y
@@ -196,14 +193,46 @@ CONFIG_MEDIA_SUPPORT=m
CONFIG_AGP=y
CONFIG_AGP_PARISC=y
CONFIG_DRM=y
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
CONFIG_DRM_RADEON=y
+CONFIG_DRM_NOUVEAU=m
+# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set
+CONFIG_DRM_MGAG200=m
CONFIG_FB=y
-CONFIG_FB_MATROX=y
+CONFIG_FB_PM2=m
+CONFIG_FB_PM2_FIFO_DISCONNECT=y
+CONFIG_FB_NVIDIA=m
+CONFIG_FB_NVIDIA_I2C=y
+# CONFIG_FB_NVIDIA_BACKLIGHT is not set
+CONFIG_FB_RIVA=m
+CONFIG_FB_RIVA_I2C=y
+# CONFIG_FB_RIVA_BACKLIGHT is not set
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_MILLENIUM=y
CONFIG_FB_MATROX_MYSTIQUE=y
CONFIG_FB_MATROX_G=y
-CONFIG_FB_MATROX_I2C=y
-CONFIG_FB_MATROX_MAVEN=y
+CONFIG_FB_MATROX_I2C=m
+CONFIG_FB_MATROX_MAVEN=m
CONFIG_FB_RADEON=y
+# CONFIG_FB_RADEON_BACKLIGHT is not set
+CONFIG_FB_ATY128=m
+# CONFIG_FB_ATY128_BACKLIGHT is not set
+CONFIG_FB_ATY=m
+CONFIG_FB_ATY_CT=y
+CONFIG_FB_ATY_GX=y
+# CONFIG_FB_ATY_BACKLIGHT is not set
+CONFIG_FB_S3=m
+CONFIG_FB_SAVAGE=m
+CONFIG_FB_SAVAGE_I2C=y
+CONFIG_FB_SAVAGE_ACCEL=y
+CONFIG_FB_SIS=m
+CONFIG_FB_SIS_300=y
+CONFIG_FB_SIS_315=y
+CONFIG_FB_VOODOO1=m
+CONFIG_FB_TRIDENT=m
+CONFIG_FB_SMSCUFX=m
+CONFIG_FB_UDL=m
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_HIDRAW=y
@@ -257,12 +286,12 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_UTF8=m
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/parisc/include/asm/irqflags.h b/arch/parisc/include/asm/irqflags.h
index 38a19c0bac3a..00fd87724588 100644
--- a/arch/parisc/include/asm/irqflags.h
+++ b/arch/parisc/include/asm/irqflags.h
@@ -31,6 +31,11 @@ static inline unsigned long arch_local_irq_save(void)
static inline void arch_local_irq_restore(unsigned long flags)
{
+ /* warn if IRQs are on although they should be off */
+ if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+ if (arch_local_save_flags() & PSW_I)
+ asm volatile("break 6,6\n"); /* SPINLOCK_BREAK_INSN */
+
asm volatile("mtsm %0" : : "r" (flags) : "memory");
}
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 2b4fad8328e8..269b9a159f01 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -88,8 +88,8 @@ int pdc_iodc_print(const unsigned char *str, unsigned count);
void pdc_emergency_unlock(void);
int pdc_sti_call(unsigned long func, unsigned long flags,
- unsigned long inptr, unsigned long outputr,
- unsigned long glob_cfg);
+ unsigned long inptr, unsigned long outputr,
+ unsigned long glob_cfg, int do_call64);
int __pdc_cpu_rendezvous(void);
void pdc_cpu_rendezvous_lock(void);
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index a608970b249a..e132b2819fc9 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -11,6 +11,7 @@
#ifndef __ASSEMBLY__
#include <linux/threads.h>
+#include <linux/irqreturn.h>
#include <asm/assembly.h>
#include <asm/prefetch.h>
@@ -291,6 +292,42 @@ extern void __noreturn toc_intr(struct pt_regs *regs);
extern void toc_handler(void);
extern unsigned int toc_handler_size;
extern unsigned int toc_handler_csum;
+extern void do_cpu_irq_mask(struct pt_regs *);
+extern irqreturn_t timer_interrupt(int, void *);
+extern irqreturn_t ipi_interrupt(int, void *);
+extern void start_cpu_itimer(void);
+extern void handle_interruption(int, struct pt_regs *);
+
+/* called from assembly code: */
+extern void start_parisc(void);
+extern void smp_callin(unsigned long);
+extern void sys_rt_sigreturn(struct pt_regs *, int);
+extern void do_notify_resume(struct pt_regs *, long);
+extern long do_syscall_trace_enter(struct pt_regs *);
+extern void do_syscall_trace_exit(struct pt_regs *);
+
+/* CPU startup and info */
+struct seq_file;
+extern void early_trap_init(void);
+extern void collect_boot_cpu_data(void);
+extern int show_cpuinfo (struct seq_file *m, void *v);
+
+/* driver code in driver/parisc */
+extern void gsc_init(void);
+extern void processor_init(void);
+extern void ccio_init(void);
+extern void hppb_init(void);
+extern void dino_init(void);
+extern void iosapic_init(void);
+extern void lba_init(void);
+extern void sba_init(void);
+extern void parisc_eisa_init(void);
+struct parisc_device;
+struct resource;
+extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
+extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
+extern void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask);
+extern void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp);
#endif /* __ASSEMBLY__ */
diff --git a/arch/parisc/include/uapi/asm/cachectl.h b/arch/parisc/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000000..68d6b455498b
--- /dev/null
+++ b/arch/parisc/include/uapi/asm/cachectl.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_CACHECTL
+#define _ASM_CACHECTL
+
+/*
+ * Options for cacheflush system call
+ */
+#define ICACHE (1<<0) /* flush instruction cache */
+#define DCACHE (1<<1) /* writeback and flush data cache */
+#define BCACHE (ICACHE|DCACHE) /* flush both caches */
+
+#endif /* _ASM_CACHECTL */
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 3d138c9cf9ce..2d1478fc4aa5 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -21,6 +21,9 @@ CFLAGS_REMOVE_unwind.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
endif
+CFLAGS_REMOVE_sys_parisc.o = -Wmissing-prototypes -Wmissing-declarations
+CFLAGS_REMOVE_sys_parisc32.o = -Wmissing-prototypes -Wmissing-declarations
+
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PA11) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/parisc/kernel/audit.c b/arch/parisc/kernel/audit.c
index f420b5552140..375cd73b5281 100644
--- a/arch/parisc/kernel/audit.c
+++ b/arch/parisc/kernel/audit.c
@@ -40,11 +40,6 @@ int audit_classify_arch(int arch)
int audit_classify_syscall(int abi, unsigned syscall)
{
-#ifdef CONFIG_COMPAT
- extern int parisc32_classify_syscall(unsigned);
- if (abi == AUDIT_ARCH_PARISC)
- return parisc32_classify_syscall(syscall);
-#endif
switch (syscall) {
case __NR_open:
return AUDITSC_OPEN;
@@ -55,6 +50,10 @@ int audit_classify_syscall(int abi, unsigned syscall)
case __NR_openat2:
return AUDITSC_OPENAT2;
default:
+#ifdef CONFIG_COMPAT
+ if (abi == AUDIT_ARCH_PARISC)
+ return AUDITSC_COMPAT;
+#endif
return AUDITSC_NATIVE;
}
}
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 501160250bb7..b55b35c89d6a 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -19,6 +19,7 @@
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
+#include <linux/syscalls.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@@ -28,6 +29,7 @@
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
+#include <asm/cachectl.h>
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
@@ -790,3 +792,50 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+
+
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
+ unsigned int, cache)
+{
+ unsigned long start, end;
+ ASM_EXCEPTIONTABLE_VAR(error);
+
+ if (bytes == 0)
+ return 0;
+ if (!access_ok((void __user *) addr, bytes))
+ return -EFAULT;
+
+ end = addr + bytes;
+
+ if (cache & DCACHE) {
+ start = addr;
+ __asm__ __volatile__ (
+#ifdef CONFIG_64BIT
+ "1: cmpb,*<<,n %0,%2,1b\n"
+#else
+ "1: cmpb,<<,n %0,%2,1b\n"
+#endif
+ " fic,m %3(%4,%0)\n"
+ "2: sync\n"
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
+ : "+r" (start), "+r" (error)
+ : "r" (end), "r" (dcache_stride), "i" (SR_USER));
+ }
+
+ if (cache & ICACHE && error == 0) {
+ start = addr;
+ __asm__ __volatile__ (
+#ifdef CONFIG_64BIT
+ "1: cmpb,*<<,n %0,%2,1b\n"
+#else
+ "1: cmpb,<<,n %0,%2,1b\n"
+#endif
+ " fdc,m %3(%4,%0)\n"
+ "2: sync\n"
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
+ : "+r" (start), "+r" (error)
+ : "r" (end), "r" (icache_stride), "i" (SR_USER));
+ }
+
+ return error;
+}
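
For illustration, the new cacheflush() syscall can be reached from userspace with a raw
syscall(); a minimal sketch, assuming a parisc toolchain. __NR_cacheflush may not yet
exist in the installed headers, so it is defined locally from the syscall.tbl entry added
later in this series, and the flag values mirror the new uapi asm/cachectl.h.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_cacheflush
    #define __NR_cacheflush 356          /* per the syscall.tbl hunk below */
    #endif

    #define ICACHE (1 << 0)              /* flush instruction cache        */
    #define DCACHE (1 << 1)              /* writeback and flush data cache */
    #define BCACHE (ICACHE | DCACHE)     /* flush both caches              */

    int main(void)
    {
            static unsigned char code[64];

            /* Pretend we just wrote instructions into 'code', e.g. a JIT stub. */
            memset(code, 0x08, sizeof(code));

            /* Write back the D-cache and invalidate the I-cache for the range. */
            if (syscall(__NR_cacheflush, (unsigned long)code, sizeof(code), BCACHE) != 0) {
                    perror("cacheflush");
                    return 1;
            }
            puts("range flushed");
            return 0;
    }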
diff --git a/arch/parisc/kernel/compat_audit.c b/arch/parisc/kernel/compat_audit.c
index 539b16891bdf..3ac53f1ab860 100644
--- a/arch/parisc/kernel/compat_audit.c
+++ b/arch/parisc/kernel/compat_audit.c
@@ -26,19 +26,3 @@ unsigned int parisc32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
-
-int parisc32_classify_syscall(unsigned syscall)
-{
- switch (syscall) {
- case __NR_open:
- return AUDITSC_OPEN;
- case __NR_openat:
- return AUDITSC_OPENAT;
- case __NR_execve:
- return AUDITSC_EXECVE;
- case __NR_openat2:
- return AUDITSC_OPENAT2;
- default:
- return AUDITSC_COMPAT;
- }
-}
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 28f47285d448..8f4b77648491 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 1999 The Puffin Group
* Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
- * Copyright (c) 2001 Helge Deller <deller@gmx.de>
+ * Copyright (c) 2001-2023 Helge Deller <deller@gmx.de>
* Copyright (c) 2001,2002 Ryan Bradetich
* Copyright (c) 2004-2005 Thibaut VARENE <varenet@parisc-linux.org>
*
@@ -74,13 +74,13 @@ static int descend_children(struct device * dev, void * data)
}
/**
- * for_each_padev - Iterate over all devices in the tree
- * @fn: Function to call for each device.
- * @data: Data to pass to the called function.
+ * for_each_padev - Iterate over all devices in the tree
+ * @fn: Function to call for each device.
+ * @data: Data to pass to the called function.
*
- * This performs a depth-first traversal of the tree, calling the
- * function passed for each node. It calls the function for parents
- * before children.
+ * This performs a depth-first traversal of the tree, calling the
+ * function passed for each node. It calls the function for parents
+ * before children.
*/
static int for_each_padev(int (*fn)(struct device *, void *), void * data)
@@ -280,7 +280,7 @@ int __init machine_has_merced_bus(void)
/**
* find_pa_parent_type - Find a parent of a specific type
- * @dev: The device to start searching from
+ * @padev: The device to start searching from
* @type: The device type to search for.
*
* Walks up the device tree looking for a device of the specified type.
@@ -344,8 +344,8 @@ static char *print_hwpath(struct hardware_path *path, char *output)
/**
* print_pa_hwpath - Returns hardware path for PA devices
- * dev: The device to return the path for
- * output: Pointer to a previously-allocated array to place the path in.
+ * @dev: The device to return the path for
+ * @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PA device. This string is compatible with that used by PDC, and
@@ -379,8 +379,8 @@ EXPORT_SYMBOL(get_pci_node_path);
/**
* print_pci_hwpath - Returns hardware path for PCI devices
- * dev: The device to return the path for
- * output: Pointer to a previously-allocated array to place the path in.
+ * @dev: The device to return the path for
+ * @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PCI device. This string is compatible with that used by PDC, and
@@ -415,7 +415,8 @@ static void setup_bus_id(struct parisc_device *padev)
dev_set_name(&padev->dev, name);
}
-struct parisc_device * __init create_tree_node(char id, struct device *parent)
+static struct parisc_device * __init create_tree_node(char id,
+ struct device *parent)
{
struct parisc_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -741,7 +742,7 @@ parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
};
if (device_for_each_child(parent, &recurse_data, descend_children))
- /* nothing */;
+ { /* nothing */ };
return d.dev;
}
@@ -771,8 +772,8 @@ EXPORT_SYMBOL(hwpath_to_device);
/**
* device_to_hwpath - Populates the hwpath corresponding to the given device.
- * @param dev the target device
- * @param path pointer to a previously allocated hwpath struct to be filled in
+ * @dev: the target device
+ * @path: pointer to a previously allocated hwpath struct to be filled in
*/
void device_to_hwpath(struct device *dev, struct hardware_path *path)
{
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index cc124d9f1f7f..6d1c781eb1db 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -134,7 +134,7 @@ static unsigned long f_extend(unsigned long address)
/**
* convert_to_wide - Convert the return buffer addresses into kernel addresses.
- * @address: The return buffer from PDC.
+ * @addr: The return buffer from PDC.
*
* This function is used to convert the return buffer addresses retrieved from PDC
* into kernel addresses when the PDC address size and kernel address size are
@@ -160,6 +160,8 @@ void set_firmware_width_unlocked(void)
ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES,
__pa(pdc_result), 0);
+ if (ret < 0)
+ return;
convert_to_wide(pdc_result);
if (pdc_result[0] != NARROW_FIRMWARE)
parisc_narrow_firmware = 0;
@@ -255,8 +257,8 @@ int __init pdc_instr(unsigned int *instr)
/**
* pdc_chassis_info - Return chassis information.
- * @result: The return buffer.
* @chassis_info: The memory buffer address.
+ * @led_info: The LED information buffer address.
* @len: The size of the memory buffer address.
*
* An HVERSION dependent call for returning the chassis information.
@@ -280,7 +282,8 @@ int __init pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_inf
/**
* pdc_pat_chassis_send_log - Sends a PDC PAT CHASSIS log message.
- * @retval: -1 on error, 0 on success. Other value are PDC errors
+ * @state: state of the machine
+ * @data: value for that state
*
* Must be correctly formatted or expect system crash
*/
@@ -303,7 +306,7 @@ int pdc_pat_chassis_send_log(unsigned long state, unsigned long data)
/**
* pdc_chassis_disp - Updates chassis code
- * @retval: -1 on error, 0 on success
+ * @disp: value to show on display
*/
int pdc_chassis_disp(unsigned long disp)
{
@@ -318,8 +321,7 @@ int pdc_chassis_disp(unsigned long disp)
}
/**
- * pdc_cpu_rendenzvous - Stop currently executing CPU
- * @retval: -1 on error, 0 on success
+ * __pdc_cpu_rendezvous - Stop currently executing CPU and do not return.
*/
int __pdc_cpu_rendezvous(void)
{
@@ -347,7 +349,7 @@ void pdc_cpu_rendezvous_unlock(void)
/**
* pdc_pat_get_PDC_entrypoint - Get PDC entry point for current CPU
- * @retval: -1 on error, 0 on success
+ * @pdc_entry: pointer to where the PDC entry point should be stored
*/
int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry)
{
@@ -369,7 +371,7 @@ int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry)
}
/**
* pdc_chassis_warn - Fetches chassis warnings
- * @retval: -1 on error, 0 on success
+ * @warn: The warning value to be shown
*/
int pdc_chassis_warn(unsigned long *warn)
{
@@ -521,6 +523,7 @@ int pdc_model_info(struct pdc_model *model)
/**
* pdc_model_sysmodel - Get the system model name.
+ * @os_id: The operating system ID asked for (an OS_ID_* value)
* @name: A char array of at least 81 characters.
*
* Get system model name from PDC ROM (e.g. 9000/715 or 9000/778/B160L).
@@ -549,7 +552,7 @@ int pdc_model_sysmodel(unsigned int os_id, char *name)
/**
* pdc_model_versions - Identify the version number of each processor.
- * @cpu_id: The return buffer.
+ * @versions: The return buffer.
* @id: The id of the processor to check.
*
* Returns the version number for each processor component.
@@ -996,8 +999,8 @@ int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl)
/**
* pdc_pci_config_read - read PCI config space.
- * @hpa token from PDC to indicate which PCI device
- * @pci_addr configuration space address to read from
+ * @hpa: Token from PDC to indicate which PCI device
+ * @cfg_addr: Configuration space address to read from
*
* Read PCI Configuration space *before* linux PCI subsystem is running.
*/
@@ -1019,9 +1022,9 @@ unsigned int pdc_pci_config_read(void *hpa, unsigned long cfg_addr)
/**
* pdc_pci_config_write - write PCI config space.
- * @hpa token from PDC to indicate which PCI device
- * @pci_addr configuration space address to write
- * @val value we want in the 32-bit register
+ * @hpa: Token from PDC to indicate which PCI device
+ * @cfg_addr: Configuration space address to write
+ * @val: Value we want in the 32-bit register
*
* Write PCI Configuration space *before* linux PCI subsystem is running.
*/
@@ -1389,17 +1392,25 @@ int pdc_iodc_getc(void)
}
int pdc_sti_call(unsigned long func, unsigned long flags,
- unsigned long inptr, unsigned long outputr,
- unsigned long glob_cfg)
+ unsigned long inptr, unsigned long outputr,
+ unsigned long glob_cfg, int do_call64)
{
- int retval;
+ int retval = 0;
unsigned long irqflags;
- spin_lock_irqsave(&pdc_lock, irqflags);
- retval = real32_call(func, flags, inptr, outputr, glob_cfg);
- spin_unlock_irqrestore(&pdc_lock, irqflags);
+ spin_lock_irqsave(&pdc_lock, irqflags);
+ if (IS_ENABLED(CONFIG_64BIT) && do_call64) {
+#ifdef CONFIG_64BIT
+ retval = real64_call(func, flags, inptr, outputr, glob_cfg);
+#else
+ WARN_ON(1);
+#endif
+ } else {
+ retval = real32_call(func, flags, inptr, outputr, glob_cfg);
+ }
+ spin_unlock_irqrestore(&pdc_lock, irqflags);
- return retval;
+ return retval;
}
EXPORT_SYMBOL(pdc_sti_call);
@@ -1549,7 +1560,7 @@ int pdc_pat_get_irt(void *r_addr, unsigned long cell_num)
/**
* pdc_pat_pd_get_addr_map - Retrieve information about memory address ranges.
- * @actlen: The return buffer.
+ * @actual_len: The return buffer.
* @mem_addr: Pointer to the memory buffer.
* @count: The number of bytes to read from the buffer.
* @offset: The offset with respect to the beginning of the buffer.
@@ -1572,7 +1583,7 @@ int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
}
/**
- * pdc_pat_pd_get_PDC_interface_revisions - Retrieve PDC interface revisions.
+ * pdc_pat_pd_get_pdc_revisions - Retrieve PDC interface revisions.
* @legacy_rev: The legacy revision.
* @pat_rev: The PAT revision.
* @pdc_cap: The PDC capabilities.
@@ -1627,7 +1638,7 @@ int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr)
* pdc_pat_io_pci_cfg_write - Retrieve information about memory address ranges.
* @pci_addr: PCI configuration space address for which the write request is being made.
* @pci_size: Size of write in bytes. Valid values are 1, 2, and 4.
- * @value: Pointer to 1, 2, or 4 byte value in low order end of argument to be
+ * @val: Pointer to 1, 2, or 4 byte value in low order end of argument to be
* written to PCI Config space.
*
*/
@@ -1645,7 +1656,7 @@ int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
}
/**
- * pdc_pat_mem_pdc_info - Retrieve information about page deallocation table
+ * pdc_pat_mem_pdt_info - Retrieve information about page deallocation table
* @rinfo: memory pdt information
*
*/
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index b05055f3ba4b..12c4d4104ade 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -24,9 +24,6 @@
#undef PARISC_IRQ_CR16_COUNTS
-extern irqreturn_t timer_interrupt(int, void *);
-extern irqreturn_t ipi_interrupt(int, void *);
-
#define EIEM_MASK(irq) (1UL<<(CPU_IRQ_MAX - irq))
/* Bits in EIEM correlate with cpu_irq_action[].
@@ -489,7 +486,7 @@ void do_softirq_own_stack(void)
#endif /* CONFIG_IRQSTACKS */
/* ONLY called from entry.S:intr_extint() */
-void do_cpu_irq_mask(struct pt_regs *regs)
+asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
{
struct pt_regs *old_regs;
unsigned long eirr_val;
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index f6e38c4d3904..d214bbe3c2af 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -845,7 +845,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const char *strtab = NULL;
const Elf_Shdr *s;
char *secstrings;
- int symindex = -1;
+ int symindex __maybe_unused = -1;
Elf_Sym *newptr, *oldptr;
Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 415f12d5bab3..d818ece23b4a 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -245,7 +245,7 @@ static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
PCXL_SEARCH_LOOP(idx, mask, size); \
}
-unsigned long
+static unsigned long
pcxl_alloc_range(size_t size)
{
int res_idx;
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index da154406d368..0a9d7008ef2a 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -40,7 +40,7 @@ static unsigned int pdc_chassis_enabled __read_mostly = 1;
/**
* pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
- * @str configuration param: 0 to disable chassis log
+ * @str: configuration param: 0 to disable chassis log
* @return 1
*/
@@ -55,7 +55,6 @@ __setup("pdcchassis=", pdc_chassis_setup);
/**
* pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility
- * @pdc_chassis_old: 1 if old pdc chassis style
*
* Currently, only E class and A180 are known to work with this.
* Inspired by Christoph Plattner
@@ -80,6 +79,9 @@ static void __init pdc_chassis_checkold(void)
/**
* pdc_chassis_panic_event() - Called by the panic handler.
+ * @this: unused
+ * @event: unused
+ * @ptr: unused
*
* As soon as a panic occurs, we should inform the PDC.
*/
@@ -88,7 +90,7 @@ static int pdc_chassis_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
- return NOTIFY_DONE;
+ return NOTIFY_DONE;
}
@@ -99,7 +101,10 @@ static struct notifier_block pdc_chassis_panic_block = {
/**
- * parisc_reboot_event() - Called by the reboot handler.
+ * pdc_chassis_reboot_event() - Called by the reboot handler.
+ * @this: unused
+ * @event: unused
+ * @ptr: unused
*
* As soon as a reboot occurs, we should inform the PDC.
*/
@@ -108,7 +113,7 @@ static int pdc_chassis_reboot_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
- return NOTIFY_DONE;
+ return NOTIFY_DONE;
}
@@ -148,7 +153,7 @@ void __init parisc_pdc_chassis_init(void)
/**
* pdc_chassis_send_status() - Sends a predefined message to the chassis,
* and changes the front panel LEDs according to the new system state
- * @retval: PDC call return value.
+ * @message: Type of message, one of PDC_CHASSIS_DIRECT_* values.
*
* Only machines with 64 bits PDC PAT and those reported in
* pdc_chassis_checkold() are supported atm.
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
index 80943a00e245..0d24735bd918 100644
--- a/arch/parisc/kernel/pdt.c
+++ b/arch/parisc/kernel/pdt.c
@@ -16,6 +16,7 @@
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/kthread.h>
+#include <linux/proc_fs.h>
#include <linux/initrd.h>
#include <linux/pgtable.h>
#include <linux/mm.h>
@@ -23,6 +24,7 @@
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/sections.h>
+#include <asm/pgtable.h>
enum pdt_access_type {
PDT_NONE,
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index d46b6709ec56..90b04d8af212 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -288,7 +288,7 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
static ssize_t perf_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- size_t image_size;
+ size_t image_size __maybe_unused;
uint32_t image_type;
uint32_t interface_type;
uint32_t test;
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index ba07e760d3c7..00b0df97afb1 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
*/
/**
- * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * init_percpu_prof - enable/setup per cpu profiling hooks.
* @cpunum: The processor instance.
*
* FIXME: doesn't do much yet...
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 0797db617962..573f8303e2b0 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -45,7 +45,7 @@ struct proc_dir_entry * proc_runway_root __read_mostly = NULL;
struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL;
-void __init setup_cmdline(char **cmdline_p)
+static void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
char *p;
@@ -86,7 +86,7 @@ void __init setup_cmdline(char **cmdline_p)
}
#ifdef CONFIG_PA11
-void __init dma_ops_init(void)
+static void __init dma_ops_init(void)
{
switch (boot_cpu_data.cpu_type) {
case pcx:
@@ -106,8 +106,6 @@ void __init dma_ops_init(void)
}
#endif
-extern void collect_boot_cpu_data(void);
-
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_64BIT
@@ -167,10 +165,7 @@ void __init setup_arch(char **cmdline_p)
/*
* Display CPU info for all CPUs.
- * for parisc this is in processor.c
*/
-extern int show_cpuinfo (struct seq_file *m, void *v);
-
static void *
c_start (struct seq_file *m, loff_t *pos)
{
@@ -295,16 +290,6 @@ static int __init parisc_init_resources(void)
return 0;
}
-extern void gsc_init(void);
-extern void processor_init(void);
-extern void ccio_init(void);
-extern void hppb_init(void);
-extern void dino_init(void);
-extern void iosapic_init(void);
-extern void lba_init(void);
-extern void sba_init(void);
-extern void eisa_init(void);
-
static int __init parisc_init(void)
{
u32 osid = (OS_ID_LINUX << 16);
@@ -370,7 +355,7 @@ static int __init parisc_init(void)
gsc_init();
#endif
#ifdef CONFIG_EISA
- eisa_init();
+ parisc_eisa_init();
#endif
#if defined(CONFIG_HPPB)
@@ -391,8 +376,6 @@ arch_initcall(parisc_init);
void __init start_parisc(void)
{
- extern void early_trap_init(void);
-
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 8bc0ddaa6219..f886ff0c75df 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -73,7 +73,7 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
return err;
}
-void
+asmlinkage void
sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
@@ -176,7 +176,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
}
static long
-setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall)
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, long in_syscall)
{
unsigned long flags = 0;
@@ -211,7 +211,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_sysc
static long
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
- int in_syscall)
+ long in_syscall)
{
struct rt_sigframe __user *frame;
unsigned long rp, usp;
@@ -380,7 +380,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
*/
static void
-handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
+handle_signal(struct ksignal *ksig, struct pt_regs *regs, long in_syscall)
{
int ret;
sigset_t *oldset = sigmask_to_save();
@@ -578,7 +578,7 @@ static void do_signal(struct pt_regs *regs, long in_syscall)
restore_saved_sigmask();
}
-void do_notify_resume(struct pt_regs *regs, long in_syscall)
+asmlinkage void do_notify_resume(struct pt_regs *regs, long in_syscall)
{
if (test_thread_flag(TIF_SIGPENDING) ||
test_thread_flag(TIF_NOTIFY_SIGNAL))
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index d0eb1bd19a13..4098f9a0964b 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -271,8 +271,6 @@ void arch_send_call_function_single_ipi(int cpu)
static void
smp_cpu_init(int cpunum)
{
- extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
-
/* Set modes and Enable floating point coprocessor */
init_per_cpu(cpunum);
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 39acccabf2ed..9915062d5243 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -24,6 +24,7 @@
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/compat.h>
+#include <linux/elf-randomize.h>
/*
* Construct an artificial page offset for the mapping based on the physical
@@ -339,7 +340,7 @@ asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
((u64)lenhi << 32) | lenlo);
}
-long parisc_personality(unsigned long personality)
+asmlinkage long parisc_personality(unsigned long personality)
{
long err;
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 3c71fad78318..a0a9145b6dd4 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -400,6 +400,7 @@
353 common pkey_free sys_pkey_free
354 common rseq sys_rseq
355 common kexec_file_load sys_kexec_file_load sys_kexec_file_load
+356 common cacheflush sys_cacheflush
# up to 402 is unassigned and reserved for arch specific syscalls
403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime
404 32 clock_settime64 sys_clock_settime sys_clock_settime
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 304eebd1c83e..3b97944c7291 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -338,7 +338,7 @@ static void default_trap(int code, struct pt_regs *regs)
void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
-void transfer_pim_to_trap_frame(struct pt_regs *regs)
+static void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
register int i;
extern unsigned int hpmc_pim_data[];
@@ -800,14 +800,13 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
-void __init initialize_ivt(const void *iva)
+static void __init initialize_ivt(const void *iva)
{
extern const u32 os_hpmc[];
int i;
u32 check = 0;
u32 *ivap;
- u32 *hpmcp;
u32 instr;
if (strcmp((const char *)iva, "cows can fly"))
@@ -840,8 +839,6 @@ void __init initialize_ivt(const void *iva)
/* Setup IVA and compute checksum for HPMC handler */
ivap[6] = (u32)__pa(os_hpmc);
- hpmcp = (u32 *)os_hpmc;
-
for (i=0; i<8; i++)
check += ivap[i];
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index e8a4d77cff53..033b9e50b44a 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -11,6 +11,7 @@
#include <linux/signal.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include <asm/hardirq.h>
#include <asm/traps.h>
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 42acc3b52017..27ae40a443b8 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -24,12 +24,13 @@
#include <asm/unwind.h>
#include <asm/switch_to.h>
#include <asm/sections.h>
+#include <asm/ftrace.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) pr_debug(x)
#else
-#define dbg(x...)
+#define dbg(x...) do { } while (0)
#endif
#define KERNEL_START (KERNEL_BINARY_TEXT_START)
@@ -179,7 +180,7 @@ void unwind_table_remove(struct unwind_table *table)
/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
- long start, stop;
+ long start __maybe_unused, stop __maybe_unused;
register unsigned long gp __asm__ ("r27");
start = (long)&__start___unwind[0];
@@ -220,7 +221,6 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
* Note: We could use dereference_kernel_function_descriptor()
* instead but we want to keep it simple here.
*/
- extern void * const handle_interruption;
extern void * const ret_from_kernel_thread;
extern void * const syscall_exit;
extern void * const intr_return;
@@ -228,8 +228,10 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
#ifdef CONFIG_IRQSTACKS
extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */
+ void *ptr;
- if (pc_is_kernel_fn(pc, handle_interruption)) {
+ ptr = dereference_kernel_function_descriptor(&handle_interruption);
+ if (pc_is_kernel_fn(pc, ptr)) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile
index 3747a0cbd3b8..7b64740e150a 100644
--- a/arch/parisc/math-emu/Makefile
+++ b/arch/parisc/math-emu/Makefile
@@ -6,7 +6,8 @@
# See arch/parisc/math-emu/README
ccflags-y := -Wno-parentheses -Wno-implicit-function-declaration \
-Wno-uninitialized -Wno-strict-prototypes -Wno-return-type \
- -Wno-implicit-int
+ -Wno-implicit-int -Wno-missing-prototypes -Wno-missing-declarations \
+ -Wno-old-style-definition -Wno-unused-but-set-variable
obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \
dfmpy.o sfmpy.o sfsqrt.o dfsqrt.o dfadd.o fmpyfadd.o \
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b0c43f3b0a5f..406c52fe23d5 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -626,12 +626,10 @@ static void __init pagetable_init(void)
for (range = 0; range < npmem_ranges; range++) {
unsigned long start_paddr;
- unsigned long end_paddr;
unsigned long size;
start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
size = pmem_ranges[range].pages << PAGE_SHIFT;
- end_paddr = start_paddr + size;
map_pages((unsigned long)__va(start_paddr), start_paddr,
size, PAGE_KERNEL, 0);
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 6aaf8dc60610..2a54fadbeaf5 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -240,7 +240,7 @@ config PPC_EARLY_DEBUG_40x
config PPC_EARLY_DEBUG_CPM
bool "Early serial debugging for Freescale CPM-based serial ports"
- depends on SERIAL_CPM
+ depends on SERIAL_CPM=y
help
Select this to enable early debugging for Freescale chips
using a CPM-based serial port. This assumes that the bootwrapper
diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts
index 6612160c19d5..dff1ea074d9d 100644
--- a/arch/powerpc/boot/dts/turris1x.dts
+++ b/arch/powerpc/boot/dts/turris1x.dts
@@ -476,12 +476,12 @@
* channel 1 (but only USB 2.0 subset) to USB 2.0 pins on mPCIe
* slot 1 (CN5), channels 2 and 3 to connector P600.
*
- * P2020 PCIe Root Port uses 1MB of PCIe MEM and xHCI controller
+ * P2020 PCIe Root Port does not use PCIe MEM and xHCI controller
* uses 64kB + 8kB of PCIe MEM. No PCIe IO is used or required.
- * So allocate 2MB of PCIe MEM for this PCIe bus.
+ * So allocate 128kB of PCIe MEM for this PCIe bus.
*/
reg = <0 0xffe08000 0 0x1000>;
- ranges = <0x02000000 0x0 0xc0000000 0 0xc0000000 0x0 0x00200000>, /* MEM */
+ ranges = <0x02000000 0x0 0xc0000000 0 0xc0000000 0x0 0x00020000>, /* MEM */
<0x01000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>; /* IO */
pcie@0 {
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c9ad12461d44..6ee65741dbd5 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -508,12 +508,16 @@ static void __init fixup_port_irq(int index,
port->irq = virq;
-#ifdef CONFIG_SERIAL_8250_FSL
- if (of_device_is_compatible(np, "fsl,ns16550")) {
- port->handle_irq = fsl8250_handle_irq;
- port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+ if (IS_ENABLED(CONFIG_SERIAL_8250) &&
+ of_device_is_compatible(np, "fsl,ns16550")) {
+ if (IS_REACHABLE(CONFIG_SERIAL_8250_FSL)) {
+ port->handle_irq = fsl8250_handle_irq;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+ } else {
+ pr_warn_once("Not activating Freescale specific workaround for device %pOFP\n",
+ np);
+ }
}
-#endif
}
static void __init fixup_port_pio(int index,
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index 23ee96106537..0c7d82c270c3 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -54,7 +54,7 @@ KASAN_SANITIZE := n
KCSAN_SANITIZE := n
ccflags-y := -fno-common -fno-builtin
-ldflags-y := -Wl,--hash-style=both -nostdlib -shared -z noexecstack
+ldflags-y := -Wl,--hash-style=both -nostdlib -shared -z noexecstack $(CLANG_FLAGS)
ldflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld)
ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index cd632ba9ebff..0161226d8fec 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -24,6 +24,7 @@
#include <linux/stringify.h>
#include <asm/machdep.h>
+#include <asm/nmi.h>
#include <asm/rtas.h>
#include "pseries.h"
#include "vas.h" /* vas_migration_handler() */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index b49793cf34eb..4c07b9189c86 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -100,11 +100,18 @@ config RISCV
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
+ select HAVE_ARCH_USERFAULTFD_MINOR if 64BIT && USERFAULTFD
select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING_USER
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
select HAVE_EBPF_JIT if MMU
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
@@ -114,7 +121,8 @@ config RISCV
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
select HAVE_KRETPROBES if !XIP_KERNEL
- select HAVE_RETHOOK if !XIP_KERNEL
+ # https://github.com/ClangBuiltLinux/linux/issues/1881
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if !LD_IS_LLD
select HAVE_MOVE_PMD
select HAVE_MOVE_PUD
select HAVE_PCI
@@ -123,6 +131,7 @@ config RISCV
select HAVE_PERF_USER_STACK_DUMP
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
@@ -148,12 +157,6 @@ config RISCV
select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
select ZONE_DMA32 if 64BIT
- select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
- select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
- select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
config CLANG_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_CLANG
@@ -872,6 +875,9 @@ config ARCH_HIBERNATION_POSSIBLE
config ARCH_HIBERNATION_HEADER
def_bool HIBERNATION
+config ARCH_SUSPEND_POSSIBLE
+ def_bool y
+
endmenu # "Power management options"
menu "CPU Power Management"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index ce10a38dff37..6833d01e2e70 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -43,6 +43,7 @@ config ARCH_SUNXI
config ARCH_THEAD
bool "T-HEAD RISC-V SoCs"
+ depends on MMU && !XIP_KERNEL
select ERRATA_THEAD
help
This enables support for the RISC-V based T-HEAD SoCs.
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index c259dc925ec1..be84b14f0118 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -45,8 +45,11 @@ static bool errata_probe_cmo(unsigned int stage,
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return false;
- riscv_cbom_block_size = L1_CACHE_BYTES;
- riscv_noncoherent_supported();
+ if (stage == RISCV_ALTERNATIVES_BOOT) {
+ riscv_cbom_block_size = L1_CACHE_BYTES;
+ riscv_noncoherent_supported();
+ }
+
return true;
}
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index b98b3b6c9da2..7bac43a3176e 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -90,7 +90,9 @@
#define EXC_INST_ACCESS 1
#define EXC_INST_ILLEGAL 2
#define EXC_BREAKPOINT 3
+#define EXC_LOAD_MISALIGNED 4
#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_MISALIGNED 6
#define EXC_STORE_ACCESS 7
#define EXC_SYSCALL 8
#define EXC_HYPERVISOR_SYSCALL 9
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
index 1de0717112e5..1f37b600ca47 100644
--- a/arch/riscv/include/asm/kvm_aia.h
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -20,6 +20,33 @@ struct kvm_aia {
/* In-kernel irqchip initialized */
bool initialized;
+
+ /* Virtualization mode (Emulation, HW Accelerated, or Auto) */
+ u32 mode;
+
+ /* Number of MSIs */
+ u32 nr_ids;
+
+ /* Number of wired IRQs */
+ u32 nr_sources;
+
+ /* Number of group bits in IMSIC address */
+ u32 nr_group_bits;
+
+ /* Position of group bits in IMSIC address */
+ u32 nr_group_shift;
+
+ /* Number of hart bits in IMSIC address */
+ u32 nr_hart_bits;
+
+ /* Number of guest bits in IMSIC address */
+ u32 nr_guest_bits;
+
+ /* Guest physical address of APLIC */
+ gpa_t aplic_addr;
+
+ /* Internal state of APLIC */
+ void *aplic_state;
};
struct kvm_vcpu_aia_csr {
@@ -38,25 +65,53 @@ struct kvm_vcpu_aia {
/* CPU AIA CSR context upon Guest VCPU reset */
struct kvm_vcpu_aia_csr guest_reset_csr;
+
+ /* Guest physical address of IMSIC for this VCPU */
+ gpa_t imsic_addr;
+
+	/* HART index of IMSIC extracted from guest physical address */
+ u32 hart_index;
+
+ /* Internal state of IMSIC for this VCPU */
+ void *imsic_state;
};
+#define KVM_RISCV_AIA_UNDEF_ADDR (-1)
+
#define kvm_riscv_aia_initialized(k) ((k)->arch.aia.initialized)
#define irqchip_in_kernel(k) ((k)->arch.aia.in_kernel)
+extern unsigned int kvm_riscv_aia_nr_hgei;
+extern unsigned int kvm_riscv_aia_max_ids;
DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
#define kvm_riscv_aia_available() \
static_branch_unlikely(&kvm_riscv_aia_available)
+extern struct kvm_device_ops kvm_riscv_aia_device_ops;
+
+void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);
+
#define KVM_RISCV_AIA_IMSIC_TOPEI (ISELECT_MASK + 1)
-static inline int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu,
- unsigned long isel,
- unsigned long *val,
- unsigned long new_val,
- unsigned long wr_mask)
-{
- return 0;
-}
+int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
+ bool write, unsigned long *val);
+int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type);
+void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
+ u32 guest_index, u32 offset, u32 iid);
+int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu);
+
+int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v);
+int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v);
+int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type);
+int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level);
+int kvm_riscv_aia_aplic_init(struct kvm *kvm);
+void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm);
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
@@ -93,31 +148,23 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
{ .base = CSR_SIREG, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
-static inline int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
-{
- return 1;
-}
-
-static inline void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
+int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);
-static inline void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
-{
-}
+int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
+ u32 guest_index, u32 iid);
+int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
+int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level);
-static inline void kvm_riscv_aia_init_vm(struct kvm *kvm)
-{
-}
+void kvm_riscv_aia_init_vm(struct kvm *kvm);
+void kvm_riscv_aia_destroy_vm(struct kvm *kvm);
-static inline void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
-{
-}
+int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
+ void __iomem **hgei_va, phys_addr_t *hgei_pa);
+void kvm_riscv_aia_free_hgei(int cpu, int hgei);
+void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);
void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
diff --git a/arch/riscv/include/asm/kvm_aia_aplic.h b/arch/riscv/include/asm/kvm_aia_aplic.h
new file mode 100644
index 000000000000..6dd1a4809ec1
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_aia_aplic.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __KVM_RISCV_AIA_APLIC_H
+#define __KVM_RISCV_AIA_APLIC_H
+
+#include <linux/bitops.h>
+
+#define APLIC_MAX_IDC BIT(14)
+#define APLIC_MAX_SOURCE 1024
+
+#define APLIC_DOMAINCFG 0x0000
+#define APLIC_DOMAINCFG_RDONLY 0x80000000
+#define APLIC_DOMAINCFG_IE BIT(8)
+#define APLIC_DOMAINCFG_DM BIT(2)
+#define APLIC_DOMAINCFG_BE BIT(0)
+
+#define APLIC_SOURCECFG_BASE 0x0004
+#define APLIC_SOURCECFG_D BIT(10)
+#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff
+#define APLIC_SOURCECFG_SM_MASK 0x00000007
+#define APLIC_SOURCECFG_SM_INACTIVE 0x0
+#define APLIC_SOURCECFG_SM_DETACH 0x1
+#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4
+#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5
+#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6
+#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7
+
+#define APLIC_IRQBITS_PER_REG 32
+
+#define APLIC_SETIP_BASE 0x1c00
+#define APLIC_SETIPNUM 0x1cdc
+
+#define APLIC_CLRIP_BASE 0x1d00
+#define APLIC_CLRIPNUM 0x1ddc
+
+#define APLIC_SETIE_BASE 0x1e00
+#define APLIC_SETIENUM 0x1edc
+
+#define APLIC_CLRIE_BASE 0x1f00
+#define APLIC_CLRIENUM 0x1fdc
+
+#define APLIC_SETIPNUM_LE 0x2000
+#define APLIC_SETIPNUM_BE 0x2004
+
+#define APLIC_GENMSI 0x3000
+
+#define APLIC_TARGET_BASE 0x3004
+#define APLIC_TARGET_HART_IDX_SHIFT 18
+#define APLIC_TARGET_HART_IDX_MASK 0x3fff
+#define APLIC_TARGET_GUEST_IDX_SHIFT 12
+#define APLIC_TARGET_GUEST_IDX_MASK 0x3f
+#define APLIC_TARGET_IPRIO_MASK 0xff
+#define APLIC_TARGET_EIID_MASK 0x7ff
+
+#endif
diff --git a/arch/riscv/include/asm/kvm_aia_imsic.h b/arch/riscv/include/asm/kvm_aia_imsic.h
new file mode 100644
index 000000000000..da5881d2bde0
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_aia_imsic.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __KVM_RISCV_AIA_IMSIC_H
+#define __KVM_RISCV_AIA_IMSIC_H
+
+#include <linux/types.h>
+#include <asm/csr.h>
+
+#define IMSIC_MMIO_PAGE_SHIFT 12
+#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
+#define IMSIC_MMIO_PAGE_LE 0x00
+#define IMSIC_MMIO_PAGE_BE 0x04
+
+#define IMSIC_MIN_ID 63
+#define IMSIC_MAX_ID 2048
+
+#define IMSIC_EIDELIVERY 0x70
+
+#define IMSIC_EITHRESHOLD 0x72
+
+#define IMSIC_EIP0 0x80
+#define IMSIC_EIP63 0xbf
+#define IMSIC_EIPx_BITS 32
+
+#define IMSIC_EIE0 0xc0
+#define IMSIC_EIE63 0xff
+#define IMSIC_EIEx_BITS 32
+
+#define IMSIC_FIRST IMSIC_EIDELIVERY
+#define IMSIC_LAST IMSIC_EIE63
+
+#define IMSIC_MMIO_SETIPNUM_LE 0x00
+#define IMSIC_MMIO_SETIPNUM_BE 0x04
+
+#endif
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index bd47a1dc2ff8..2d8ee53b66c7 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -28,6 +28,8 @@
#define KVM_VCPU_MAX_FEATURES 0
+#define KVM_IRQCHIP_NUM_PINS 1024
+
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(1)
@@ -320,6 +322,8 @@ int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
+int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);
+
void __kvm_riscv_unpriv_trap(void);
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index 4278125a38a5..cdcf0ff07be7 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -14,9 +14,15 @@
#define KVM_SBI_VERSION_MAJOR 1
#define KVM_SBI_VERSION_MINOR 0
+enum kvm_riscv_sbi_ext_status {
+ KVM_RISCV_SBI_EXT_UNINITIALIZED,
+ KVM_RISCV_SBI_EXT_AVAILABLE,
+ KVM_RISCV_SBI_EXT_UNAVAILABLE,
+};
+
struct kvm_vcpu_sbi_context {
int return_handled;
- bool extension_disabled[KVM_RISCV_SBI_EXT_MAX];
+ enum kvm_riscv_sbi_ext_status ext_status[KVM_RISCV_SBI_EXT_MAX];
};
struct kvm_vcpu_sbi_return {
@@ -66,4 +72,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
+#ifdef CONFIG_RISCV_PMU_SBI
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
+#endif
#endif /* __RISCV_KVM_VCPU_SBI_H__ */
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 04c0b07bf6cd..3d78930cab51 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -33,6 +33,11 @@ static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
regs->status = (regs->status & ~SR_VS) | SR_VS_CLEAN;
}
+static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_VS) | SR_VS_DIRTY;
+}
+
static inline void riscv_v_vstate_off(struct pt_regs *regs)
{
regs->status = (regs->status & ~SR_VS) | SR_VS_OFF;
@@ -128,6 +133,34 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_
riscv_v_disable();
}
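+/*
+ * Clobber the whole vector register file: fill v0-v31 (four m8 register
+ * groups) with all-ones and then load an invalid vtype, so no stale data
+ * survives past the discard point.
+ */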
+static inline void __riscv_v_vstate_discard(void)
+{
+ unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1);
+
+ riscv_v_enable();
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vmv.v.i v0, -1\n\t"
+ "vmv.v.i v8, -1\n\t"
+ "vmv.v.i v16, -1\n\t"
+ "vmv.v.i v24, -1\n\t"
+ "vsetvl %0, x0, %1\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (vtype_inval) : "memory");
+ riscv_v_disable();
+}
+
+static inline void riscv_v_vstate_discard(struct pt_regs *regs)
+{
+ if ((regs->status & SR_VS) == SR_VS_OFF)
+ return;
+
+ __riscv_v_vstate_discard();
+ __riscv_v_vstate_dirty(regs);
+}
+
static inline void riscv_v_vstate_save(struct task_struct *task,
struct pt_regs *regs)
{
@@ -173,6 +206,7 @@ static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return fals
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#define riscv_v_vsize (0)
+#define riscv_v_vstate_discard(regs) do {} while (0)
#define riscv_v_vstate_save(task, regs) do {} while (0)
#define riscv_v_vstate_restore(task, regs) do {} while (0)
#define __switch_to_vector(__prev, __next) do {} while (0)
diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 7d0b32e3b701..000000000000
--- a/arch/riscv/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- * Copyright (C) 2015 Regents of the University of California
- */
-
-#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
-#define _UAPI_ASM_RISCV_BITSPERLONG_H
-
-#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 855c047e86d4..930fdc4101cd 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -15,6 +15,7 @@
#include <asm/bitsperlong.h>
#include <asm/ptrace.h>
+#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -122,6 +123,7 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZBB,
KVM_RISCV_ISA_EXT_SSAIA,
KVM_RISCV_ISA_EXT_V,
+ KVM_RISCV_ISA_EXT_SVNAPOT,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -211,6 +213,77 @@ enum KVM_RISCV_SBI_EXT_ID {
#define KVM_REG_RISCV_VECTOR_REG(n) \
((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
+/* Device Control API: RISC-V AIA */
+#define KVM_DEV_RISCV_APLIC_ALIGN 0x1000
+#define KVM_DEV_RISCV_APLIC_SIZE 0x4000
+#define KVM_DEV_RISCV_APLIC_MAX_HARTS 0x4000
+#define KVM_DEV_RISCV_IMSIC_ALIGN 0x1000
+#define KVM_DEV_RISCV_IMSIC_SIZE 0x1000
+
+#define KVM_DEV_RISCV_AIA_GRP_CONFIG 0
+#define KVM_DEV_RISCV_AIA_CONFIG_MODE 0
+#define KVM_DEV_RISCV_AIA_CONFIG_IDS 1
+#define KVM_DEV_RISCV_AIA_CONFIG_SRCS 2
+#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS 3
+#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT 4
+#define KVM_DEV_RISCV_AIA_CONFIG_HART_BITS 5
+#define KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS 6
+
+/*
+ * Modes of RISC-V AIA device:
+ * 1) EMUL (aka Emulation): Trap-n-emulate IMSIC
+ * 2) HWACCEL (aka HW Acceleration): Virtualize IMSIC using IMSIC guest files
+ * 3) AUTO (aka Automatic): Virtualize IMSIC using IMSIC guest files whenever
+ *    available, otherwise fall back to trap-n-emulation
+ */
+#define KVM_DEV_RISCV_AIA_MODE_EMUL 0
+#define KVM_DEV_RISCV_AIA_MODE_HWACCEL 1
+#define KVM_DEV_RISCV_AIA_MODE_AUTO 2
+
+#define KVM_DEV_RISCV_AIA_IDS_MIN 63
+#define KVM_DEV_RISCV_AIA_IDS_MAX 2048
+#define KVM_DEV_RISCV_AIA_SRCS_MAX 1024
+#define KVM_DEV_RISCV_AIA_GROUP_BITS_MAX 8
+#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN 24
+#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX 56
+#define KVM_DEV_RISCV_AIA_HART_BITS_MAX 16
+#define KVM_DEV_RISCV_AIA_GUEST_BITS_MAX 8
+
+#define KVM_DEV_RISCV_AIA_GRP_ADDR 1
+#define KVM_DEV_RISCV_AIA_ADDR_APLIC 0
+#define KVM_DEV_RISCV_AIA_ADDR_IMSIC(__vcpu) (1 + (__vcpu))
+#define KVM_DEV_RISCV_AIA_ADDR_MAX \
+ (1 + KVM_DEV_RISCV_APLIC_MAX_HARTS)
+
+#define KVM_DEV_RISCV_AIA_GRP_CTRL 2
+#define KVM_DEV_RISCV_AIA_CTRL_INIT 0
+
+/*
+ * The device attribute type contains the memory-mapped offset of the
+ * APLIC register (range 0x0000-0x3FFF) and must be 4-byte aligned.
+ */
+#define KVM_DEV_RISCV_AIA_GRP_APLIC 3
+
+/*
+ * The lower 12 bits of the device attribute type contain the iselect
+ * value of the IMSIC register (range 0x70-0xFF), whereas the higher-order
+ * bits contain the VCPU id.
+ */
+#define KVM_DEV_RISCV_AIA_GRP_IMSIC 4
+#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS 12
+#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK \
+ ((1U << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) - 1)
+#define KVM_DEV_RISCV_AIA_IMSIC_MKATTR(__vcpu, __isel) \
+ (((__vcpu) << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) | \
+ ((__isel) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK))
+#define KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(__attr) \
+ ((__attr) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK)
+#define KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(__attr) \
+ ((__attr) >> KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS)
+
+/* One single KVM irqchip, ie. the AIA */
+#define KVM_NR_IRQCHIPS 1
+
#endif
#endif /* __LINUX_KVM_RISCV_H */
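The device-attribute groups defined above are driven from userspace through the generic KVM device API. Below is a minimal sketch (not part of the patch) of how a VMM might use them, assuming the AIA device fd was obtained from KVM_CREATE_DEVICE with KVM_DEV_TYPE_RISCV_AIA; the helper names and the value widths (u32 for the CONFIG group, 64-bit for the IMSIC group) are assumptions, not taken from the patch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: pick the AIA mode (EMUL/HWACCEL/AUTO) before any VCPU has run. */
static int riscv_aia_set_mode(int aia_fd, uint32_t mode)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_RISCV_AIA_GRP_CONFIG,
		.attr  = KVM_DEV_RISCV_AIA_CONFIG_MODE,
		.addr  = (uint64_t)(unsigned long)&mode,	/* kernel copies the u32 from here */
	};

	return ioctl(aia_fd, KVM_SET_DEVICE_ATTR, &attr);
}

/* Hypothetical helper: read one IMSIC register of a VCPU via the MKATTR encoding above. */
static int riscv_aia_get_imsic_reg(int aia_fd, uint32_t vcpu, uint32_t isel, uint64_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_RISCV_AIA_GRP_IMSIC,
		.attr  = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(vcpu, isel),
		.addr  = (uint64_t)(unsigned long)val,
	};

	return ioctl(aia_fd, KVM_GET_DEVICE_ATTR, &attr);
}

The CTRL group's KVM_DEV_RISCV_AIA_CTRL_INIT attribute is what moves the irqchip to the initialized state, after which writes to the CONFIG and ADDR groups are rejected (see the -EBUSY checks in aia_config() and aia_aplic_addr() further down).
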
diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h
index 8b8a8541673a..8c8712aa9551 100644
--- a/arch/riscv/include/uapi/asm/sigcontext.h
+++ b/arch/riscv/include/uapi/asm/sigcontext.h
@@ -15,6 +15,8 @@
/* The size of END signal context header. */
#define END_HDR_SIZE 0x0
+#ifndef __ASSEMBLY__
+
struct __sc_riscv_v_state {
struct __riscv_v_ext_state v_state;
} __attribute__((aligned(16)));
@@ -33,4 +35,6 @@ struct sigcontext {
};
};
+#endif /*!__ASSEMBLY__*/
+
#endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 23e533766a49..85bbce0f758c 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -58,7 +58,6 @@ int riscv_hartid_to_cpuid(unsigned long hartid)
if (cpuid_to_hartid_map(i) == hartid)
return i;
- pr_err("Couldn't find cpu id for hartid [%lu]\n", hartid);
return -ENOENT;
}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index bb0b76e1a6d4..f4d6acb38dd0 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -238,10 +238,11 @@ asmlinkage __visible void smp_callin(void)
mmgrab(mm);
current->active_mm = mm;
- riscv_ipi_enable();
-
store_cpu_topology(curr_cpuid);
notify_cpu_starting(curr_cpuid);
+
+ riscv_ipi_enable();
+
numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
probe_vendor_features(curr_cpuid);
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 5158961ea977..f910dfccbf5d 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -150,12 +150,18 @@ DO_ERROR_INFO(do_trap_insn_fault,
asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
{
+ bool handled;
+
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
local_irq_enable();
- if (!riscv_v_first_use_handler(regs))
+ handled = riscv_v_first_use_handler(regs);
+
+ local_irq_disable();
+
+ if (!handled)
do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
"Oops - illegal instruction");
@@ -296,6 +302,8 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
regs->epc += 4;
regs->orig_a0 = regs->a0;
+ riscv_v_vstate_discard(regs);
+
syscall = syscall_enter_from_user_mode(regs, syscall);
if (syscall < NR_syscalls)
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 9a68e7eaae4d..2cf76218a5bd 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -15,6 +15,7 @@
#include <asm/vdso.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
+#include <vdso/vsyscall.h>
enum vvar_pages {
VVAR_DATA_PAGE_OFFSET,
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index f9c8e19ab301..8d92fb6c522c 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -167,6 +167,7 @@ bool riscv_v_first_use_handler(struct pt_regs *regs)
return true;
}
riscv_v_vstate_on(regs);
+ riscv_v_vstate_restore(current, regs);
return true;
}
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index eab9edc3b631..50767647fbc6 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -98,12 +98,6 @@ SECTIONS
__soc_builtin_dtb_table_end = .;
}
- . = ALIGN(8);
- .alternative : {
- __alt_start = .;
- *(.alternative)
- __alt_end = .;
- }
__init_end = .;
. = ALIGN(16);
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index e5f9f4677bbf..492dd4b8f3d6 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -85,11 +85,11 @@ SECTIONS
INIT_DATA_SECTION(16)
.init.pi : {
- *(.init.pi*)
+ KEEP(*(.init.pi*))
}
.init.bss : {
- *(.init.bss) /* from the EFI stub */
+ KEEP(*(.init.bss*)) /* from the EFI stub */
}
.exit.data :
{
@@ -112,7 +112,7 @@ SECTIONS
. = ALIGN(8);
.alternative : {
__alt_start = .;
- *(.alternative)
+ KEEP(*(.alternative))
__alt_end = .;
}
__init_end = .;
diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
index 28891e583259..dfc237d7875b 100644
--- a/arch/riscv/kvm/Kconfig
+++ b/arch/riscv/kvm/Kconfig
@@ -21,6 +21,10 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)"
depends on RISCV_SBI && MMU
select HAVE_KVM_EVENTFD
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQFD
+ select HAVE_KVM_IRQ_ROUTING
+ select HAVE_KVM_MSI
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 7b4c21f9aa6a..fee0671e2dc1 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -28,3 +28,6 @@ kvm-y += vcpu_sbi_hsm.o
kvm-y += vcpu_timer.o
kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o
kvm-y += aia.o
+kvm-y += aia_device.o
+kvm-y += aia_aplic.o
+kvm-y += aia_imsic.o
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 4f1286fc7f17..585a3b42c52c 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -8,11 +8,49 @@
*/
#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
#include <asm/hwcap.h>
+#include <asm/kvm_aia_imsic.h>
+struct aia_hgei_control {
+ raw_spinlock_t lock;
+ unsigned long free_bitmap;
+ struct kvm_vcpu *owners[BITS_PER_LONG];
+};
+static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
+static int hgei_parent_irq;
+
+unsigned int kvm_riscv_aia_nr_hgei;
+unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
+static int aia_find_hgei(struct kvm_vcpu *owner)
+{
+ int i, hgei;
+ unsigned long flags;
+ struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+ hgei = -1;
+ for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
+ if (hgctrl->owners[i] == owner) {
+ hgei = i;
+ break;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+ put_cpu_ptr(&aia_hgei);
+ return hgei;
+}
+
static void aia_set_hvictl(bool ext_irq_pending)
{
unsigned long hvictl;
@@ -56,6 +94,7 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
+ int hgei;
unsigned long seip;
if (!kvm_riscv_aia_available())
@@ -74,6 +113,10 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
return false;
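+	/* When the VCPU owns a HW guest interrupt file, check the corresponding HGEIP bit */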
+ hgei = aia_find_hgei(vcpu);
+ if (hgei > 0)
+ return !!(csr_read(CSR_HGEIP) & BIT(hgei));
+
return false;
}
@@ -323,8 +366,6 @@ static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
return KVM_INSN_CONTINUE_NEXT_SEPC;
}
-#define IMSIC_FIRST 0x70
-#define IMSIC_LAST 0xff
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
@@ -348,6 +389,143 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
return KVM_INSN_EXIT_TO_USER_SPACE;
}
+int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
+ void __iomem **hgei_va, phys_addr_t *hgei_pa)
+{
+ int ret = -ENOENT;
+ unsigned long flags;
+ struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
+
+ if (!kvm_riscv_aia_available() || !hgctrl)
+ return -ENODEV;
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+ if (hgctrl->free_bitmap) {
+ ret = __ffs(hgctrl->free_bitmap);
+ hgctrl->free_bitmap &= ~BIT(ret);
+ hgctrl->owners[ret] = owner;
+ }
+
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+ /* TODO: To be updated later by AIA IMSIC HW guest file support */
+ if (hgei_va)
+ *hgei_va = NULL;
+ if (hgei_pa)
+ *hgei_pa = 0;
+
+ return ret;
+}
+
+void kvm_riscv_aia_free_hgei(int cpu, int hgei)
+{
+ unsigned long flags;
+ struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
+
+ if (!kvm_riscv_aia_available() || !hgctrl)
+ return;
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+ if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
+ if (!(hgctrl->free_bitmap & BIT(hgei))) {
+ hgctrl->free_bitmap |= BIT(hgei);
+ hgctrl->owners[hgei] = NULL;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+}
+
+void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
+{
+ int hgei;
+
+ if (!kvm_riscv_aia_available())
+ return;
+
+ hgei = aia_find_hgei(owner);
+ if (hgei > 0) {
+ if (enable)
+ csr_set(CSR_HGEIE, BIT(hgei));
+ else
+ csr_clear(CSR_HGEIE, BIT(hgei));
+ }
+}
+
+static irqreturn_t hgei_interrupt(int irq, void *dev_id)
+{
+ int i;
+ unsigned long hgei_mask, flags;
+ struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
+
+ hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
+ csr_clear(CSR_HGEIE, hgei_mask);
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+ for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
+ if (hgctrl->owners[i])
+ kvm_vcpu_kick(hgctrl->owners[i]);
+ }
+
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+ put_cpu_ptr(&aia_hgei);
+ return IRQ_HANDLED;
+}
+
+static int aia_hgei_init(void)
+{
+ int cpu, rc;
+ struct irq_domain *domain;
+ struct aia_hgei_control *hgctrl;
+
+ /* Initialize per-CPU guest external interrupt line management */
+ for_each_possible_cpu(cpu) {
+ hgctrl = per_cpu_ptr(&aia_hgei, cpu);
+ raw_spin_lock_init(&hgctrl->lock);
+ if (kvm_riscv_aia_nr_hgei) {
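+			/* Guest external interrupt lines are numbered 1..nr_hgei, so keep bit 0 reserved */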
+ hgctrl->free_bitmap =
+ BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
+ hgctrl->free_bitmap &= ~BIT(0);
+ } else
+ hgctrl->free_bitmap = 0;
+ }
+
+ /* Find INTC irq domain */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
+ DOMAIN_BUS_ANY);
+ if (!domain) {
+ kvm_err("unable to find INTC domain\n");
+ return -ENOENT;
+ }
+
+ /* Map per-CPU SGEI interrupt from INTC domain */
+ hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
+ if (!hgei_parent_irq) {
+ kvm_err("unable to map SGEI IRQ\n");
+ return -ENOMEM;
+ }
+
+ /* Request per-CPU SGEI interrupt */
+ rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
+ "riscv-kvm", &aia_hgei);
+ if (rc) {
+ kvm_err("failed to request SGEI IRQ\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void aia_hgei_exit(void)
+{
+ /* Free per-CPU SGEI interrupt */
+ free_percpu_irq(hgei_parent_irq, &aia_hgei);
+}
+
void kvm_riscv_aia_enable(void)
{
if (!kvm_riscv_aia_available())
@@ -362,21 +540,105 @@ void kvm_riscv_aia_enable(void)
csr_write(CSR_HVIPRIO1H, 0x0);
csr_write(CSR_HVIPRIO2H, 0x0);
#endif
+
+ /* Enable per-CPU SGEI interrupt */
+ enable_percpu_irq(hgei_parent_irq,
+ irq_get_trigger_type(hgei_parent_irq));
+ csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
}
void kvm_riscv_aia_disable(void)
{
+ int i;
+ unsigned long flags;
+ struct kvm_vcpu *vcpu;
+ struct aia_hgei_control *hgctrl;
+
if (!kvm_riscv_aia_available())
return;
+ hgctrl = get_cpu_ptr(&aia_hgei);
+
+ /* Disable per-CPU SGEI interrupt */
+ csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
+ disable_percpu_irq(hgei_parent_irq);
aia_set_hvictl(false);
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+
+ for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
+ vcpu = hgctrl->owners[i];
+ if (!vcpu)
+ continue;
+
+ /*
+ * We release hgctrl->lock before notifying IMSIC
+ * so that we don't have lock ordering issues.
+ */
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+ /* Notify IMSIC */
+ kvm_riscv_vcpu_aia_imsic_release(vcpu);
+
+ /*
+ * Wakeup VCPU if it was blocked so that it can
+ * run on other HARTs
+ */
+ if (csr_read(CSR_HGEIE) & BIT(i)) {
+ csr_clear(CSR_HGEIE, BIT(i));
+ kvm_vcpu_kick(vcpu);
+ }
+
+ raw_spin_lock_irqsave(&hgctrl->lock, flags);
+ }
+
+ raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
+
+ put_cpu_ptr(&aia_hgei);
}
int kvm_riscv_aia_init(void)
{
+ int rc;
+
if (!riscv_isa_extension_available(NULL, SxAIA))
return -ENODEV;
+ /* Figure-out number of bits in HGEIE */
+ csr_write(CSR_HGEIE, -1UL);
+ kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
+ csr_write(CSR_HGEIE, 0);
+ if (kvm_riscv_aia_nr_hgei)
+ kvm_riscv_aia_nr_hgei--;
+
+ /*
+ * Number of usable HGEI lines should be minimum of per-HART
+ * IMSIC guest files and number of bits in HGEIE
+ *
+ * TODO: To be updated later by AIA IMSIC HW guest file support
+ */
+ kvm_riscv_aia_nr_hgei = 0;
+
+ /*
+ * Find number of guest MSI IDs
+ *
+ * TODO: To be updated later by AIA IMSIC HW guest file support
+ */
+ kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
+
+ /* Initialize guest external interrupt line management */
+ rc = aia_hgei_init();
+ if (rc)
+ return rc;
+
+ /* Register device operations */
+ rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
+ KVM_DEV_TYPE_RISCV_AIA);
+ if (rc) {
+ aia_hgei_exit();
+ return rc;
+ }
+
/* Enable KVM AIA support */
static_branch_enable(&kvm_riscv_aia_available);
@@ -385,4 +647,12 @@ int kvm_riscv_aia_init(void)
void kvm_riscv_aia_exit(void)
{
+ if (!kvm_riscv_aia_available())
+ return;
+
+ /* Unregister device operations */
+ kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);
+
+ /* Cleanup the HGEI state */
+ aia_hgei_exit();
}
diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c
new file mode 100644
index 000000000000..39e72aa016a4
--- /dev/null
+++ b/arch/riscv/kvm/aia_aplic.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ * Anup Patel <apatel@ventanamicro.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/math.h>
+#include <linux/spinlock.h>
+#include <linux/swab.h>
+#include <kvm/iodev.h>
+#include <asm/kvm_aia_aplic.h>
+
+struct aplic_irq {
+ raw_spinlock_t lock;
+ u32 sourcecfg;
+ u32 state;
+#define APLIC_IRQ_STATE_PENDING BIT(0)
+#define APLIC_IRQ_STATE_ENABLED BIT(1)
+#define APLIC_IRQ_STATE_ENPEND (APLIC_IRQ_STATE_PENDING | \
+ APLIC_IRQ_STATE_ENABLED)
+#define APLIC_IRQ_STATE_INPUT BIT(8)
+ u32 target;
+};
+
+struct aplic {
+ struct kvm_io_device iodev;
+
+ u32 domaincfg;
+ u32 genmsi;
+
+ u32 nr_irqs;
+ u32 nr_words;
+ struct aplic_irq *irqs;
+};
+
+static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
+{
+ u32 ret;
+ unsigned long flags;
+ struct aplic_irq *irqd;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return 0;
+ irqd = &aplic->irqs[irq];
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+ ret = irqd->sourcecfg;
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+
+ return ret;
+}
+
+static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
+{
+ unsigned long flags;
+ struct aplic_irq *irqd;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return;
+ irqd = &aplic->irqs[irq];
+
+ if (val & APLIC_SOURCECFG_D)
+ val = 0;
+ else
+ val &= APLIC_SOURCECFG_SM_MASK;
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+ irqd->sourcecfg = val;
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+}
+
+static u32 aplic_read_target(struct aplic *aplic, u32 irq)
+{
+ u32 ret;
+ unsigned long flags;
+ struct aplic_irq *irqd;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return 0;
+ irqd = &aplic->irqs[irq];
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+ ret = irqd->target;
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+
+ return ret;
+}
+
+static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
+{
+ unsigned long flags;
+ struct aplic_irq *irqd;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return;
+ irqd = &aplic->irqs[irq];
+
+ val &= APLIC_TARGET_EIID_MASK |
+ (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
+ (APLIC_TARGET_GUEST_IDX_MASK << APLIC_TARGET_GUEST_IDX_SHIFT);
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+ irqd->target = val;
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+}
+
+static bool aplic_read_pending(struct aplic *aplic, u32 irq)
+{
+ bool ret;
+ unsigned long flags;
+ struct aplic_irq *irqd;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return false;
+ irqd = &aplic->irqs[irq];
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+ ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+
+ return ret;
+}
+
+static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
+{
+ unsigned long flags, sm;
+ struct aplic_irq *irqd;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return;
+ irqd = &aplic->irqs[irq];
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+
+ sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
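+	/* The pending bit of a level-triggered source cannot be cleared by a register write */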
+ if (!pending &&
+ ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
+ (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))
+ goto skip_write_pending;
+
+ if (pending)
+ irqd->state |= APLIC_IRQ_STATE_PENDING;
+ else
+ irqd->state &= ~APLIC_IRQ_STATE_PENDING;
+
+skip_write_pending:
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+}
+
+static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
+{
+ bool ret;
+ unsigned long flags;
+ struct aplic_irq *irqd;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return false;
+ irqd = &aplic->irqs[irq];
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+ ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+
+ return ret;
+}
+
+static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
+{
+ unsigned long flags;
+ struct aplic_irq *irqd;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return;
+ irqd = &aplic->irqs[irq];
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+ if (enabled)
+ irqd->state |= APLIC_IRQ_STATE_ENABLED;
+ else
+ irqd->state &= ~APLIC_IRQ_STATE_ENABLED;
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+}
+
+static bool aplic_read_input(struct aplic *aplic, u32 irq)
+{
+ bool ret;
+ unsigned long flags;
+ struct aplic_irq *irqd;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return false;
+ irqd = &aplic->irqs[irq];
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+ ret = (irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false;
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+
+ return ret;
+}
+
+static void aplic_inject_msi(struct kvm *kvm, u32 irq, u32 target)
+{
+ u32 hart_idx, guest_idx, eiid;
+
+ hart_idx = target >> APLIC_TARGET_HART_IDX_SHIFT;
+ hart_idx &= APLIC_TARGET_HART_IDX_MASK;
+ guest_idx = target >> APLIC_TARGET_GUEST_IDX_SHIFT;
+ guest_idx &= APLIC_TARGET_GUEST_IDX_MASK;
+ eiid = target & APLIC_TARGET_EIID_MASK;
+ kvm_riscv_aia_inject_msi_by_id(kvm, hart_idx, guest_idx, eiid);
+}
+
+static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
+{
+ bool inject;
+ u32 irq, target;
+ unsigned long flags;
+ struct aplic_irq *irqd;
+ struct aplic *aplic = kvm->arch.aia.aplic_state;
+
+ if (!(aplic->domaincfg & APLIC_DOMAINCFG_IE))
+ return;
+
+ for (irq = first; irq <= last; irq++) {
+ if (!irq || aplic->nr_irqs <= irq)
+ continue;
+ irqd = &aplic->irqs[irq];
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+
+ inject = false;
+ target = irqd->target;
+ if ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
+ APLIC_IRQ_STATE_ENPEND) {
+ irqd->state &= ~APLIC_IRQ_STATE_PENDING;
+ inject = true;
+ }
+
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+
+ if (inject)
+ aplic_inject_msi(kvm, irq, target);
+ }
+}
+
+int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
+{
+ u32 target;
+ bool inject = false, ie;
+ unsigned long flags;
+ struct aplic_irq *irqd;
+ struct aplic *aplic = kvm->arch.aia.aplic_state;
+
+ if (!aplic || !source || (aplic->nr_irqs <= source))
+ return -ENODEV;
+ irqd = &aplic->irqs[source];
+ ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+
+ if (irqd->sourcecfg & APLIC_SOURCECFG_D)
+ goto skip_unlock;
+
+ switch (irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK) {
+ case APLIC_SOURCECFG_SM_EDGE_RISE:
+ if (level && !(irqd->state & APLIC_IRQ_STATE_INPUT) &&
+ !(irqd->state & APLIC_IRQ_STATE_PENDING))
+ irqd->state |= APLIC_IRQ_STATE_PENDING;
+ break;
+ case APLIC_SOURCECFG_SM_EDGE_FALL:
+ if (!level && (irqd->state & APLIC_IRQ_STATE_INPUT) &&
+ !(irqd->state & APLIC_IRQ_STATE_PENDING))
+ irqd->state |= APLIC_IRQ_STATE_PENDING;
+ break;
+ case APLIC_SOURCECFG_SM_LEVEL_HIGH:
+ if (level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
+ irqd->state |= APLIC_IRQ_STATE_PENDING;
+ break;
+ case APLIC_SOURCECFG_SM_LEVEL_LOW:
+ if (!level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
+ irqd->state |= APLIC_IRQ_STATE_PENDING;
+ break;
+ }
+
+ if (level)
+ irqd->state |= APLIC_IRQ_STATE_INPUT;
+ else
+ irqd->state &= ~APLIC_IRQ_STATE_INPUT;
+
+ target = irqd->target;
+ if (ie && ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
+ APLIC_IRQ_STATE_ENPEND)) {
+ irqd->state &= ~APLIC_IRQ_STATE_PENDING;
+ inject = true;
+ }
+
+skip_unlock:
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+
+ if (inject)
+ aplic_inject_msi(kvm, source, target);
+
+ return 0;
+}
+
+static u32 aplic_read_input_word(struct aplic *aplic, u32 word)
+{
+ u32 i, ret = 0;
+
+ for (i = 0; i < 32; i++)
+ ret |= aplic_read_input(aplic, word * 32 + i) ? BIT(i) : 0;
+
+ return ret;
+}
+
+static u32 aplic_read_pending_word(struct aplic *aplic, u32 word)
+{
+ u32 i, ret = 0;
+
+ for (i = 0; i < 32; i++)
+ ret |= aplic_read_pending(aplic, word * 32 + i) ? BIT(i) : 0;
+
+ return ret;
+}
+
+static void aplic_write_pending_word(struct aplic *aplic, u32 word,
+ u32 val, bool pending)
+{
+ u32 i;
+
+ for (i = 0; i < 32; i++) {
+ if (val & BIT(i))
+ aplic_write_pending(aplic, word * 32 + i, pending);
+ }
+}
+
+static u32 aplic_read_enabled_word(struct aplic *aplic, u32 word)
+{
+ u32 i, ret = 0;
+
+ for (i = 0; i < 32; i++)
+ ret |= aplic_read_enabled(aplic, word * 32 + i) ? BIT(i) : 0;
+
+ return ret;
+}
+
+static void aplic_write_enabled_word(struct aplic *aplic, u32 word,
+ u32 val, bool enabled)
+{
+ u32 i;
+
+ for (i = 0; i < 32; i++) {
+ if (val & BIT(i))
+ aplic_write_enabled(aplic, word * 32 + i, enabled);
+ }
+}
+
+static int aplic_mmio_read_offset(struct kvm *kvm, gpa_t off, u32 *val32)
+{
+ u32 i;
+ struct aplic *aplic = kvm->arch.aia.aplic_state;
+
+ if ((off & 0x3) != 0)
+ return -EOPNOTSUPP;
+
+ if (off == APLIC_DOMAINCFG) {
+ *val32 = APLIC_DOMAINCFG_RDONLY |
+ aplic->domaincfg | APLIC_DOMAINCFG_DM;
+ } else if ((off >= APLIC_SOURCECFG_BASE) &&
+ (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
+ i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
+ *val32 = aplic_read_sourcecfg(aplic, i);
+ } else if ((off >= APLIC_SETIP_BASE) &&
+ (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
+ i = (off - APLIC_SETIP_BASE) >> 2;
+ *val32 = aplic_read_pending_word(aplic, i);
+ } else if (off == APLIC_SETIPNUM) {
+ *val32 = 0;
+ } else if ((off >= APLIC_CLRIP_BASE) &&
+ (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
+ i = (off - APLIC_CLRIP_BASE) >> 2;
+ *val32 = aplic_read_input_word(aplic, i);
+ } else if (off == APLIC_CLRIPNUM) {
+ *val32 = 0;
+ } else if ((off >= APLIC_SETIE_BASE) &&
+ (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
+ i = (off - APLIC_SETIE_BASE) >> 2;
+ *val32 = aplic_read_enabled_word(aplic, i);
+ } else if (off == APLIC_SETIENUM) {
+ *val32 = 0;
+ } else if ((off >= APLIC_CLRIE_BASE) &&
+ (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
+ *val32 = 0;
+ } else if (off == APLIC_CLRIENUM) {
+ *val32 = 0;
+ } else if (off == APLIC_SETIPNUM_LE) {
+ *val32 = 0;
+ } else if (off == APLIC_SETIPNUM_BE) {
+ *val32 = 0;
+ } else if (off == APLIC_GENMSI) {
+ *val32 = aplic->genmsi;
+ } else if ((off >= APLIC_TARGET_BASE) &&
+ (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
+ i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
+ *val32 = aplic_read_target(aplic, i);
+ } else
+ return -ENODEV;
+
+ return 0;
+}
+
+static int aplic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ if (len != 4)
+ return -EOPNOTSUPP;
+
+ return aplic_mmio_read_offset(vcpu->kvm,
+ addr - vcpu->kvm->arch.aia.aplic_addr,
+ val);
+}
+
+static int aplic_mmio_write_offset(struct kvm *kvm, gpa_t off, u32 val32)
+{
+ u32 i;
+ struct aplic *aplic = kvm->arch.aia.aplic_state;
+
+ if ((off & 0x3) != 0)
+ return -EOPNOTSUPP;
+
+ if (off == APLIC_DOMAINCFG) {
+ /* Only IE bit writeable */
+ aplic->domaincfg = val32 & APLIC_DOMAINCFG_IE;
+ } else if ((off >= APLIC_SOURCECFG_BASE) &&
+ (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
+ i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
+ aplic_write_sourcecfg(aplic, i, val32);
+ } else if ((off >= APLIC_SETIP_BASE) &&
+ (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
+ i = (off - APLIC_SETIP_BASE) >> 2;
+ aplic_write_pending_word(aplic, i, val32, true);
+ } else if (off == APLIC_SETIPNUM) {
+ aplic_write_pending(aplic, val32, true);
+ } else if ((off >= APLIC_CLRIP_BASE) &&
+ (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
+ i = (off - APLIC_CLRIP_BASE) >> 2;
+ aplic_write_pending_word(aplic, i, val32, false);
+ } else if (off == APLIC_CLRIPNUM) {
+ aplic_write_pending(aplic, val32, false);
+ } else if ((off >= APLIC_SETIE_BASE) &&
+ (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
+ i = (off - APLIC_SETIE_BASE) >> 2;
+ aplic_write_enabled_word(aplic, i, val32, true);
+ } else if (off == APLIC_SETIENUM) {
+ aplic_write_enabled(aplic, val32, true);
+ } else if ((off >= APLIC_CLRIE_BASE) &&
+ (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
+ i = (off - APLIC_CLRIE_BASE) >> 2;
+ aplic_write_enabled_word(aplic, i, val32, false);
+ } else if (off == APLIC_CLRIENUM) {
+ aplic_write_enabled(aplic, val32, false);
+ } else if (off == APLIC_SETIPNUM_LE) {
+ aplic_write_pending(aplic, val32, true);
+ } else if (off == APLIC_SETIPNUM_BE) {
+ aplic_write_pending(aplic, __swab32(val32), true);
+ } else if (off == APLIC_GENMSI) {
+ aplic->genmsi = val32 & ~(APLIC_TARGET_GUEST_IDX_MASK <<
+ APLIC_TARGET_GUEST_IDX_SHIFT);
+ kvm_riscv_aia_inject_msi_by_id(kvm,
+ val32 >> APLIC_TARGET_HART_IDX_SHIFT, 0,
+ val32 & APLIC_TARGET_EIID_MASK);
+ } else if ((off >= APLIC_TARGET_BASE) &&
+ (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
+ i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
+ aplic_write_target(aplic, i, val32);
+ } else
+ return -ENODEV;
+
+ aplic_update_irq_range(kvm, 1, aplic->nr_irqs - 1);
+
+ return 0;
+}
+
+static int aplic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ if (len != 4)
+ return -EOPNOTSUPP;
+
+ return aplic_mmio_write_offset(vcpu->kvm,
+ addr - vcpu->kvm->arch.aia.aplic_addr,
+ *((const u32 *)val));
+}
+
+static struct kvm_io_device_ops aplic_iodoev_ops = {
+ .read = aplic_mmio_read,
+ .write = aplic_mmio_write,
+};
+
+int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v)
+{
+ int rc;
+
+ if (!kvm->arch.aia.aplic_state)
+ return -ENODEV;
+
+ rc = aplic_mmio_write_offset(kvm, type, v);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v)
+{
+ int rc;
+
+ if (!kvm->arch.aia.aplic_state)
+ return -ENODEV;
+
+ rc = aplic_mmio_read_offset(kvm, type, v);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type)
+{
+ int rc;
+ u32 val;
+
+ if (!kvm->arch.aia.aplic_state)
+ return -ENODEV;
+
+ rc = aplic_mmio_read_offset(kvm, type, &val);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+int kvm_riscv_aia_aplic_init(struct kvm *kvm)
+{
+ int i, ret = 0;
+ struct aplic *aplic;
+
+ /* Do nothing if we have zero sources */
+ if (!kvm->arch.aia.nr_sources)
+ return 0;
+
+ /* Allocate APLIC global state */
+ aplic = kzalloc(sizeof(*aplic), GFP_KERNEL);
+ if (!aplic)
+ return -ENOMEM;
+ kvm->arch.aia.aplic_state = aplic;
+
+ /* Setup APLIC IRQs */
+ aplic->nr_irqs = kvm->arch.aia.nr_sources + 1;
+ aplic->nr_words = DIV_ROUND_UP(aplic->nr_irqs, 32);
+ aplic->irqs = kcalloc(aplic->nr_irqs,
+ sizeof(*aplic->irqs), GFP_KERNEL);
+ if (!aplic->irqs) {
+ ret = -ENOMEM;
+ goto fail_free_aplic;
+ }
+ for (i = 0; i < aplic->nr_irqs; i++)
+ raw_spin_lock_init(&aplic->irqs[i].lock);
+
+ /* Setup IO device */
+ kvm_iodevice_init(&aplic->iodev, &aplic_iodoev_ops);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
+ kvm->arch.aia.aplic_addr,
+ KVM_DEV_RISCV_APLIC_SIZE,
+ &aplic->iodev);
+ mutex_unlock(&kvm->slots_lock);
+ if (ret)
+ goto fail_free_aplic_irqs;
+
+ /* Setup default IRQ routing */
+ ret = kvm_riscv_setup_default_irq_routing(kvm, aplic->nr_irqs);
+ if (ret)
+ goto fail_unreg_iodev;
+
+ return 0;
+
+fail_unreg_iodev:
+ mutex_lock(&kvm->slots_lock);
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
+ mutex_unlock(&kvm->slots_lock);
+fail_free_aplic_irqs:
+ kfree(aplic->irqs);
+fail_free_aplic:
+ kvm->arch.aia.aplic_state = NULL;
+ kfree(aplic);
+ return ret;
+}
+
+void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm)
+{
+ struct aplic *aplic = kvm->arch.aia.aplic_state;
+
+ if (!aplic)
+ return;
+
+ mutex_lock(&kvm->slots_lock);
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
+ mutex_unlock(&kvm->slots_lock);
+
+ kfree(aplic->irqs);
+
+ kvm->arch.aia.aplic_state = NULL;
+ kfree(aplic);
+}
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
new file mode 100644
index 000000000000..0eb689351b7d
--- /dev/null
+++ b/arch/riscv/kvm/aia_device.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ * Anup Patel <apatel@ventanamicro.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_aia_imsic.h>
+
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+ struct kvm_vcpu *tmp_vcpu;
+
+ for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+ tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+ mutex_unlock(&tmp_vcpu->mutex);
+ }
+}
+
+static void unlock_all_vcpus(struct kvm *kvm)
+{
+ unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+static bool lock_all_vcpus(struct kvm *kvm)
+{
+ struct kvm_vcpu *tmp_vcpu;
+ unsigned long c;
+
+ kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+ if (!mutex_trylock(&tmp_vcpu->mutex)) {
+ unlock_vcpus(kvm, c - 1);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int aia_create(struct kvm_device *dev, u32 type)
+{
+ int ret;
+ unsigned long i;
+ struct kvm *kvm = dev->kvm;
+ struct kvm_vcpu *vcpu;
+
+ if (irqchip_in_kernel(kvm))
+ return -EEXIST;
+
+ ret = -EBUSY;
+ if (!lock_all_vcpus(kvm))
+ return ret;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.ran_atleast_once)
+ goto out_unlock;
+ }
+ ret = 0;
+
+ kvm->arch.aia.in_kernel = true;
+
+out_unlock:
+ unlock_all_vcpus(kvm);
+ return ret;
+}
+
+static void aia_destroy(struct kvm_device *dev)
+{
+ kfree(dev);
+}
+
+static int aia_config(struct kvm *kvm, unsigned long type,
+ u32 *nr, bool write)
+{
+ struct kvm_aia *aia = &kvm->arch.aia;
+
+ /* Writes can only be done before irqchip is initialized */
+ if (write && kvm_riscv_aia_initialized(kvm))
+ return -EBUSY;
+
+ switch (type) {
+ case KVM_DEV_RISCV_AIA_CONFIG_MODE:
+ if (write) {
+ switch (*nr) {
+ case KVM_DEV_RISCV_AIA_MODE_EMUL:
+ break;
+ case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
+ case KVM_DEV_RISCV_AIA_MODE_AUTO:
+ /*
+ * HW Acceleration and Auto modes are only
+ * supported on hosts with non-zero guest
+ * external interrupts (i.e. non-zero
+ * VS-level IMSIC pages).
+ */
+ if (!kvm_riscv_aia_nr_hgei)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ aia->mode = *nr;
+ } else
+ *nr = aia->mode;
+ break;
+ case KVM_DEV_RISCV_AIA_CONFIG_IDS:
+ if (write) {
+ if ((*nr < KVM_DEV_RISCV_AIA_IDS_MIN) ||
+ (*nr >= KVM_DEV_RISCV_AIA_IDS_MAX) ||
+ ((*nr & KVM_DEV_RISCV_AIA_IDS_MIN) !=
+ KVM_DEV_RISCV_AIA_IDS_MIN) ||
+ (kvm_riscv_aia_max_ids <= *nr))
+ return -EINVAL;
+ aia->nr_ids = *nr;
+ } else
+ *nr = aia->nr_ids;
+ break;
+ case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
+ if (write) {
+ if ((*nr >= KVM_DEV_RISCV_AIA_SRCS_MAX) ||
+ (*nr >= kvm_riscv_aia_max_ids))
+ return -EINVAL;
+ aia->nr_sources = *nr;
+ } else
+ *nr = aia->nr_sources;
+ break;
+ case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
+ if (write) {
+ if (*nr >= KVM_DEV_RISCV_AIA_GROUP_BITS_MAX)
+ return -EINVAL;
+ aia->nr_group_bits = *nr;
+ } else
+ *nr = aia->nr_group_bits;
+ break;
+ case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
+ if (write) {
+ if ((*nr < KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN) ||
+ (*nr >= KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX))
+ return -EINVAL;
+ aia->nr_group_shift = *nr;
+ } else
+ *nr = aia->nr_group_shift;
+ break;
+ case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
+ if (write) {
+ if (*nr >= KVM_DEV_RISCV_AIA_HART_BITS_MAX)
+ return -EINVAL;
+ aia->nr_hart_bits = *nr;
+ } else
+ *nr = aia->nr_hart_bits;
+ break;
+ case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
+ if (write) {
+ if (*nr >= KVM_DEV_RISCV_AIA_GUEST_BITS_MAX)
+ return -EINVAL;
+ aia->nr_guest_bits = *nr;
+ } else
+ *nr = aia->nr_guest_bits;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write)
+{
+ struct kvm_aia *aia = &kvm->arch.aia;
+
+ if (write) {
+ /* Writes can only be done before irqchip is initialized */
+ if (kvm_riscv_aia_initialized(kvm))
+ return -EBUSY;
+
+ if (*addr & (KVM_DEV_RISCV_APLIC_ALIGN - 1))
+ return -EINVAL;
+
+ aia->aplic_addr = *addr;
+ } else
+ *addr = aia->aplic_addr;
+
+ return 0;
+}
+
+static int aia_imsic_addr(struct kvm *kvm, u64 *addr,
+ unsigned long vcpu_idx, bool write)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vcpu_aia *vcpu_aia;
+
+ vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+ if (!vcpu)
+ return -EINVAL;
+ vcpu_aia = &vcpu->arch.aia_context;
+
+ if (write) {
+ /* Writes can only be done before irqchip is initialized */
+ if (kvm_riscv_aia_initialized(kvm))
+ return -EBUSY;
+
+ if (*addr & (KVM_DEV_RISCV_IMSIC_ALIGN - 1))
+ return -EINVAL;
+ }
+
+ mutex_lock(&vcpu->mutex);
+ if (write)
+ vcpu_aia->imsic_addr = *addr;
+ else
+ *addr = vcpu_aia->imsic_addr;
+ mutex_unlock(&vcpu->mutex);
+
+ return 0;
+}
+
+static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
+{
+ u32 h, l;
+ gpa_t mask = 0;
+
+ h = aia->nr_hart_bits + aia->nr_guest_bits +
+ IMSIC_MMIO_PAGE_SHIFT - 1;
+ mask = GENMASK_ULL(h, 0);
+
+ if (aia->nr_group_bits) {
+ h = aia->nr_group_bits + aia->nr_group_shift - 1;
+ l = aia->nr_group_shift;
+ mask |= GENMASK_ULL(h, l);
+ }
+
+ return (addr & ~mask) >> IMSIC_MMIO_PAGE_SHIFT;
+}
+
+static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
+{
+ u32 hart, group = 0;
+
+ hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
+ GENMASK_ULL(aia->nr_hart_bits - 1, 0);
+ if (aia->nr_group_bits)
+ group = (addr >> aia->nr_group_shift) &
+ GENMASK_ULL(aia->nr_group_bits - 1, 0);
+
+ return (group << aia->nr_hart_bits) | hart;
+}
+
+static int aia_init(struct kvm *kvm)
+{
+ int ret, i;
+ unsigned long idx;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vcpu_aia *vaia;
+ struct kvm_aia *aia = &kvm->arch.aia;
+ gpa_t base_ppn = KVM_RISCV_AIA_UNDEF_ADDR;
+
+ /* Irqchip can be initialized only once */
+ if (kvm_riscv_aia_initialized(kvm))
+ return -EBUSY;
+
+ /* We might be in the middle of creating a VCPU? */
+ if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
+ return -EBUSY;
+
+ /* Number of sources should be less than or equal to the number of IDs */
+ if (aia->nr_ids < aia->nr_sources)
+ return -EINVAL;
+
+ /* APLIC base is required for non-zero number of sources */
+ if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR)
+ return -EINVAL;
+
+ /* Initialize APLIC */
+ ret = kvm_riscv_aia_aplic_init(kvm);
+ if (ret)
+ return ret;
+
+ /* Iterate over each VCPU */
+ kvm_for_each_vcpu(idx, vcpu, kvm) {
+ vaia = &vcpu->arch.aia_context;
+
+ /* IMSIC base is required */
+ if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
+ ret = -EINVAL;
+ goto fail_cleanup_imsics;
+ }
+
+ /* All IMSICs should have matching base PPN */
+ if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
+ base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr);
+ if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
+ ret = -EINVAL;
+ goto fail_cleanup_imsics;
+ }
+
+ /* Update HART index of the IMSIC based on IMSIC base */
+ vaia->hart_index = aia_imsic_hart_index(aia,
+ vaia->imsic_addr);
+
+ /* Initialize IMSIC for this VCPU */
+ ret = kvm_riscv_vcpu_aia_imsic_init(vcpu);
+ if (ret)
+ goto fail_cleanup_imsics;
+ }
+
+ /* Set the initialized flag */
+ kvm->arch.aia.initialized = true;
+
+ return 0;
+
+fail_cleanup_imsics:
+ for (i = idx - 1; i >= 0; i--) {
+ vcpu = kvm_get_vcpu(kvm, i);
+ if (!vcpu)
+ continue;
+ kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
+ }
+ kvm_riscv_aia_aplic_cleanup(kvm);
+ return ret;
+}
+
+static int aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ u32 nr;
+ u64 addr;
+ int nr_vcpus, r = -ENXIO;
+ unsigned long v, type = (unsigned long)attr->attr;
+ void __user *uaddr = (void __user *)(long)attr->addr;
+
+ switch (attr->group) {
+ case KVM_DEV_RISCV_AIA_GRP_CONFIG:
+ if (copy_from_user(&nr, uaddr, sizeof(nr)))
+ return -EFAULT;
+
+ mutex_lock(&dev->kvm->lock);
+ r = aia_config(dev->kvm, type, &nr, true);
+ mutex_unlock(&dev->kvm->lock);
+
+ break;
+
+ case KVM_DEV_RISCV_AIA_GRP_ADDR:
+ if (copy_from_user(&addr, uaddr, sizeof(addr)))
+ return -EFAULT;
+
+ nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
+ mutex_lock(&dev->kvm->lock);
+ if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
+ r = aia_aplic_addr(dev->kvm, &addr, true);
+ else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
+ r = aia_imsic_addr(dev->kvm, &addr,
+ type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
+ mutex_unlock(&dev->kvm->lock);
+
+ break;
+
+ case KVM_DEV_RISCV_AIA_GRP_CTRL:
+ switch (type) {
+ case KVM_DEV_RISCV_AIA_CTRL_INIT:
+ mutex_lock(&dev->kvm->lock);
+ r = aia_init(dev->kvm);
+ mutex_unlock(&dev->kvm->lock);
+ break;
+ }
+
+ break;
+ case KVM_DEV_RISCV_AIA_GRP_APLIC:
+ if (copy_from_user(&nr, uaddr, sizeof(nr)))
+ return -EFAULT;
+
+ mutex_lock(&dev->kvm->lock);
+ r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
+ mutex_unlock(&dev->kvm->lock);
+
+ break;
+ case KVM_DEV_RISCV_AIA_GRP_IMSIC:
+ if (copy_from_user(&v, uaddr, sizeof(v)))
+ return -EFAULT;
+
+ mutex_lock(&dev->kvm->lock);
+ r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
+ mutex_unlock(&dev->kvm->lock);
+
+ break;
+ }
+
+ return r;
+}
+
+static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ u32 nr;
+ u64 addr;
+ int nr_vcpus, r = -ENXIO;
+ void __user *uaddr = (void __user *)(long)attr->addr;
+ unsigned long v, type = (unsigned long)attr->attr;
+
+ switch (attr->group) {
+ case KVM_DEV_RISCV_AIA_GRP_CONFIG:
+ if (copy_from_user(&nr, uaddr, sizeof(nr)))
+ return -EFAULT;
+
+ mutex_lock(&dev->kvm->lock);
+ r = aia_config(dev->kvm, type, &nr, false);
+ mutex_unlock(&dev->kvm->lock);
+ if (r)
+ return r;
+
+ if (copy_to_user(uaddr, &nr, sizeof(nr)))
+ return -EFAULT;
+
+ break;
+ case KVM_DEV_RISCV_AIA_GRP_ADDR:
+ if (copy_from_user(&addr, uaddr, sizeof(addr)))
+ return -EFAULT;
+
+ nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
+ mutex_lock(&dev->kvm->lock);
+ if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
+ r = aia_aplic_addr(dev->kvm, &addr, false);
+ else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
+ r = aia_imsic_addr(dev->kvm, &addr,
+ type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
+ mutex_unlock(&dev->kvm->lock);
+ if (r)
+ return r;
+
+ if (copy_to_user(uaddr, &addr, sizeof(addr)))
+ return -EFAULT;
+
+ break;
+ case KVM_DEV_RISCV_AIA_GRP_APLIC:
+ if (copy_from_user(&nr, uaddr, sizeof(nr)))
+ return -EFAULT;
+
+ mutex_lock(&dev->kvm->lock);
+ r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
+ mutex_unlock(&dev->kvm->lock);
+ if (r)
+ return r;
+
+ if (copy_to_user(uaddr, &nr, sizeof(nr)))
+ return -EFAULT;
+
+ break;
+ case KVM_DEV_RISCV_AIA_GRP_IMSIC:
+ if (copy_from_user(&v, uaddr, sizeof(v)))
+ return -EFAULT;
+
+ mutex_lock(&dev->kvm->lock);
+ r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
+ mutex_unlock(&dev->kvm->lock);
+ if (r)
+ return r;
+
+ if (copy_to_user(uaddr, &v, sizeof(v)))
+ return -EFAULT;
+
+ break;
+ }
+
+ return r;
+}
+
+static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ int nr_vcpus;
+
+ switch (attr->group) {
+ case KVM_DEV_RISCV_AIA_GRP_CONFIG:
+ switch (attr->attr) {
+ case KVM_DEV_RISCV_AIA_CONFIG_MODE:
+ case KVM_DEV_RISCV_AIA_CONFIG_IDS:
+ case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
+ case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
+ case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
+ case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
+ case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
+ return 0;
+ }
+ break;
+ case KVM_DEV_RISCV_AIA_GRP_ADDR:
+ nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
+ if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC)
+ return 0;
+ else if (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
+ return 0;
+ break;
+ case KVM_DEV_RISCV_AIA_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_RISCV_AIA_CTRL_INIT:
+ return 0;
+ }
+ break;
+ case KVM_DEV_RISCV_AIA_GRP_APLIC:
+ return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
+ case KVM_DEV_RISCV_AIA_GRP_IMSIC:
+ return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
+ }
+
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_riscv_aia_device_ops = {
+ .name = "kvm-riscv-aia",
+ .create = aia_create,
+ .destroy = aia_destroy,
+ .set_attr = aia_set_attr,
+ .get_attr = aia_get_attr,
+ .has_attr = aia_has_attr,
+};
+
+int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
+{
+ /* Proceed only if AIA was initialized successfully */
+ if (!kvm_riscv_aia_initialized(vcpu->kvm))
+ return 1;
+
+ /* Update the IMSIC HW state before entering guest mode */
+ return kvm_riscv_vcpu_aia_imsic_update(vcpu);
+}
+
+void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+ struct kvm_vcpu_aia_csr *reset_csr =
+ &vcpu->arch.aia_context.guest_reset_csr;
+
+ if (!kvm_riscv_aia_available())
+ return;
+ memcpy(csr, reset_csr, sizeof(*csr));
+
+ /* Proceed only if AIA was initialized successfully */
+ if (!kvm_riscv_aia_initialized(vcpu->kvm))
+ return;
+
+ /* Reset the IMSIC context */
+ kvm_riscv_vcpu_aia_imsic_reset(vcpu);
+}
+
+int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
+
+ if (!kvm_riscv_aia_available())
+ return 0;
+
+ /*
+ * We don't do any memory allocations here because these will be
+ * done after the AIA device is initialized by user-space.
+ *
+ * Refer to the aia_init() implementation for more details.
+ */
+
+ /* Initialize default values in AIA vcpu context */
+ vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
+ vaia->hart_index = vcpu->vcpu_idx;
+
+ return 0;
+}
+
+void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
+{
+ /* Proceed only if AIA was initialized successfully */
+ if (!kvm_riscv_aia_initialized(vcpu->kvm))
+ return;
+
+ /* Cleanup IMSIC context */
+ kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
+}
+
+int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
+ u32 guest_index, u32 iid)
+{
+ unsigned long idx;
+ struct kvm_vcpu *vcpu;
+
+ /* Proceed only if AIA was initialized successfully */
+ if (!kvm_riscv_aia_initialized(kvm))
+ return -EBUSY;
+
+ /* Inject MSI to matching VCPU */
+ kvm_for_each_vcpu(idx, vcpu, kvm) {
+ if (vcpu->arch.aia_context.hart_index == hart_index)
+ return kvm_riscv_vcpu_aia_imsic_inject(vcpu,
+ guest_index,
+ 0, iid);
+ }
+
+ return 0;
+}
+
+int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
+{
+ gpa_t tppn, ippn;
+ unsigned long idx;
+ struct kvm_vcpu *vcpu;
+ u32 g, toff, iid = msi->data;
+ struct kvm_aia *aia = &kvm->arch.aia;
+ gpa_t target = (((gpa_t)msi->address_hi) << 32) | msi->address_lo;
+
+ /* Proceed only if AIA was initialized successfully */
+ if (!kvm_riscv_aia_initialized(kvm))
+ return -EBUSY;
+
+ /* Convert target address to target PPN */
+ tppn = target >> IMSIC_MMIO_PAGE_SHIFT;
+
+ /* Extract and clear Guest ID from target PPN */
+ g = tppn & (BIT(aia->nr_guest_bits) - 1);
+ tppn &= ~((gpa_t)(BIT(aia->nr_guest_bits) - 1));
+
+ /* Inject MSI to matching VCPU */
+ kvm_for_each_vcpu(idx, vcpu, kvm) {
+ ippn = vcpu->arch.aia_context.imsic_addr >>
+ IMSIC_MMIO_PAGE_SHIFT;
+ if (ippn == tppn) {
+ toff = target & (IMSIC_MMIO_PAGE_SZ - 1);
+ return kvm_riscv_vcpu_aia_imsic_inject(vcpu, g,
+ toff, iid);
+ }
+ }
+
+ return 0;
+}
+
+int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level)
+{
+ /* Proceed only if AIA was initialized successfully */
+ if (!kvm_riscv_aia_initialized(kvm))
+ return -EBUSY;
+
+ /* Inject interrupt level change in APLIC */
+ return kvm_riscv_aia_aplic_inject(kvm, irq, level);
+}
+
+void kvm_riscv_aia_init_vm(struct kvm *kvm)
+{
+ struct kvm_aia *aia = &kvm->arch.aia;
+
+ if (!kvm_riscv_aia_available())
+ return;
+
+ /*
+ * We don't do any memory allocations here because these will be
+ * done after the AIA device is initialized by user-space.
+ *
+ * Refer to the aia_init() implementation for more details.
+ */
+
+ /* Initialize default values in AIA global context */
+ aia->mode = (kvm_riscv_aia_nr_hgei) ?
+ KVM_DEV_RISCV_AIA_MODE_AUTO : KVM_DEV_RISCV_AIA_MODE_EMUL;
+ aia->nr_ids = kvm_riscv_aia_max_ids - 1;
+ aia->nr_sources = 0;
+ aia->nr_group_bits = 0;
+ aia->nr_group_shift = KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN;
+ aia->nr_hart_bits = 0;
+ aia->nr_guest_bits = 0;
+ aia->aplic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
+}
+
+void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
+{
+ /* Proceed only if AIA was initialized successfully */
+ if (!kvm_riscv_aia_initialized(kvm))
+ return;
+
+ /* Cleanup APLIC context */
+ kvm_riscv_aia_aplic_cleanup(kvm);
+}
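Taken together, aia_create(), aia_config(), aia_aplic_addr(), aia_imsic_addr() and aia_init() above define the user-space bring-up sequence for the in-kernel AIA. A hedged sketch of that sequence is below; the device type KVM_DEV_TYPE_RISCV_AIA and the KVM_DEV_RISCV_AIA_* attribute macros are assumed to come from this series' uapi headers, and error handling is mostly omitted:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int aia_dev_set_attr(int dev_fd, uint32_t group, uint64_t attr, void *val)
    {
            struct kvm_device_attr da = {
                    .group = group,
                    .attr = attr,
                    .addr = (uint64_t)(unsigned long)val,
            };

            return ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &da);
    }

    /* Create and initialize the in-kernel AIA before any VCPU has run */
    static int setup_aia(int vm_fd, uint64_t aplic_base, const uint64_t *imsic_base,
                         uint32_t nr_vcpus, uint32_t nr_sources)
    {
            struct kvm_create_device cd = { .type = KVM_DEV_TYPE_RISCV_AIA };
            uint32_t srcs = nr_sources;
            uint64_t addr;
            uint32_t i;

            if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
                    return -1;

            /* Number of wired sources handled by the emulated APLIC */
            aia_dev_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                             KVM_DEV_RISCV_AIA_CONFIG_SRCS, &srcs);

            /* Guest-physical bases of the APLIC and of each VCPU's IMSIC */
            aia_dev_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
                             KVM_DEV_RISCV_AIA_ADDR_APLIC, &aplic_base);
            for (i = 0; i < nr_vcpus; i++) {
                    addr = imsic_base[i];
                    aia_dev_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
                                     KVM_DEV_RISCV_AIA_ADDR_IMSIC(i), &addr);
            }

            /* Finalize; this lands in aia_init() above */
            return aia_dev_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
                                    KVM_DEV_RISCV_AIA_CTRL_INIT, NULL);
    }

All IMSIC bases must share the same base PPN once the group/hart/guest fields are masked out, otherwise aia_init() rejects the configuration.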
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
new file mode 100644
index 000000000000..6cf23b8adb71
--- /dev/null
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -0,0 +1,1084 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ * Anup Patel <apatel@ventanamicro.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/kvm_host.h>
+#include <linux/math.h>
+#include <linux/spinlock.h>
+#include <linux/swab.h>
+#include <kvm/iodev.h>
+#include <asm/csr.h>
+#include <asm/kvm_aia_imsic.h>
+
+#define IMSIC_MAX_EIX (IMSIC_MAX_ID / BITS_PER_TYPE(u64))
+
+struct imsic_mrif_eix {
+ unsigned long eip[BITS_PER_TYPE(u64) / BITS_PER_LONG];
+ unsigned long eie[BITS_PER_TYPE(u64) / BITS_PER_LONG];
+};
+
+struct imsic_mrif {
+ struct imsic_mrif_eix eix[IMSIC_MAX_EIX];
+ unsigned long eithreshold;
+ unsigned long eidelivery;
+};
+
+struct imsic {
+ struct kvm_io_device iodev;
+
+ u32 nr_msis;
+ u32 nr_eix;
+ u32 nr_hw_eix;
+
+ /*
+ * At any point in time, the register state is in
+ * one of the following places:
+ *
+ * 1) Hardware: IMSIC VS-file (vsfile_cpu >= 0)
+ * 2) Software: IMSIC SW-file (vsfile_cpu < 0)
+ */
+
+ /* IMSIC VS-file */
+ rwlock_t vsfile_lock;
+ int vsfile_cpu;
+ int vsfile_hgei;
+ void __iomem *vsfile_va;
+ phys_addr_t vsfile_pa;
+
+ /* IMSIC SW-file */
+ struct imsic_mrif *swfile;
+ phys_addr_t swfile_pa;
+};
+
+#define imsic_vs_csr_read(__c) \
+({ \
+ unsigned long __r; \
+ csr_write(CSR_VSISELECT, __c); \
+ __r = csr_read(CSR_VSIREG); \
+ __r; \
+})
+
+#define imsic_read_switchcase(__ireg) \
+ case __ireg: \
+ return imsic_vs_csr_read(__ireg);
+#define imsic_read_switchcase_2(__ireg) \
+ imsic_read_switchcase(__ireg + 0) \
+ imsic_read_switchcase(__ireg + 1)
+#define imsic_read_switchcase_4(__ireg) \
+ imsic_read_switchcase_2(__ireg + 0) \
+ imsic_read_switchcase_2(__ireg + 2)
+#define imsic_read_switchcase_8(__ireg) \
+ imsic_read_switchcase_4(__ireg + 0) \
+ imsic_read_switchcase_4(__ireg + 4)
+#define imsic_read_switchcase_16(__ireg) \
+ imsic_read_switchcase_8(__ireg + 0) \
+ imsic_read_switchcase_8(__ireg + 8)
+#define imsic_read_switchcase_32(__ireg) \
+ imsic_read_switchcase_16(__ireg + 0) \
+ imsic_read_switchcase_16(__ireg + 16)
+#define imsic_read_switchcase_64(__ireg) \
+ imsic_read_switchcase_32(__ireg + 0) \
+ imsic_read_switchcase_32(__ireg + 32)
+
+static unsigned long imsic_eix_read(int ireg)
+{
+ switch (ireg) {
+ imsic_read_switchcase_64(IMSIC_EIP0)
+ imsic_read_switchcase_64(IMSIC_EIE0)
+ }
+
+ return 0;
+}
+
+#define imsic_vs_csr_swap(__c, __v) \
+({ \
+ unsigned long __r; \
+ csr_write(CSR_VSISELECT, __c); \
+ __r = csr_swap(CSR_VSIREG, __v); \
+ __r; \
+})
+
+#define imsic_swap_switchcase(__ireg, __v) \
+ case __ireg: \
+ return imsic_vs_csr_swap(__ireg, __v);
+#define imsic_swap_switchcase_2(__ireg, __v) \
+ imsic_swap_switchcase(__ireg + 0, __v) \
+ imsic_swap_switchcase(__ireg + 1, __v)
+#define imsic_swap_switchcase_4(__ireg, __v) \
+ imsic_swap_switchcase_2(__ireg + 0, __v) \
+ imsic_swap_switchcase_2(__ireg + 2, __v)
+#define imsic_swap_switchcase_8(__ireg, __v) \
+ imsic_swap_switchcase_4(__ireg + 0, __v) \
+ imsic_swap_switchcase_4(__ireg + 4, __v)
+#define imsic_swap_switchcase_16(__ireg, __v) \
+ imsic_swap_switchcase_8(__ireg + 0, __v) \
+ imsic_swap_switchcase_8(__ireg + 8, __v)
+#define imsic_swap_switchcase_32(__ireg, __v) \
+ imsic_swap_switchcase_16(__ireg + 0, __v) \
+ imsic_swap_switchcase_16(__ireg + 16, __v)
+#define imsic_swap_switchcase_64(__ireg, __v) \
+ imsic_swap_switchcase_32(__ireg + 0, __v) \
+ imsic_swap_switchcase_32(__ireg + 32, __v)
+
+static unsigned long imsic_eix_swap(int ireg, unsigned long val)
+{
+ switch (ireg) {
+ imsic_swap_switchcase_64(IMSIC_EIP0, val)
+ imsic_swap_switchcase_64(IMSIC_EIE0, val)
+ }
+
+ return 0;
+}
+
+#define imsic_vs_csr_write(__c, __v) \
+do { \
+ csr_write(CSR_VSISELECT, __c); \
+ csr_write(CSR_VSIREG, __v); \
+} while (0)
+
+#define imsic_write_switchcase(__ireg, __v) \
+ case __ireg: \
+ imsic_vs_csr_write(__ireg, __v); \
+ break;
+#define imsic_write_switchcase_2(__ireg, __v) \
+ imsic_write_switchcase(__ireg + 0, __v) \
+ imsic_write_switchcase(__ireg + 1, __v)
+#define imsic_write_switchcase_4(__ireg, __v) \
+ imsic_write_switchcase_2(__ireg + 0, __v) \
+ imsic_write_switchcase_2(__ireg + 2, __v)
+#define imsic_write_switchcase_8(__ireg, __v) \
+ imsic_write_switchcase_4(__ireg + 0, __v) \
+ imsic_write_switchcase_4(__ireg + 4, __v)
+#define imsic_write_switchcase_16(__ireg, __v) \
+ imsic_write_switchcase_8(__ireg + 0, __v) \
+ imsic_write_switchcase_8(__ireg + 8, __v)
+#define imsic_write_switchcase_32(__ireg, __v) \
+ imsic_write_switchcase_16(__ireg + 0, __v) \
+ imsic_write_switchcase_16(__ireg + 16, __v)
+#define imsic_write_switchcase_64(__ireg, __v) \
+ imsic_write_switchcase_32(__ireg + 0, __v) \
+ imsic_write_switchcase_32(__ireg + 32, __v)
+
+static void imsic_eix_write(int ireg, unsigned long val)
+{
+ switch (ireg) {
+ imsic_write_switchcase_64(IMSIC_EIP0, val)
+ imsic_write_switchcase_64(IMSIC_EIE0, val)
+ }
+}
+
+#define imsic_vs_csr_set(__c, __v) \
+do { \
+ csr_write(CSR_VSISELECT, __c); \
+ csr_set(CSR_VSIREG, __v); \
+} while (0)
+
+#define imsic_set_switchcase(__ireg, __v) \
+ case __ireg: \
+ imsic_vs_csr_set(__ireg, __v); \
+ break;
+#define imsic_set_switchcase_2(__ireg, __v) \
+ imsic_set_switchcase(__ireg + 0, __v) \
+ imsic_set_switchcase(__ireg + 1, __v)
+#define imsic_set_switchcase_4(__ireg, __v) \
+ imsic_set_switchcase_2(__ireg + 0, __v) \
+ imsic_set_switchcase_2(__ireg + 2, __v)
+#define imsic_set_switchcase_8(__ireg, __v) \
+ imsic_set_switchcase_4(__ireg + 0, __v) \
+ imsic_set_switchcase_4(__ireg + 4, __v)
+#define imsic_set_switchcase_16(__ireg, __v) \
+ imsic_set_switchcase_8(__ireg + 0, __v) \
+ imsic_set_switchcase_8(__ireg + 8, __v)
+#define imsic_set_switchcase_32(__ireg, __v) \
+ imsic_set_switchcase_16(__ireg + 0, __v) \
+ imsic_set_switchcase_16(__ireg + 16, __v)
+#define imsic_set_switchcase_64(__ireg, __v) \
+ imsic_set_switchcase_32(__ireg + 0, __v) \
+ imsic_set_switchcase_32(__ireg + 32, __v)
+
+static void imsic_eix_set(int ireg, unsigned long val)
+{
+ switch (ireg) {
+ imsic_set_switchcase_64(IMSIC_EIP0, val)
+ imsic_set_switchcase_64(IMSIC_EIE0, val)
+ }
+}
+
+static unsigned long imsic_mrif_atomic_rmw(struct imsic_mrif *mrif,
+ unsigned long *ptr,
+ unsigned long new_val,
+ unsigned long wr_mask)
+{
+ unsigned long old_val = 0, tmp = 0;
+
+ __asm__ __volatile__ (
+ "0: lr.w.aq %1, %0\n"
+ " and %2, %1, %3\n"
+ " or %2, %2, %4\n"
+ " sc.w.rl %2, %2, %0\n"
+ " bnez %2, 0b"
+ : "+A" (*ptr), "+r" (old_val), "+r" (tmp)
+ : "r" (~wr_mask), "r" (new_val & wr_mask)
+ : "memory");
+
+ return old_val;
+}
+
+static unsigned long imsic_mrif_atomic_or(struct imsic_mrif *mrif,
+ unsigned long *ptr,
+ unsigned long val)
+{
+ return atomic_long_fetch_or(val, (atomic_long_t *)ptr);
+}
+
+#define imsic_mrif_atomic_write(__mrif, __ptr, __new_val) \
+ imsic_mrif_atomic_rmw(__mrif, __ptr, __new_val, -1UL)
+#define imsic_mrif_atomic_read(__mrif, __ptr) \
+ imsic_mrif_atomic_or(__mrif, __ptr, 0)
+
+static u32 imsic_mrif_topei(struct imsic_mrif *mrif, u32 nr_eix, u32 nr_msis)
+{
+ struct imsic_mrif_eix *eix;
+ u32 i, imin, imax, ei, max_msi;
+ unsigned long eipend[BITS_PER_TYPE(u64) / BITS_PER_LONG];
+ unsigned long eithreshold = imsic_mrif_atomic_read(mrif,
+ &mrif->eithreshold);
+
+ max_msi = (eithreshold && (eithreshold <= nr_msis)) ?
+ eithreshold : nr_msis;
+ for (ei = 0; ei < nr_eix; ei++) {
+ eix = &mrif->eix[ei];
+ eipend[0] = imsic_mrif_atomic_read(mrif, &eix->eie[0]) &
+ imsic_mrif_atomic_read(mrif, &eix->eip[0]);
+#ifdef CONFIG_32BIT
+ eipend[1] = imsic_mrif_atomic_read(mrif, &eix->eie[1]) &
+ imsic_mrif_atomic_read(mrif, &eix->eip[1]);
+ if (!eipend[0] && !eipend[1])
+#else
+ if (!eipend[0])
+#endif
+ continue;
+
+ imin = ei * BITS_PER_TYPE(u64);
+ imax = ((imin + BITS_PER_TYPE(u64)) < max_msi) ?
+ imin + BITS_PER_TYPE(u64) : max_msi;
+ for (i = (!imin) ? 1 : imin; i < imax; i++) {
+ if (test_bit(i - imin, eipend))
+ return (i << TOPEI_ID_SHIFT) | i;
+ }
+ }
+
+ return 0;
+}
+
+static int imsic_mrif_isel_check(u32 nr_eix, unsigned long isel)
+{
+ u32 num = 0;
+
+ switch (isel) {
+ case IMSIC_EIDELIVERY:
+ case IMSIC_EITHRESHOLD:
+ break;
+ case IMSIC_EIP0 ... IMSIC_EIP63:
+ num = isel - IMSIC_EIP0;
+ break;
+ case IMSIC_EIE0 ... IMSIC_EIE63:
+ num = isel - IMSIC_EIE0;
+ break;
+ default:
+ return -ENOENT;
+ }
+#ifndef CONFIG_32BIT
+ if (num & 0x1)
+ return -EINVAL;
+#endif
+ if ((num / 2) >= nr_eix)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int imsic_mrif_rmw(struct imsic_mrif *mrif, u32 nr_eix,
+ unsigned long isel, unsigned long *val,
+ unsigned long new_val, unsigned long wr_mask)
+{
+ bool pend;
+ struct imsic_mrif_eix *eix;
+ unsigned long *ei, num, old_val = 0;
+
+ switch (isel) {
+ case IMSIC_EIDELIVERY:
+ old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eidelivery,
+ new_val, wr_mask & 0x1);
+ break;
+ case IMSIC_EITHRESHOLD:
+ old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eithreshold,
+ new_val, wr_mask & (IMSIC_MAX_ID - 1));
+ break;
+ case IMSIC_EIP0 ... IMSIC_EIP63:
+ case IMSIC_EIE0 ... IMSIC_EIE63:
+ if (isel >= IMSIC_EIP0 && isel <= IMSIC_EIP63) {
+ pend = true;
+ num = isel - IMSIC_EIP0;
+ } else {
+ pend = false;
+ num = isel - IMSIC_EIE0;
+ }
+
+ if ((num / 2) >= nr_eix)
+ return -EINVAL;
+ eix = &mrif->eix[num / 2];
+
+#ifndef CONFIG_32BIT
+ if (num & 0x1)
+ return -EINVAL;
+ ei = (pend) ? &eix->eip[0] : &eix->eie[0];
+#else
+ ei = (pend) ? &eix->eip[num & 0x1] : &eix->eie[num & 0x1];
+#endif
+
+ /* Bit0 of EIP0 or EIE0 is read-only */
+ if (!num)
+ wr_mask &= ~BIT(0);
+
+ old_val = imsic_mrif_atomic_rmw(mrif, ei, new_val, wr_mask);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (val)
+ *val = old_val;
+
+ return 0;
+}
+
+struct imsic_vsfile_read_data {
+ int hgei;
+ u32 nr_eix;
+ bool clear;
+ struct imsic_mrif *mrif;
+};
+
+static void imsic_vsfile_local_read(void *data)
+{
+ u32 i;
+ struct imsic_mrif_eix *eix;
+ struct imsic_vsfile_read_data *idata = data;
+ struct imsic_mrif *mrif = idata->mrif;
+ unsigned long new_hstatus, old_hstatus, old_vsiselect;
+
+ old_vsiselect = csr_read(CSR_VSISELECT);
+ old_hstatus = csr_read(CSR_HSTATUS);
+ new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
+ new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
+ csr_write(CSR_HSTATUS, new_hstatus);
+
+ /*
+ * We don't use imsic_mrif_atomic_xyz() functions to store
+ * values in MRIF because imsic_vsfile_read() is always called
+ * with a pointer to a temporary MRIF on the stack.
+ */
+
+ if (idata->clear) {
+ mrif->eidelivery = imsic_vs_csr_swap(IMSIC_EIDELIVERY, 0);
+ mrif->eithreshold = imsic_vs_csr_swap(IMSIC_EITHRESHOLD, 0);
+ for (i = 0; i < idata->nr_eix; i++) {
+ eix = &mrif->eix[i];
+ eix->eip[0] = imsic_eix_swap(IMSIC_EIP0 + i * 2, 0);
+ eix->eie[0] = imsic_eix_swap(IMSIC_EIE0 + i * 2, 0);
+#ifdef CONFIG_32BIT
+ eix->eip[1] = imsic_eix_swap(IMSIC_EIP0 + i * 2 + 1, 0);
+ eix->eie[1] = imsic_eix_swap(IMSIC_EIE0 + i * 2 + 1, 0);
+#endif
+ }
+ } else {
+ mrif->eidelivery = imsic_vs_csr_read(IMSIC_EIDELIVERY);
+ mrif->eithreshold = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
+ for (i = 0; i < idata->nr_eix; i++) {
+ eix = &mrif->eix[i];
+ eix->eip[0] = imsic_eix_read(IMSIC_EIP0 + i * 2);
+ eix->eie[0] = imsic_eix_read(IMSIC_EIE0 + i * 2);
+#ifdef CONFIG_32BIT
+ eix->eip[1] = imsic_eix_read(IMSIC_EIP0 + i * 2 + 1);
+ eix->eie[1] = imsic_eix_read(IMSIC_EIE0 + i * 2 + 1);
+#endif
+ }
+ }
+
+ csr_write(CSR_HSTATUS, old_hstatus);
+ csr_write(CSR_VSISELECT, old_vsiselect);
+}
+
+static void imsic_vsfile_read(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
+ bool clear, struct imsic_mrif *mrif)
+{
+ struct imsic_vsfile_read_data idata;
+
+ /* We can only read (and optionally clear) if we have an IMSIC VS-file */
+ if (vsfile_cpu < 0 || vsfile_hgei <= 0)
+ return;
+
+ /* We can only read (and optionally clear) on the local CPU */
+ idata.hgei = vsfile_hgei;
+ idata.nr_eix = nr_eix;
+ idata.clear = clear;
+ idata.mrif = mrif;
+ on_each_cpu_mask(cpumask_of(vsfile_cpu),
+ imsic_vsfile_local_read, &idata, 1);
+}
+
+struct imsic_vsfile_rw_data {
+ int hgei;
+ int isel;
+ bool write;
+ unsigned long val;
+};
+
+static void imsic_vsfile_local_rw(void *data)
+{
+ struct imsic_vsfile_rw_data *idata = data;
+ unsigned long new_hstatus, old_hstatus, old_vsiselect;
+
+ old_vsiselect = csr_read(CSR_VSISELECT);
+ old_hstatus = csr_read(CSR_HSTATUS);
+ new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
+ new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
+ csr_write(CSR_HSTATUS, new_hstatus);
+
+ switch (idata->isel) {
+ case IMSIC_EIDELIVERY:
+ if (idata->write)
+ imsic_vs_csr_write(IMSIC_EIDELIVERY, idata->val);
+ else
+ idata->val = imsic_vs_csr_read(IMSIC_EIDELIVERY);
+ break;
+ case IMSIC_EITHRESHOLD:
+ if (idata->write)
+ imsic_vs_csr_write(IMSIC_EITHRESHOLD, idata->val);
+ else
+ idata->val = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
+ break;
+ case IMSIC_EIP0 ... IMSIC_EIP63:
+ case IMSIC_EIE0 ... IMSIC_EIE63:
+#ifndef CONFIG_32BIT
+ if (idata->isel & 0x1)
+ break;
+#endif
+ if (idata->write)
+ imsic_eix_write(idata->isel, idata->val);
+ else
+ idata->val = imsic_eix_read(idata->isel);
+ break;
+ default:
+ break;
+ }
+
+ csr_write(CSR_HSTATUS, old_hstatus);
+ csr_write(CSR_VSISELECT, old_vsiselect);
+}
+
+static int imsic_vsfile_rw(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
+ unsigned long isel, bool write,
+ unsigned long *val)
+{
+ int rc;
+ struct imsic_vsfile_rw_data rdata;
+
+ /* We can only access registers if we have an IMSIC VS-file */
+ if (vsfile_cpu < 0 || vsfile_hgei <= 0)
+ return -EINVAL;
+
+ /* Check IMSIC register iselect */
+ rc = imsic_mrif_isel_check(nr_eix, isel);
+ if (rc)
+ return rc;
+
+ /* We can only access registers on the local CPU */
+ rdata.hgei = vsfile_hgei;
+ rdata.isel = isel;
+ rdata.write = write;
+ rdata.val = (write) ? *val : 0;
+ on_each_cpu_mask(cpumask_of(vsfile_cpu),
+ imsic_vsfile_local_rw, &rdata, 1);
+
+ if (!write)
+ *val = rdata.val;
+
+ return 0;
+}
+
+static void imsic_vsfile_local_clear(int vsfile_hgei, u32 nr_eix)
+{
+ u32 i;
+ unsigned long new_hstatus, old_hstatus, old_vsiselect;
+
+ /* We can only zero-out if we have an IMSIC VS-file */
+ if (vsfile_hgei <= 0)
+ return;
+
+ old_vsiselect = csr_read(CSR_VSISELECT);
+ old_hstatus = csr_read(CSR_HSTATUS);
+ new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
+ new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
+ csr_write(CSR_HSTATUS, new_hstatus);
+
+ imsic_vs_csr_write(IMSIC_EIDELIVERY, 0);
+ imsic_vs_csr_write(IMSIC_EITHRESHOLD, 0);
+ for (i = 0; i < nr_eix; i++) {
+ imsic_eix_write(IMSIC_EIP0 + i * 2, 0);
+ imsic_eix_write(IMSIC_EIE0 + i * 2, 0);
+#ifdef CONFIG_32BIT
+ imsic_eix_write(IMSIC_EIP0 + i * 2 + 1, 0);
+ imsic_eix_write(IMSIC_EIE0 + i * 2 + 1, 0);
+#endif
+ }
+
+ csr_write(CSR_HSTATUS, old_hstatus);
+ csr_write(CSR_VSISELECT, old_vsiselect);
+}
+
+static void imsic_vsfile_local_update(int vsfile_hgei, u32 nr_eix,
+ struct imsic_mrif *mrif)
+{
+ u32 i;
+ struct imsic_mrif_eix *eix;
+ unsigned long new_hstatus, old_hstatus, old_vsiselect;
+
+ /* We can only update if we have a HW IMSIC context */
+ if (vsfile_hgei <= 0)
+ return;
+
+ /*
+ * We don't use imsic_mrif_atomic_xyz() functions to read values
+ * from the MRIF in this function because it is always called with
+ * a pointer to a temporary MRIF on the stack.
+ */
+
+ old_vsiselect = csr_read(CSR_VSISELECT);
+ old_hstatus = csr_read(CSR_HSTATUS);
+ new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
+ new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
+ csr_write(CSR_HSTATUS, new_hstatus);
+
+ for (i = 0; i < nr_eix; i++) {
+ eix = &mrif->eix[i];
+ imsic_eix_set(IMSIC_EIP0 + i * 2, eix->eip[0]);
+ imsic_eix_set(IMSIC_EIE0 + i * 2, eix->eie[0]);
+#ifdef CONFIG_32BIT
+ imsic_eix_set(IMSIC_EIP0 + i * 2 + 1, eix->eip[1]);
+ imsic_eix_set(IMSIC_EIE0 + i * 2 + 1, eix->eie[1]);
+#endif
+ }
+ imsic_vs_csr_write(IMSIC_EITHRESHOLD, mrif->eithreshold);
+ imsic_vs_csr_write(IMSIC_EIDELIVERY, mrif->eidelivery);
+
+ csr_write(CSR_HSTATUS, old_hstatus);
+ csr_write(CSR_VSISELECT, old_vsiselect);
+}
+
+static void imsic_vsfile_cleanup(struct imsic *imsic)
+{
+ int old_vsfile_hgei, old_vsfile_cpu;
+ unsigned long flags;
+
+ /*
+ * We don't use imsic_mrif_atomic_xyz() functions to clear the
+ * SW-file in this function because it is always called when the
+ * VCPU is being destroyed.
+ */
+
+ write_lock_irqsave(&imsic->vsfile_lock, flags);
+ old_vsfile_hgei = imsic->vsfile_hgei;
+ old_vsfile_cpu = imsic->vsfile_cpu;
+ imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
+ imsic->vsfile_va = NULL;
+ imsic->vsfile_pa = 0;
+ write_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+ memset(imsic->swfile, 0, sizeof(*imsic->swfile));
+
+ if (old_vsfile_cpu >= 0)
+ kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
+}
+
+static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
+{
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+ struct imsic_mrif *mrif = imsic->swfile;
+
+ if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
+ imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
+ kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
+ else
+ kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
+}
+
+static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
+ struct imsic_mrif *mrif)
+{
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+
+ /*
+ * We don't use imsic_mrif_atomic_xyz() functions to read and
+ * write the SW-file and MRIF in this function because it is always
+ * called when the VCPU is not using the SW-file and the MRIF points
+ * to a temporary MRIF on the stack.
+ */
+
+ memcpy(mrif, imsic->swfile, sizeof(*mrif));
+ if (clear) {
+ memset(imsic->swfile, 0, sizeof(*imsic->swfile));
+ kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
+ }
+}
+
+static void imsic_swfile_update(struct kvm_vcpu *vcpu,
+ struct imsic_mrif *mrif)
+{
+ u32 i;
+ struct imsic_mrif_eix *seix, *eix;
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+ struct imsic_mrif *smrif = imsic->swfile;
+
+ imsic_mrif_atomic_write(smrif, &smrif->eidelivery, mrif->eidelivery);
+ imsic_mrif_atomic_write(smrif, &smrif->eithreshold, mrif->eithreshold);
+ for (i = 0; i < imsic->nr_eix; i++) {
+ seix = &smrif->eix[i];
+ eix = &mrif->eix[i];
+ imsic_mrif_atomic_or(smrif, &seix->eip[0], eix->eip[0]);
+ imsic_mrif_atomic_or(smrif, &seix->eie[0], eix->eie[0]);
+#ifdef CONFIG_32BIT
+ imsic_mrif_atomic_or(smrif, &seix->eip[1], eix->eip[1]);
+ imsic_mrif_atomic_or(smrif, &seix->eie[1], eix->eie[1]);
+#endif
+ }
+
+ imsic_swfile_extirq_update(vcpu);
+}
+
+void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ struct imsic_mrif tmrif;
+ int old_vsfile_hgei, old_vsfile_cpu;
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+
+ /* Read and clear IMSIC VS-file details */
+ write_lock_irqsave(&imsic->vsfile_lock, flags);
+ old_vsfile_hgei = imsic->vsfile_hgei;
+ old_vsfile_cpu = imsic->vsfile_cpu;
+ imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
+ imsic->vsfile_va = NULL;
+ imsic->vsfile_pa = 0;
+ write_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+ /* Do nothing if there is no IMSIC VS-file to release */
+ if (old_vsfile_cpu < 0)
+ return;
+
+ /*
+ * At this point, all interrupt producers are still using
+ * the old IMSIC VS-file so we first re-direct all interrupt
+ * producers.
+ */
+
+ /* Purge the G-stage mapping */
+ kvm_riscv_gstage_iounmap(vcpu->kvm,
+ vcpu->arch.aia_context.imsic_addr,
+ IMSIC_MMIO_PAGE_SZ);
+
+ /* TODO: Purge the IOMMU mapping ??? */
+
+ /*
+ * At this point, all interrupt producers have been re-directed
+ * to somewhere else so we move register state from the old IMSIC
+ * VS-file to the IMSIC SW-file.
+ */
+
+ /* Read and clear register state from old IMSIC VS-file */
+ memset(&tmrif, 0, sizeof(tmrif));
+ imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix,
+ true, &tmrif);
+
+ /* Update register state in IMSIC SW-file */
+ imsic_swfile_update(vcpu, &tmrif);
+
+ /* Free-up old IMSIC VS-file */
+ kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
+}
+
+int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ phys_addr_t new_vsfile_pa;
+ struct imsic_mrif tmrif;
+ void __iomem *new_vsfile_va;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_run *run = vcpu->run;
+ struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
+ struct imsic *imsic = vaia->imsic_state;
+ int ret = 0, new_vsfile_hgei = -1, old_vsfile_hgei, old_vsfile_cpu;
+
+ /* Do nothing for emulation mode */
+ if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_EMUL)
+ return 1;
+
+ /* Read old IMSIC VS-file details */
+ read_lock_irqsave(&imsic->vsfile_lock, flags);
+ old_vsfile_hgei = imsic->vsfile_hgei;
+ old_vsfile_cpu = imsic->vsfile_cpu;
+ read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+ /* Do nothing if we are continuing on the same CPU */
+ if (old_vsfile_cpu == vcpu->cpu)
+ return 1;
+
+ /* Allocate new IMSIC VS-file */
+ ret = kvm_riscv_aia_alloc_hgei(vcpu->cpu, vcpu,
+ &new_vsfile_va, &new_vsfile_pa);
+ if (ret <= 0) {
+ /* For HW acceleration mode, we can't continue */
+ if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_HWACCEL) {
+ run->fail_entry.hardware_entry_failure_reason =
+ CSR_HSTATUS;
+ run->fail_entry.cpu = vcpu->cpu;
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ return 0;
+ }
+
+ /* Release old IMSIC VS-file */
+ if (old_vsfile_cpu >= 0)
+ kvm_riscv_vcpu_aia_imsic_release(vcpu);
+
+ /* For automatic mode, we continue */
+ goto done;
+ }
+ new_vsfile_hgei = ret;
+
+ /*
+ * At this point, all interrupt producers are still using
+ * the old IMSIC VS-file so we first move all interrupt
+ * producers to the new IMSIC VS-file.
+ */
+
+ /* Zero-out new IMSIC VS-file */
+ imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);
+
+ /* Update G-stage mapping for the new IMSIC VS-file */
+ ret = kvm_riscv_gstage_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
+ new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
+ true, true);
+ if (ret)
+ goto fail_free_vsfile_hgei;
+
+ /* TODO: Update the IOMMU mapping ??? */
+
+ /* Update new IMSIC VS-file details in IMSIC context */
+ write_lock_irqsave(&imsic->vsfile_lock, flags);
+ imsic->vsfile_hgei = new_vsfile_hgei;
+ imsic->vsfile_cpu = vcpu->cpu;
+ imsic->vsfile_va = new_vsfile_va;
+ imsic->vsfile_pa = new_vsfile_pa;
+ write_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+ /*
+ * At this point, all interrupt producers have been moved
+ * to the new IMSIC VS-file so we move register state from
+ * the old IMSIC VS/SW-file to the new IMSIC VS-file.
+ */
+
+ memset(&tmrif, 0, sizeof(tmrif));
+ if (old_vsfile_cpu >= 0) {
+ /* Read and clear register state from old IMSIC VS-file */
+ imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu,
+ imsic->nr_hw_eix, true, &tmrif);
+
+ /* Free-up old IMSIC VS-file */
+ kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
+ } else {
+ /* Read and clear register state from IMSIC SW-file */
+ imsic_swfile_read(vcpu, true, &tmrif);
+ }
+
+ /* Restore register state in the new IMSIC VS-file */
+ imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif);
+
+done:
+ /* Set VCPU HSTATUS.VGEIN to new IMSIC VS-file */
+ vcpu->arch.guest_context.hstatus &= ~HSTATUS_VGEIN;
+ if (new_vsfile_hgei > 0)
+ vcpu->arch.guest_context.hstatus |=
+ ((unsigned long)new_vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
+
+ /* Continue run-loop */
+ return 1;
+
+fail_free_vsfile_hgei:
+ kvm_riscv_aia_free_hgei(vcpu->cpu, new_vsfile_hgei);
+ return ret;
+}
+
+int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask)
+{
+ u32 topei;
+ struct imsic_mrif_eix *eix;
+ int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+
+ if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
+ /* Read pending and enabled interrupt with highest priority */
+ topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
+ imsic->nr_msis);
+ if (val)
+ *val = topei;
+
+ /* Writes ignore value and clear top pending interrupt */
+ if (topei && wr_mask) {
+ topei >>= TOPEI_ID_SHIFT;
+ if (topei) {
+ eix = &imsic->swfile->eix[topei /
+ BITS_PER_TYPE(u64)];
+ clear_bit(topei & (BITS_PER_TYPE(u64) - 1),
+ eix->eip);
+ }
+ }
+ } else {
+ r = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, isel,
+ val, new_val, wr_mask);
+ /* Forward unknown IMSIC register to user-space */
+ if (r)
+ rc = (r == -ENOENT) ? 0 : KVM_INSN_ILLEGAL_TRAP;
+ }
+
+ if (wr_mask)
+ imsic_swfile_extirq_update(vcpu);
+
+ return rc;
+}
+
+int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
+ bool write, unsigned long *val)
+{
+ u32 isel, vcpu_id;
+ unsigned long flags;
+ struct imsic *imsic;
+ struct kvm_vcpu *vcpu;
+ int rc, vsfile_hgei, vsfile_cpu;
+
+ if (!kvm_riscv_aia_initialized(kvm))
+ return -ENODEV;
+
+ vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
+ vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
+ if (!vcpu)
+ return -ENODEV;
+
+ isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
+ imsic = vcpu->arch.aia_context.imsic_state;
+
+ read_lock_irqsave(&imsic->vsfile_lock, flags);
+
+ rc = 0;
+ vsfile_hgei = imsic->vsfile_hgei;
+ vsfile_cpu = imsic->vsfile_cpu;
+ if (vsfile_cpu < 0) {
+ if (write) {
+ rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
+ isel, NULL, *val, -1UL);
+ imsic_swfile_extirq_update(vcpu);
+ } else
+ rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
+ isel, val, 0, 0);
+ }
+
+ read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+ if (!rc && vsfile_cpu >= 0)
+ rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix,
+ isel, write, val);
+
+ return rc;
+}
+
+int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type)
+{
+ u32 isel, vcpu_id;
+ struct imsic *imsic;
+ struct kvm_vcpu *vcpu;
+
+ if (!kvm_riscv_aia_initialized(kvm))
+ return -ENODEV;
+
+ vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
+ vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
+ if (!vcpu)
+ return -ENODEV;
+
+ isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
+ imsic = vcpu->arch.aia_context.imsic_state;
+ return imsic_mrif_isel_check(imsic->nr_eix, isel);
+}
+
+void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu)
+{
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+
+ if (!imsic)
+ return;
+
+ kvm_riscv_vcpu_aia_imsic_release(vcpu);
+
+ memset(imsic->swfile, 0, sizeof(*imsic->swfile));
+}
+
+int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
+ u32 guest_index, u32 offset, u32 iid)
+{
+ unsigned long flags;
+ struct imsic_mrif_eix *eix;
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+
+ /* We only emulate one IMSIC MMIO page for each Guest VCPU */
+ if (!imsic || !iid || guest_index ||
+ (offset != IMSIC_MMIO_SETIPNUM_LE &&
+ offset != IMSIC_MMIO_SETIPNUM_BE))
+ return -ENODEV;
+
+ iid = (offset == IMSIC_MMIO_SETIPNUM_BE) ? __swab32(iid) : iid;
+ if (imsic->nr_msis <= iid)
+ return -EINVAL;
+
+ read_lock_irqsave(&imsic->vsfile_lock, flags);
+
+ if (imsic->vsfile_cpu >= 0) {
+ writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
+ kvm_vcpu_kick(vcpu);
+ } else {
+ eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
+ set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
+ imsic_swfile_extirq_update(vcpu);
+ }
+
+ read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+ return 0;
+}
+
+static int imsic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ if (len != 4 || (addr & 0x3) != 0)
+ return -EOPNOTSUPP;
+
+ *((u32 *)val) = 0;
+
+ return 0;
+}
+
+static int imsic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ struct kvm_msi msi = { 0 };
+
+ if (len != 4 || (addr & 0x3) != 0)
+ return -EOPNOTSUPP;
+
+ msi.address_hi = addr >> 32;
+ msi.address_lo = (u32)addr;
+ msi.data = *((const u32 *)val);
+ kvm_riscv_aia_inject_msi(vcpu->kvm, &msi);
+
+ return 0;
+};
+
+static struct kvm_io_device_ops imsic_iodoev_ops = {
+ .read = imsic_mmio_read,
+ .write = imsic_mmio_write,
+};
+
+int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+ struct imsic *imsic;
+ struct page *swfile_page;
+ struct kvm *kvm = vcpu->kvm;
+
+ /* Fail if we have zero IDs */
+ if (!kvm->arch.aia.nr_ids)
+ return -EINVAL;
+
+ /* Allocate IMSIC context */
+ imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
+ if (!imsic)
+ return -ENOMEM;
+ vcpu->arch.aia_context.imsic_state = imsic;
+
+ /* Setup IMSIC context */
+ imsic->nr_msis = kvm->arch.aia.nr_ids + 1;
+ rwlock_init(&imsic->vsfile_lock);
+ imsic->nr_eix = BITS_TO_U64(imsic->nr_msis);
+ imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids);
+ imsic->vsfile_hgei = imsic->vsfile_cpu = -1;
+
+ /* Setup IMSIC SW-file */
+ swfile_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(sizeof(*imsic->swfile)));
+ if (!swfile_page) {
+ ret = -ENOMEM;
+ goto fail_free_imsic;
+ }
+ imsic->swfile = page_to_virt(swfile_page);
+ imsic->swfile_pa = page_to_phys(swfile_page);
+
+ /* Setup IO device */
+ kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
+ vcpu->arch.aia_context.imsic_addr,
+ KVM_DEV_RISCV_IMSIC_SIZE,
+ &imsic->iodev);
+ mutex_unlock(&kvm->slots_lock);
+ if (ret)
+ goto fail_free_swfile;
+
+ return 0;
+
+fail_free_swfile:
+ free_pages((unsigned long)imsic->swfile,
+ get_order(sizeof(*imsic->swfile)));
+fail_free_imsic:
+ vcpu->arch.aia_context.imsic_state = NULL;
+ kfree(imsic);
+ return ret;
+}
+
+void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+
+ if (!imsic)
+ return;
+
+ imsic_vsfile_cleanup(imsic);
+
+ mutex_lock(&kvm->slots_lock);
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &imsic->iodev);
+ mutex_unlock(&kvm->slots_lock);
+
+ free_pages((unsigned long)imsic->swfile,
+ get_order(sizeof(*imsic->swfile)));
+
+ vcpu->arch.aia_context.imsic_state = NULL;
+ kfree(imsic);
+}
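A hedged sketch of MSI delivery from a VMM: kvm_riscv_aia_inject_msi() (in aia_device.c above) matches the written guest-physical page against each VCPU's IMSIC address and then calls kvm_riscv_vcpu_aia_imsic_inject(), so the doorbell address is simply the target VCPU's IMSIC page (seteipnum_le sits at offset 0 of the interrupt file) and the payload is the interrupt identity. This assumes KVM_SIGNAL_MSI is enabled for RISC-V by this series:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Raise interrupt identity 'iid' in the IMSIC mapped at 'vcpu_imsic_addr' */
    static int inject_guest_msi(int vm_fd, uint64_t vcpu_imsic_addr, uint32_t iid)
    {
            struct kvm_msi msi;

            memset(&msi, 0, sizeof(msi));
            msi.address_lo = (uint32_t)vcpu_imsic_addr;
            msi.address_hi = (uint32_t)(vcpu_imsic_addr >> 32);
            msi.data = iid;                /* must be non-zero and below nr_msis */

            return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
    }

When a VS-file is resident (vsfile_cpu >= 0) the write is forwarded straight to the hardware interrupt file; otherwise the pending bit is set in the SW-file and the VCPU's external-interrupt line is re-evaluated.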
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index a7112d583637..48ae0d4b3932 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -116,7 +116,8 @@ static int __init riscv_kvm_init(void)
kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
if (kvm_riscv_aia_available())
- kvm_info("AIA available\n");
+ kvm_info("AIA available with %d guest external interrupts\n",
+ kvm_riscv_aia_nr_hgei);
rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (rc) {
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 0e5479600695..44bc324aeeb0 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -296,7 +296,7 @@ static void make_xfence_request(struct kvm *kvm,
unsigned int actual_req = req;
DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
- bitmap_clear(vcpu_mask, 0, KVM_MAX_VCPUS);
+ bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
kvm_for_each_vcpu(i, vcpu, kvm) {
if (hbase != -1UL) {
if (vcpu->vcpu_id < hbase)
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index de24127e7e93..d12ef99901fc 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -64,6 +64,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(SSAIA),
KVM_ISA_EXT_ARR(SSTC),
KVM_ISA_EXT_ARR(SVINVAL),
+ KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
@@ -107,6 +108,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SSAIA:
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
+ case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_RISCV_ISA_EXT_ZBB:
return false;
@@ -263,10 +265,12 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
+ kvm_riscv_aia_wakeon_hgei(vcpu, true);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
+ kvm_riscv_aia_wakeon_hgei(vcpu, false);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 4ea101a73d8b..2415722c01b8 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -183,6 +183,8 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->exit_reason = KVM_EXIT_UNKNOWN;
switch (trap->scause) {
case EXC_INST_ILLEGAL:
+ case EXC_LOAD_MISALIGNED:
+ case EXC_STORE_MISALIGNED:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
kvm_riscv_vcpu_trap_redirect(vcpu, trap);
ret = 1;
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index e52fde504433..7b46e04fb667 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -20,9 +20,7 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
};
#endif
-#ifdef CONFIG_RISCV_PMU_SBI
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
-#else
+#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
.extid_start = -1UL,
.extid_end = -1UL,
@@ -31,49 +29,49 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
#endif
struct kvm_riscv_sbi_extension_entry {
- enum KVM_RISCV_SBI_EXT_ID dis_idx;
+ enum KVM_RISCV_SBI_EXT_ID ext_idx;
const struct kvm_vcpu_sbi_extension *ext_ptr;
};
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
{
- .dis_idx = KVM_RISCV_SBI_EXT_V01,
+ .ext_idx = KVM_RISCV_SBI_EXT_V01,
.ext_ptr = &vcpu_sbi_ext_v01,
},
{
- .dis_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
+ .ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
.ext_ptr = &vcpu_sbi_ext_base,
},
{
- .dis_idx = KVM_RISCV_SBI_EXT_TIME,
+ .ext_idx = KVM_RISCV_SBI_EXT_TIME,
.ext_ptr = &vcpu_sbi_ext_time,
},
{
- .dis_idx = KVM_RISCV_SBI_EXT_IPI,
+ .ext_idx = KVM_RISCV_SBI_EXT_IPI,
.ext_ptr = &vcpu_sbi_ext_ipi,
},
{
- .dis_idx = KVM_RISCV_SBI_EXT_RFENCE,
+ .ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
.ext_ptr = &vcpu_sbi_ext_rfence,
},
{
- .dis_idx = KVM_RISCV_SBI_EXT_SRST,
+ .ext_idx = KVM_RISCV_SBI_EXT_SRST,
.ext_ptr = &vcpu_sbi_ext_srst,
},
{
- .dis_idx = KVM_RISCV_SBI_EXT_HSM,
+ .ext_idx = KVM_RISCV_SBI_EXT_HSM,
.ext_ptr = &vcpu_sbi_ext_hsm,
},
{
- .dis_idx = KVM_RISCV_SBI_EXT_PMU,
+ .ext_idx = KVM_RISCV_SBI_EXT_PMU,
.ext_ptr = &vcpu_sbi_ext_pmu,
},
{
- .dis_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
+ .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
.ext_ptr = &vcpu_sbi_ext_experimental,
},
{
- .dis_idx = KVM_RISCV_SBI_EXT_VENDOR,
+ .ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
.ext_ptr = &vcpu_sbi_ext_vendor,
},
};
@@ -147,7 +145,7 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
- if (sbi_ext[i].dis_idx == reg_num) {
+ if (sbi_ext[i].ext_idx == reg_num) {
sext = &sbi_ext[i];
break;
}
@@ -155,7 +153,15 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
if (!sext)
return -ENOENT;
- scontext->extension_disabled[sext->dis_idx] = !reg_val;
+ /*
+ * We can't set the extension status to available here, since it may
+ * have a probe() function which needs to confirm availability first,
+ * but it may be too early to call that here. We can set the status to
+ * unavailable, though.
+ */
+ if (!reg_val)
+ scontext->ext_status[sext->ext_idx] =
+ KVM_RISCV_SBI_EXT_UNAVAILABLE;
return 0;
}
@@ -172,7 +178,7 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
- if (sbi_ext[i].dis_idx == reg_num) {
+ if (sbi_ext[i].ext_idx == reg_num) {
sext = &sbi_ext[i];
break;
}
@@ -180,7 +186,15 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
if (!sext)
return -ENOENT;
- *reg_val = !scontext->extension_disabled[sext->dis_idx];
+ /*
+ * If the extension status is still uninitialized, then we should probe
+ * to determine if it's available, but it may be too early to do that
+ * here. The best we can do is report that the extension has not been
+ * disabled, i.e. we return 1 when the extension is available and also
+ * when it only may be available.
+ */
+ *reg_val = scontext->ext_status[sext->ext_idx] !=
+ KVM_RISCV_SBI_EXT_UNAVAILABLE;
return 0;
}
@@ -307,18 +321,32 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
struct kvm_vcpu *vcpu, unsigned long extid)
{
- int i;
- const struct kvm_riscv_sbi_extension_entry *sext;
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+ const struct kvm_riscv_sbi_extension_entry *entry;
+ const struct kvm_vcpu_sbi_extension *ext;
+ int i;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
- sext = &sbi_ext[i];
- if (sext->ext_ptr->extid_start <= extid &&
- sext->ext_ptr->extid_end >= extid) {
- if (sext->dis_idx < KVM_RISCV_SBI_EXT_MAX &&
- scontext->extension_disabled[sext->dis_idx])
+ entry = &sbi_ext[i];
+ ext = entry->ext_ptr;
+
+ if (ext->extid_start <= extid && ext->extid_end >= extid) {
+ if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
+ scontext->ext_status[entry->ext_idx] ==
+ KVM_RISCV_SBI_EXT_AVAILABLE)
+ return ext;
+ if (scontext->ext_status[entry->ext_idx] ==
+ KVM_RISCV_SBI_EXT_UNAVAILABLE)
return NULL;
- return sbi_ext[i].ext_ptr;
+ if (ext->probe && !ext->probe(vcpu)) {
+ scontext->ext_status[entry->ext_idx] =
+ KVM_RISCV_SBI_EXT_UNAVAILABLE;
+ return NULL;
+ }
+
+ scontext->ext_status[entry->ext_idx] =
+ KVM_RISCV_SBI_EXT_AVAILABLE;
+ return ext;
}
}
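For illustration, the path handled by riscv_vcpu_set_sbi_ext_single() above can be driven from user-space roughly as sketched below. The register-id encoding follows the existing RISC-V ONE_REG uapi (KVM_REG_RISCV_SBI_EXT with the SINGLE subtype); KVM_REG_SIZE_U64 assumes an rv64 host. Writing 0 marks the extension unavailable, while a non-zero write leaves it to be lazily probed by kvm_vcpu_sbi_find_ext():

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Enable (1) or hide (0) a single SBI extension for one VCPU */
    static int sbi_ext_set_enabled(int vcpu_fd, uint64_t ext_id, uint64_t enabled)
    {
            struct kvm_one_reg reg = {
                    .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                          KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | ext_id,
                    .addr = (uint64_t)(unsigned long)&enabled,
            };

            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }

    /* Example: hide the SBI PMU extension from the guest */
    /*   sbi_ext_set_enabled(vcpu_fd, KVM_RISCV_SBI_EXT_PMU, 0);   */

These writes are meant to be done before the VCPU first runs, in line with the lazy-probe model introduced by this change.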
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index 6ef15f78e80f..7e2b50c692c1 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -55,11 +55,129 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_riscv_aia_destroy_vm(kvm);
}
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql,
+ bool line_status)
+{
+ if (!irqchip_in_kernel(kvm))
+ return -ENXIO;
+
+ return kvm_riscv_aia_inject_irq(kvm, irql->irq, irql->level);
+}
+
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
+{
+ struct kvm_msi msi;
+
+ if (!level)
+ return -1;
+
+ msi.address_lo = e->msi.address_lo;
+ msi.address_hi = e->msi.address_hi;
+ msi.data = e->msi.data;
+ msi.flags = e->msi.flags;
+ msi.devid = e->msi.devid;
+
+ return kvm_riscv_aia_inject_msi(kvm, &msi);
+}
+
+static int kvm_riscv_set_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
+{
+ return kvm_riscv_aia_inject_irq(kvm, e->irqchip.pin, level);
+}
+
+int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines)
+{
+ struct kvm_irq_routing_entry *ents;
+ int i, rc;
+
+ ents = kcalloc(lines, sizeof(*ents), GFP_KERNEL);
+ if (!ents)
+ return -ENOMEM;
+
+ for (i = 0; i < lines; i++) {
+ ents[i].gsi = i;
+ ents[i].type = KVM_IRQ_ROUTING_IRQCHIP;
+ ents[i].u.irqchip.irqchip = 0;
+ ents[i].u.irqchip.pin = i;
+ }
+ rc = kvm_set_irq_routing(kvm, ents, lines, 0);
+ kfree(ents);
+
+ return rc;
+}
+
+bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
+{
+ return irqchip_in_kernel(kvm);
+}
+
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue)
+{
+ int r = -EINVAL;
+
+ switch (ue->type) {
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ e->set = kvm_riscv_set_irq;
+ e->irqchip.irqchip = ue->u.irqchip.irqchip;
+ e->irqchip.pin = ue->u.irqchip.pin;
+ if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
+ (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
+ goto out;
+ break;
+ case KVM_IRQ_ROUTING_MSI:
+ e->set = kvm_set_msi;
+ e->msi.address_lo = ue->u.msi.address_lo;
+ e->msi.address_hi = ue->u.msi.address_hi;
+ e->msi.data = ue->u.msi.data;
+ e->msi.flags = ue->flags;
+ e->msi.devid = ue->u.msi.devid;
+ break;
+ default:
+ goto out;
+ }
+ r = 0;
+out:
+ return r;
+}
+
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ if (!level)
+ return -EWOULDBLOCK;
+
+ switch (e->type) {
+ case KVM_IRQ_ROUTING_MSI:
+ return kvm_set_msi(e, kvm, irq_source_id, level, line_status);
+
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ return kvm_riscv_set_irq(e, kvm, irq_source_id,
+ level, line_status);
+ }
+
+ return -EWOULDBLOCK;
+}
+
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
+{
+ return irqchip_in_kernel(kvm);
+}
+
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
switch (ext) {
+ case KVM_CAP_IRQCHIP:
+ r = kvm_riscv_aia_available();
+ break;
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
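Given kvm_vm_ioctl_irq_line() above, asserting or de-asserting a wired APLIC source from user-space is a plain KVM_IRQ_LINE ioctl once the in-kernel AIA irqchip has been created and initialized (KVM_CAP_IRQCHIP). A minimal sketch, assuming the irq number is the APLIC source index consumed by kvm_riscv_aia_aplic_inject():

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* level: 1 = assert the line, 0 = de-assert it */
    static int set_wired_irq(int vm_fd, unsigned int irq, int level)
    {
            struct kvm_irq_level irql = {
                    .irq = irq,
                    .level = level,
            };

            return ioctl(vm_fd, KVM_IRQ_LINE, &irql);
    }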
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index fca532ddf3ec..fbc59b3f69f2 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -104,9 +104,9 @@ EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
-static void cbo_get_block_size(struct device_node *node,
- const char *name, u32 *block_size,
- unsigned long *first_hartid)
+static void __init cbo_get_block_size(struct device_node *node,
+ const char *name, u32 *block_size,
+ unsigned long *first_hartid)
{
unsigned long hartid;
u32 val;
@@ -126,7 +126,7 @@ static void cbo_get_block_size(struct device_node *node,
}
}
-void riscv_init_cbo_blocksizes(void)
+void __init riscv_init_cbo_blocksizes(void)
{
unsigned long cbom_hartid, cboz_hartid;
u32 cbom_block_size = 0, cboz_block_size = 0;
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index d919efab6eba..d51a75864e53 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -10,7 +10,7 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>
-static bool noncoherent_supported;
+static bool noncoherent_supported __ro_after_init;
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 542883b3b49b..96225a8533ad 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -73,7 +73,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
}
out:
- WARN_ON_ONCE(pte && pte_present(*pte) && !pte_huge(*pte));
+ if (pte) {
+ pte_t pteval = ptep_get_lockless(pte);
+
+ WARN_ON_ONCE(pte_present(pteval) && !pte_huge(pteval));
+ }
return pte;
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 3b1e927a06b7..70fb31960b63 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -267,7 +267,6 @@ static void __init setup_bootmem(void)
dma_contiguous_reserve(dma32_phys_limit);
if (IS_ENABLED(CONFIG_64BIT))
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
- memblock_allow_resize();
}
#ifdef CONFIG_MMU
@@ -357,7 +356,7 @@ static phys_addr_t __init alloc_pte_late(uintptr_t va)
unsigned long vaddr;
vaddr = __get_free_page(GFP_KERNEL);
- BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));
+ BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page((void *)vaddr)));
return __pa(vaddr);
}
@@ -440,7 +439,7 @@ static phys_addr_t __init alloc_pmd_late(uintptr_t va)
unsigned long vaddr;
vaddr = __get_free_page(GFP_KERNEL);
- BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));
+ BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page((void *)vaddr)));
return __pa(vaddr);
}
@@ -1370,6 +1369,9 @@ void __init paging_init(void)
{
setup_bootmem();
setup_vm_final();
+
+ /* Depends on the linear mapping being ready */
+ memblock_allow_resize();
}
void __init misc_mem_init(void)
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index ed646c583e4f..5ed242897b0d 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -27,6 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbac
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
+KBUILD_CFLAGS_DECOMPRESSOR += -fPIE
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index b07b0610950e..bbefe5e86bdf 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -26,10 +26,10 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/appldata.h>
#include <asm/vtimer.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
#include <asm/smp.h>
#include "appldata.h"
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 21c3147bd92a..fc608f9b79ab 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -15,7 +15,7 @@
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "appldata.h"
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index 3f79b9efb803..637c29c3f6e3 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -67,7 +67,7 @@ ipl_start:
jz .Lagain1 # skip dataset header
larl %r13,.L_eof
clc 0(3,%r4),0(%r13) # if it is EOFx
- jz .Lagain1 # skip dateset trailer
+ jz .Lagain1 # skip data set trailer
lgr %r5,%r2
la %r6,COMMAND_LINE-PARMAREA(%r12)
lgr %r7,%r2
@@ -185,19 +185,19 @@ ipl_start:
larl %r13,.Lcrash
lpsw 0(%r13)
- .align 8
+ .balign 8
.Lwaitpsw:
.quad 0x0202000180000000,.Lioint
.Lnewpswmask:
.quad 0x0000000180000000
- .align 8
+ .balign 8
.Lorb: .long 0x00000000,0x0080ff00,.Lccws
.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
- .align 8
+ .balign 8
.Lcr6: .quad 0x00000000ff000000
- .align 8
+ .balign 8
.Lcrash:.long 0x000a0000,0x00000000
- .align 8
+ .balign 8
.Lccws: .rept 19
.long 0x02600050,0x00000000
.endr
@@ -207,7 +207,7 @@ ipl_start:
.byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
.L_eof: .long 0xc5d6c600 /* C'EOF' */
.L_hdr: .long 0xc8c4d900 /* C'HDR' */
- .align 8
+ .balign 8
.Lcpuid:.fill 8,1,0
#
@@ -265,7 +265,7 @@ SYM_CODE_START_LOCAL(startup_normal)
brasl %r14,startup_kernel
SYM_CODE_END(startup_normal)
- .align 8
+ .balign 8
6: .long 0x7fffffff,0xffffffff
.Lext_new_psw:
.quad 0x0002000180000000,0x1b0 # disabled wait
diff --git a/arch/s390/boot/head_kdump.S b/arch/s390/boot/head_kdump.S
index f015469e7db9..f7107c76258c 100644
--- a/arch/s390/boot/head_kdump.S
+++ b/arch/s390/boot/head_kdump.S
@@ -82,12 +82,12 @@ SYM_CODE_START_LOCAL(startup_kdump)
#
# Startup of kdump (relocated new kernel)
#
-.align 2
+ .balign 2
startup_kdump_relocated:
basr %r13,0
0: lpswe .Lrestart_psw-0b(%r13) # Start new kernel...
SYM_CODE_END(startup_kdump)
-.align 8
+ .balign 8
.Lrestart_psw:
.quad 0x0000000080000000,0x0000000000000000 + startup
#else
@@ -95,7 +95,7 @@ SYM_CODE_START_LOCAL(startup_kdump)
larl %r13,startup_kdump_crash
lpswe 0(%r13)
SYM_CODE_END(startup_kdump)
-.align 8
+ .balign 8
startup_kdump_crash:
.quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
#endif /* CONFIG_CRASH_DUMP */
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index 0a077c0a2056..1e66d2cbb096 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -47,6 +47,10 @@ void uv_query_info(void)
uv_info.conf_dump_finalize_len = uvcb.conf_dump_finalize_len;
uv_info.supp_att_req_hdr_ver = uvcb.supp_att_req_hdr_ver;
uv_info.supp_att_pflags = uvcb.supp_att_pflags;
+ uv_info.supp_add_secret_req_ver = uvcb.supp_add_secret_req_ver;
+ uv_info.supp_add_secret_pcf = uvcb.supp_add_secret_pcf;
+ uv_info.supp_secret_types = uvcb.supp_secret_types;
+ uv_info.max_secrets = uvcb.max_secrets;
}
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index 6ea17628ea10..34ee47926891 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -48,7 +48,7 @@
*
* Note that the constant definitions below are extended in order to compute
* intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
- * The righmost doubleword can be 0 to prevent contribution to the result or
+ * The rightmost doubleword can be 0 to prevent contribution to the result or
* can be multiplied by 1 to perform an XOR without the need for a separate
* VECTOR EXCLUSIVE OR instruction.
*
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index d5d967166bac..40c2b82f083a 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -333,7 +333,7 @@ union ap_qact_ap_info {
};
/**
- * ap_qact(): Query AP combatibility type.
+ * ap_qact(): Query AP compatibility type.
* @qid: The AP queue number
* @apinfo: On input the info about the AP queue. On output the
* alternate AP queue info provided by the qact function
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index c5bd9f4437e5..f2240392c708 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -8,8 +8,8 @@
#ifndef _ASM_S390_APPLDATA_H
#define _ASM_S390_APPLDATA_H
+#include <linux/io.h>
#include <asm/diag.h>
-#include <asm/io.h>
#define APPLDATA_START_INTERVAL_REC 0x80
#define APPLDATA_STOP_REC 0x81
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index 55a02a153dfc..e6532477f126 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -25,7 +25,7 @@
#define __EX_TABLE(_section, _fault, _target, _type) \
stringify_in_c(.section _section,"a";) \
- stringify_in_c(.align 4;) \
+ stringify_in_c(.balign 4;) \
stringify_in_c(.long (_fault) - .;) \
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.short (_type);) \
@@ -34,7 +34,7 @@
#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\
stringify_in_c(.section _section,"a";) \
- stringify_in_c(.align 4;) \
+ stringify_in_c(.balign 4;) \
stringify_in_c(.long (_fault) - .;) \
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.short (_type);) \
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
index dec1c4ce628c..c260adb25997 100644
--- a/arch/s390/include/asm/dma.h
+++ b/arch/s390/include/asm/dma.h
@@ -2,7 +2,7 @@
#ifndef _ASM_S390_DMA_H
#define _ASM_S390_DMA_H
-#include <asm/io.h>
+#include <linux/io.h>
/*
* MAX_DMA_ADDRESS is ambiguous because on s390 its completely unrelated
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 8aa1f6530a3e..69ccc464a430 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -118,8 +118,8 @@ struct lowcore {
__u64 avg_steal_timer; /* 0x0300 */
__u64 last_update_timer; /* 0x0308 */
__u64 last_update_clock; /* 0x0310 */
- __u64 int_clock; /* 0x0318*/
- __u64 mcck_clock; /* 0x0320 */
+ __u64 int_clock; /* 0x0318 */
+ __u8 pad_0x0320[0x0328-0x0320]; /* 0x0320 */
__u64 clock_comparator; /* 0x0328 */
__u64 boot_clock[2]; /* 0x0330 */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 8a2a3b5d1e29..a9c138fcd2ad 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -19,7 +19,7 @@
#define PAGE_SHIFT _PAGE_SHIFT
#define PAGE_SIZE _PAGE_SIZE
#define PAGE_MASK _PAGE_MASK
-#define PAGE_DEFAULT_ACC 0
+#define PAGE_DEFAULT_ACC _AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC 9
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
@@ -179,8 +179,6 @@ int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif
-#endif /* !__ASSEMBLY__ */
-
#define __PAGE_OFFSET 0x0UL
#define PAGE_OFFSET 0x0UL
@@ -204,6 +202,8 @@ int arch_make_page_accessible(struct page *page);
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
+#endif /* !__ASSEMBLY__ */
+
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index bfb8c3cb8aee..d28bf8fb2799 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -23,7 +23,31 @@
#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
#define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS)
-#ifndef __ASSEMBLY__
+#define PSW32_MASK_PER _AC(0x40000000, UL)
+#define PSW32_MASK_DAT _AC(0x04000000, UL)
+#define PSW32_MASK_IO _AC(0x02000000, UL)
+#define PSW32_MASK_EXT _AC(0x01000000, UL)
+#define PSW32_MASK_KEY _AC(0x00F00000, UL)
+#define PSW32_MASK_BASE _AC(0x00080000, UL) /* Always one */
+#define PSW32_MASK_MCHECK _AC(0x00040000, UL)
+#define PSW32_MASK_WAIT _AC(0x00020000, UL)
+#define PSW32_MASK_PSTATE _AC(0x00010000, UL)
+#define PSW32_MASK_ASC _AC(0x0000C000, UL)
+#define PSW32_MASK_CC _AC(0x00003000, UL)
+#define PSW32_MASK_PM _AC(0x00000f00, UL)
+#define PSW32_MASK_RI _AC(0x00000080, UL)
+
+#define PSW32_ADDR_AMODE _AC(0x80000000, UL)
+#define PSW32_ADDR_INSN _AC(0x7FFFFFFF, UL)
+
+#define PSW32_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 20)
+
+#define PSW32_ASC_PRIMARY _AC(0x00000000, UL)
+#define PSW32_ASC_ACCREG _AC(0x00004000, UL)
+#define PSW32_ASC_SECONDARY _AC(0x00008000, UL)
+#define PSW32_ASC_HOME _AC(0x0000C000, UL)
+
+#define PSW_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 52)
#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_DAT)
@@ -31,6 +55,8 @@
PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
+#ifndef __ASSEMBLY__
+
struct psw_bits {
unsigned long : 1;
unsigned long per : 1; /* PER-Mask */
@@ -71,30 +97,6 @@ enum {
&(*(struct psw_bits *)(&(__psw))); \
}))
-#define PSW32_MASK_PER 0x40000000UL
-#define PSW32_MASK_DAT 0x04000000UL
-#define PSW32_MASK_IO 0x02000000UL
-#define PSW32_MASK_EXT 0x01000000UL
-#define PSW32_MASK_KEY 0x00F00000UL
-#define PSW32_MASK_BASE 0x00080000UL /* Always one */
-#define PSW32_MASK_MCHECK 0x00040000UL
-#define PSW32_MASK_WAIT 0x00020000UL
-#define PSW32_MASK_PSTATE 0x00010000UL
-#define PSW32_MASK_ASC 0x0000C000UL
-#define PSW32_MASK_CC 0x00003000UL
-#define PSW32_MASK_PM 0x00000f00UL
-#define PSW32_MASK_RI 0x00000080UL
-
-#define PSW32_ADDR_AMODE 0x80000000UL
-#define PSW32_ADDR_INSN 0x7FFFFFFFUL
-
-#define PSW32_DEFAULT_KEY (((u32)PAGE_DEFAULT_ACC) << 20)
-
-#define PSW32_ASC_PRIMARY 0x00000000UL
-#define PSW32_ASC_ACCREG 0x00004000UL
-#define PSW32_ASC_SECONDARY 0x00008000UL
-#define PSW32_ASC_HOME 0x0000C000UL
-
typedef struct {
unsigned int mask;
unsigned int addr;
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 28a9ad57b6f1..d6bb2f4f78d1 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -58,6 +58,9 @@
#define UVC_CMD_SET_SHARED_ACCESS 0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001
#define UVC_CMD_RETR_ATTEST 0x1020
+#define UVC_CMD_ADD_SECRET 0x1031
+#define UVC_CMD_LIST_SECRETS 0x1033
+#define UVC_CMD_LOCK_SECRETS 0x1034
/* Bits in installed uv calls */
enum uv_cmds_inst {
@@ -88,6 +91,9 @@ enum uv_cmds_inst {
BIT_UVC_CMD_DUMP_CPU = 26,
BIT_UVC_CMD_DUMP_COMPLETE = 27,
BIT_UVC_CMD_RETR_ATTEST = 28,
+ BIT_UVC_CMD_ADD_SECRET = 29,
+ BIT_UVC_CMD_LIST_SECRETS = 30,
+ BIT_UVC_CMD_LOCK_SECRETS = 31,
};
enum uv_feat_ind {
@@ -117,7 +123,7 @@ struct uv_cb_qui {
u32 reserved70[3]; /* 0x0070 */
u32 max_num_sec_conf; /* 0x007c */
u64 max_guest_stor_addr; /* 0x0080 */
- u8 reserved88[158 - 136]; /* 0x0088 */
+ u8 reserved88[0x9e - 0x88]; /* 0x0088 */
u16 max_guest_cpu_id; /* 0x009e */
u64 uv_feature_indications; /* 0x00a0 */
u64 reserveda8; /* 0x00a8 */
@@ -129,7 +135,12 @@ struct uv_cb_qui {
u64 reservedd8; /* 0x00d8 */
u64 supp_att_req_hdr_ver; /* 0x00e0 */
u64 supp_att_pflags; /* 0x00e8 */
- u8 reservedf0[256 - 240]; /* 0x00f0 */
+ u64 reservedf0; /* 0x00f0 */
+ u64 supp_add_secret_req_ver; /* 0x00f8 */
+ u64 supp_add_secret_pcf; /* 0x0100 */
+ u64 supp_secret_types; /* 0x0108 */
+ u16 max_secrets; /* 0x0110 */
+ u8 reserved112[0x120 - 0x112]; /* 0x0112 */
} __packed __aligned(8);
/* Initialize Ultravisor */
@@ -292,6 +303,19 @@ struct uv_cb_dump_complete {
u64 reserved30[5];
} __packed __aligned(8);
+/*
+ * A common UV call struct for pv guests that contains a single address
+ * Examples:
+ * Add Secret
+ * List Secrets
+ */
+struct uv_cb_guest_addr {
+ struct uv_cb_header header;
+ u64 reserved08[3];
+ u64 addr;
+ u64 reserved28[4];
+} __packed __aligned(8);
+
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
int cc;
@@ -365,6 +389,10 @@ struct uv_info {
unsigned long conf_dump_finalize_len;
unsigned long supp_att_req_hdr_ver;
unsigned long supp_att_pflags;
+ unsigned long supp_add_secret_req_ver;
+ unsigned long supp_add_secret_pcf;
+ unsigned long supp_secret_types;
+ unsigned short max_secrets;
};
extern struct uv_info uv_info;
diff --git a/arch/s390/include/uapi/asm/cmb.h b/arch/s390/include/uapi/asm/cmb.h
index ecbe94941403..115434ab98fb 100644
--- a/arch/s390/include/uapi/asm/cmb.h
+++ b/arch/s390/include/uapi/asm/cmb.h
@@ -31,7 +31,7 @@
struct cmbdata {
__u64 size;
__u64 elapsed_time;
- /* basic and exended format: */
+ /* basic and extended format: */
__u64 ssch_rsch_count;
__u64 sample_count;
__u64 device_connect_time;
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
index 9c49c3d67cd5..b11d98800458 100644
--- a/arch/s390/include/uapi/asm/dasd.h
+++ b/arch/s390/include/uapi/asm/dasd.h
@@ -24,7 +24,7 @@
/*
* struct dasd_information2_t
* represents any data about the device, which is visible to userspace.
- * including foramt and featueres.
+ * including format and features.
*/
typedef struct dasd_information2_t {
unsigned int devno; /* S/390 devno */
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index f7bae1c63bd6..5faf0a1d2c16 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -353,7 +353,7 @@ struct pkey_kblob2pkey2 {
* Is able to find out which type of secure key is given (CCA AES secure
* key, CCA AES cipher key, CCA ECC private key, EP11 AES key, EP11 ECC private
* key) and tries to find all matching crypto cards based on the MKVP and maybe
- * other criterias (like CCA AES cipher keys need a CEX5C or higher, EP11 keys
+ * other criteria (like CCA AES cipher keys need a CEX5C or higher, EP11 keys
* with BLOB_PKEY_EXTRACTABLE need a CEX7 and EP11 api version 4). The list of
* APQNs is further filtered by the key's mkvp which needs to match to either
* the current mkvp (CCA and EP11) or the alternate mkvp (old mkvp, CCA adapters
@@ -370,7 +370,7 @@ struct pkey_kblob2pkey2 {
* is empty (apqn_entries is 0) the apqn_entries field is updated to the number
* of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
* but the number of apqn targets does not fit into the list, the apqn_targets
- * field is updatedd with the number of reqired entries but there are no apqn
+ * field is updated with the number of required entries but there are no apqn
* values stored in the list and the ioctl returns with ENOSPC. If no matching
* APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
*/
@@ -408,7 +408,7 @@ struct pkey_apqns4key {
* is empty (apqn_entries is 0) the apqn_entries field is updated to the number
* of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
* but the number of apqn targets does not fit into the list, the apqn_targets
- * field is updatedd with the number of reqired entries but there are no apqn
+ * field is updated with the number of required entries but there are no apqn
* values stored in the list and the ioctl returns with ENOSPC. If no matching
* APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
*/
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index ad64d673b5e6..f0fe3bcc78a8 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -166,6 +166,64 @@
#endif /* __s390x__ */
+#ifndef __s390x__
+
+#define PSW_MASK_PER _AC(0x40000000, UL)
+#define PSW_MASK_DAT _AC(0x04000000, UL)
+#define PSW_MASK_IO _AC(0x02000000, UL)
+#define PSW_MASK_EXT _AC(0x01000000, UL)
+#define PSW_MASK_KEY _AC(0x00F00000, UL)
+#define PSW_MASK_BASE _AC(0x00080000, UL) /* always one */
+#define PSW_MASK_MCHECK _AC(0x00040000, UL)
+#define PSW_MASK_WAIT _AC(0x00020000, UL)
+#define PSW_MASK_PSTATE _AC(0x00010000, UL)
+#define PSW_MASK_ASC _AC(0x0000C000, UL)
+#define PSW_MASK_CC _AC(0x00003000, UL)
+#define PSW_MASK_PM _AC(0x00000F00, UL)
+#define PSW_MASK_RI _AC(0x00000000, UL)
+#define PSW_MASK_EA _AC(0x00000000, UL)
+#define PSW_MASK_BA _AC(0x00000000, UL)
+
+#define PSW_MASK_USER _AC(0x0000FF00, UL)
+
+#define PSW_ADDR_AMODE _AC(0x80000000, UL)
+#define PSW_ADDR_INSN _AC(0x7FFFFFFF, UL)
+
+#define PSW_ASC_PRIMARY _AC(0x00000000, UL)
+#define PSW_ASC_ACCREG _AC(0x00004000, UL)
+#define PSW_ASC_SECONDARY _AC(0x00008000, UL)
+#define PSW_ASC_HOME _AC(0x0000C000, UL)
+
+#else /* __s390x__ */
+
+#define PSW_MASK_PER _AC(0x4000000000000000, UL)
+#define PSW_MASK_DAT _AC(0x0400000000000000, UL)
+#define PSW_MASK_IO _AC(0x0200000000000000, UL)
+#define PSW_MASK_EXT _AC(0x0100000000000000, UL)
+#define PSW_MASK_BASE _AC(0x0000000000000000, UL)
+#define PSW_MASK_KEY _AC(0x00F0000000000000, UL)
+#define PSW_MASK_MCHECK _AC(0x0004000000000000, UL)
+#define PSW_MASK_WAIT _AC(0x0002000000000000, UL)
+#define PSW_MASK_PSTATE _AC(0x0001000000000000, UL)
+#define PSW_MASK_ASC _AC(0x0000C00000000000, UL)
+#define PSW_MASK_CC _AC(0x0000300000000000, UL)
+#define PSW_MASK_PM _AC(0x00000F0000000000, UL)
+#define PSW_MASK_RI _AC(0x0000008000000000, UL)
+#define PSW_MASK_EA _AC(0x0000000100000000, UL)
+#define PSW_MASK_BA _AC(0x0000000080000000, UL)
+
+#define PSW_MASK_USER _AC(0x0000FF0180000000, UL)
+
+#define PSW_ADDR_AMODE _AC(0x0000000000000000, UL)
+#define PSW_ADDR_INSN _AC(0xFFFFFFFFFFFFFFFF, UL)
+
+#define PSW_ASC_PRIMARY _AC(0x0000000000000000, UL)
+#define PSW_ASC_ACCREG _AC(0x0000400000000000, UL)
+#define PSW_ASC_SECONDARY _AC(0x0000800000000000, UL)
+#define PSW_ASC_HOME _AC(0x0000C00000000000, UL)
+
+#endif /* __s390x__ */
+
#define NUM_GPRS 16
#define NUM_FPRS 16
#define NUM_CRS 16
@@ -214,69 +272,6 @@ typedef struct {
unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
-#ifndef __s390x__
-
-#define PSW_MASK_PER 0x40000000UL
-#define PSW_MASK_DAT 0x04000000UL
-#define PSW_MASK_IO 0x02000000UL
-#define PSW_MASK_EXT 0x01000000UL
-#define PSW_MASK_KEY 0x00F00000UL
-#define PSW_MASK_BASE 0x00080000UL /* always one */
-#define PSW_MASK_MCHECK 0x00040000UL
-#define PSW_MASK_WAIT 0x00020000UL
-#define PSW_MASK_PSTATE 0x00010000UL
-#define PSW_MASK_ASC 0x0000C000UL
-#define PSW_MASK_CC 0x00003000UL
-#define PSW_MASK_PM 0x00000F00UL
-#define PSW_MASK_RI 0x00000000UL
-#define PSW_MASK_EA 0x00000000UL
-#define PSW_MASK_BA 0x00000000UL
-
-#define PSW_MASK_USER 0x0000FF00UL
-
-#define PSW_ADDR_AMODE 0x80000000UL
-#define PSW_ADDR_INSN 0x7FFFFFFFUL
-
-#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
-
-#define PSW_ASC_PRIMARY 0x00000000UL
-#define PSW_ASC_ACCREG 0x00004000UL
-#define PSW_ASC_SECONDARY 0x00008000UL
-#define PSW_ASC_HOME 0x0000C000UL
-
-#else /* __s390x__ */
-
-#define PSW_MASK_PER 0x4000000000000000UL
-#define PSW_MASK_DAT 0x0400000000000000UL
-#define PSW_MASK_IO 0x0200000000000000UL
-#define PSW_MASK_EXT 0x0100000000000000UL
-#define PSW_MASK_BASE 0x0000000000000000UL
-#define PSW_MASK_KEY 0x00F0000000000000UL
-#define PSW_MASK_MCHECK 0x0004000000000000UL
-#define PSW_MASK_WAIT 0x0002000000000000UL
-#define PSW_MASK_PSTATE 0x0001000000000000UL
-#define PSW_MASK_ASC 0x0000C00000000000UL
-#define PSW_MASK_CC 0x0000300000000000UL
-#define PSW_MASK_PM 0x00000F0000000000UL
-#define PSW_MASK_RI 0x0000008000000000UL
-#define PSW_MASK_EA 0x0000000100000000UL
-#define PSW_MASK_BA 0x0000000080000000UL
-
-#define PSW_MASK_USER 0x0000FF0180000000UL
-
-#define PSW_ADDR_AMODE 0x0000000000000000UL
-#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
-
-#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
-
-#define PSW_ASC_PRIMARY 0x0000000000000000UL
-#define PSW_ASC_ACCREG 0x0000400000000000UL
-#define PSW_ASC_SECONDARY 0x0000800000000000UL
-#define PSW_ASC_HOME 0x0000C00000000000UL
-
-#endif /* __s390x__ */
-
-
/*
* The s390_regs structure is used to define the elf_gregset_t.
*/
diff --git a/arch/s390/include/uapi/asm/uvdevice.h b/arch/s390/include/uapi/asm/uvdevice.h
index 10a5ac918e02..b9c2f14a6af3 100644
--- a/arch/s390/include/uapi/asm/uvdevice.h
+++ b/arch/s390/include/uapi/asm/uvdevice.h
@@ -32,6 +32,33 @@ struct uvio_attest {
__u16 reserved136; /* 0x0136 */
};
+/**
+ * uvio_uvdev_info - Information about supported functions
+ * @supp_uvio_cmds - IOCTLs supported by this device
+ * @supp_uv_cmds - UVCs supported, corresponding to the IOCTLs
+ *
+ * UVIO request to get information about the request types supported by this
+ * uvdevice and the Ultravisor. Everything is output. Bits are in LSB0
+ * ordering. If the bit is set in both @supp_uvio_cmds and @supp_uv_cmds, the
+ * uvdevice and the Ultravisor support that call.
+ *
+ * Note that bit 0 (UVIO_IOCTL_UVDEV_INFO_NR) is always zero for `supp_uv_cmds`
+ * as there is no corresponding UV-call.
+ */
+struct uvio_uvdev_info {
+ /*
+ * If bit `n` is set, this device supports the IOCTL with nr `n`.
+ */
+ __u64 supp_uvio_cmds;
+ /*
+ * If bit `n` is set, the Ultravisor (UV) supports the UV-call
+ * corresponding to the IOCTL with nr `n` in the calling context (host
+ * or guest). The value is only valid if the corresponding bit in
+ * @supp_uvio_cmds is set as well.
+ */
+ __u64 supp_uv_cmds;
+};
+
/*
* The following max values define an upper length for the IOCTL in/out buffers.
* However, they do not represent the maximum the Ultravisor allows which is
@@ -42,10 +69,34 @@ struct uvio_attest {
#define UVIO_ATT_ARCB_MAX_LEN 0x100000
#define UVIO_ATT_MEASUREMENT_MAX_LEN 0x8000
#define UVIO_ATT_ADDITIONAL_MAX_LEN 0x8000
+#define UVIO_ADD_SECRET_MAX_LEN 0x100000
+#define UVIO_LIST_SECRETS_LEN 0x1000
#define UVIO_DEVICE_NAME "uv"
#define UVIO_TYPE_UVC 'u'
-#define UVIO_IOCTL_ATT _IOWR(UVIO_TYPE_UVC, 0x01, struct uvio_ioctl_cb)
+enum UVIO_IOCTL_NR {
+ UVIO_IOCTL_UVDEV_INFO_NR = 0x00,
+ UVIO_IOCTL_ATT_NR,
+ UVIO_IOCTL_ADD_SECRET_NR,
+ UVIO_IOCTL_LIST_SECRETS_NR,
+ UVIO_IOCTL_LOCK_SECRETS_NR,
+ /* must be the last entry */
+ UVIO_IOCTL_NUM_IOCTLS
+};
+
+#define UVIO_IOCTL(nr) _IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb)
+#define UVIO_IOCTL_UVDEV_INFO UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR)
+#define UVIO_IOCTL_ATT UVIO_IOCTL(UVIO_IOCTL_ATT_NR)
+#define UVIO_IOCTL_ADD_SECRET UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_IOCTL_LIST_SECRETS UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_IOCTL_LOCK_SECRETS UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR)
+
+#define UVIO_SUPP_CALL(nr) (1ULL << (nr))
+#define UVIO_SUPP_UDEV_INFO UVIO_SUPP_CALL(UVIO_IOCTL_UVDEV_INFO_NR)
+#define UVIO_SUPP_ATT UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR)
+#define UVIO_SUPP_ADD_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_SUPP_LIST_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_SUPP_LOCK_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR)
#endif /* __S390_ASM_UVDEVICE_H */
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 3f8e760298c2..81cf72088041 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -122,7 +122,6 @@ int main(void)
OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer);
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
- OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 72e106cfd8c7..b210a29d3ee9 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -16,10 +16,10 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/mm.h>
+#include <linux/io.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
-#include <asm/io.h>
static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 90bbb4ea1d08..89dc826a8d2e 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -24,8 +24,8 @@
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include <linux/io.h>
#include <asm/dis.h>
-#include <asm/io.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
@@ -516,7 +516,7 @@ void show_code(struct pt_regs *regs)
if (copy_from_regs(regs, code + end, (void *)addr, 2))
break;
}
- /* Code snapshot useable ? */
+ /* Code snapshot usable? */
if ((regs->psw.addr & 1) || start >= end) {
printk("%s Code: Bad PSW.\n", mode);
return;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e5b6c1369e8e..a660f4b6d654 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -136,7 +136,7 @@ _LPP_OFFSET = __LC_LPP
clgfrl %r14,.Lrange_size\@
jhe \outside_label
.section .rodata, "a"
- .align 4
+ .balign 4
.Lrange_size\@:
.long \end - \start
.previous
@@ -488,7 +488,6 @@ SYM_FUNC_END(psw_idle)
* Machine check handler routines
*/
SYM_CODE_START(mcck_int_handler)
- stckf __LC_MCCK_CLOCK
BPOFF
la %r1,4095 # validate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
@@ -598,8 +597,9 @@ SYM_CODE_START(restart_int_handler)
TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
jz 0f
lctlg %c0,%c15,__LC_CREGS_SAVE_AREA
-0: larl %r15,stosm_tmp
- stosm 0(%r15),0x04 # turn dat on, keep irqs off
+0: larl %r15,daton_psw
+ lpswe 0(%r15) # turn dat on, keep irqs off
+.Ldaton:
lg %r15,__LC_RESTART_STACK
xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
@@ -646,7 +646,11 @@ SYM_CODE_END(stack_overflow)
.balign 4
SYM_DATA_LOCAL(stop_lock, .long 0)
SYM_DATA_LOCAL(this_cpu, .short 0)
-SYM_DATA_LOCAL(stosm_tmp, .byte 0)
+ .balign 8
+SYM_DATA_START_LOCAL(daton_psw)
+ .quad PSW_KERNEL_BITS
+ .quad .Ldaton
+SYM_DATA_END(daton_psw)
.section .rodata, "a"
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index df77ba102096..45413b04efc5 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -36,5 +36,5 @@ SYM_CODE_START(startup_continue)
lpswe dw_psw-.(%r13) # load disabled wait psw
SYM_CODE_END(startup_continue)
- .align 16
+ .balign 16
SYM_DATA_LOCAL(dw_psw, .quad 0x0002000180000000,0x0000000000000000)
diff --git a/arch/s390/kernel/kprobes_insn_page.S b/arch/s390/kernel/kprobes_insn_page.S
index b6335296dcd8..0fe4d725e98b 100644
--- a/arch/s390/kernel/kprobes_insn_page.S
+++ b/arch/s390/kernel/kprobes_insn_page.S
@@ -13,7 +13,7 @@
* would be in the data section instead.
*/
.section .kprobes.text, "ax"
- .align 4096
+ .balign 4096
SYM_CODE_START(kprobes_insn_page)
.rept 2048
.word 0x07fe
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 717bbcc056e5..d1b16d83e49a 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -14,7 +14,7 @@ static int __init nobp_setup_early(char *str)
return rc;
if (enabled && test_facility(82)) {
/*
- * The user explicitely requested nobp=1, enable it and
+ * The user explicitly requested nobp=1, enable it and
* disable the expoline support.
*/
__set_facility(82, alt_stfle_fac_list);
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 90679143534b..850c11ea631a 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -172,9 +172,9 @@ static void cpum_cf_free_root(void)
cpu_cf_root.cfptr = NULL;
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
on_each_cpu(cpum_cf_reset_cpu, NULL, 1);
- debug_sprintf_event(cf_dbg, 4, "%s2 root.refcnt %u cfptr %px\n",
+ debug_sprintf_event(cf_dbg, 4, "%s root.refcnt %u cfptr %d\n",
__func__, refcount_read(&cpu_cf_root.refcnt),
- cpu_cf_root.cfptr);
+ !cpu_cf_root.cfptr);
}
/*
@@ -975,10 +975,6 @@ static int cfdiag_push_sample(struct perf_event *event,
}
overflow = perf_event_overflow(event, &data, &regs);
- debug_sprintf_event(cf_dbg, 3,
- "%s event %#llx sample_type %#llx raw %d ov %d\n",
- __func__, event->hw.config,
- event->attr.sample_type, raw.size, overflow);
if (overflow)
event->pmu->stop(event, 0);
@@ -1105,10 +1101,6 @@ static int cpum_cf_online_cpu(unsigned int cpu)
{
int rc = 0;
- debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d "
- "opencnt %d\n", __func__, cpu,
- refcount_read(&cpu_cf_root.refcnt),
- refcount_read(&cfset_opencnt));
/*
* Ignore notification for perf_event_open().
* Handle only /dev/hwctr device sessions.
@@ -1127,9 +1119,6 @@ static int cfset_offline_cpu(unsigned int cpu);
static int cpum_cf_offline_cpu(unsigned int cpu)
{
- debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d opencnt %d\n",
- __func__, cpu, refcount_read(&cpu_cf_root.refcnt),
- refcount_read(&cfset_opencnt));
/*
* During task exit processing of grouped perf events triggered by CPU
* hotplug processing, pmu_disable() is called as part of perf context
@@ -1337,8 +1326,6 @@ static void cfset_ioctl_off(void *parm)
cpuhw->state, S390_HWCTR_DEVICE, rc);
if (!cpuhw->dev_state)
cpuhw->flags &= ~PMU_F_IN_USE;
- debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n",
- __func__, rc, cpuhw->state, cpuhw->dev_state);
}
/* Start counter sets on particular CPU */
@@ -1360,8 +1347,6 @@ static void cfset_ioctl_on(void *parm)
else
pr_err("Counter set start %#llx of /dev/%s failed rc=%i\n",
cpuhw->dev_state | cpuhw->state, S390_HWCTR_DEVICE, rc);
- debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n",
- __func__, rc, cpuhw->state, cpuhw->dev_state);
}
static void cfset_release_cpu(void *p)
@@ -1369,8 +1354,6 @@ static void cfset_release_cpu(void *p)
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
int rc;
- debug_sprintf_event(cf_dbg, 4, "%s state %#llx dev_state %#llx\n",
- __func__, cpuhw->state, cpuhw->dev_state);
cpuhw->dev_state = 0;
rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */
if (rc)
@@ -1459,7 +1442,6 @@ static int cfset_all_start(struct cfset_request *req)
if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
rc = -EIO;
- debug_sprintf_event(cf_dbg, 4, "%s CPUs missing", __func__);
}
free_cpumask_var(mask);
return rc;
@@ -1516,8 +1498,6 @@ static int cfset_all_copy(unsigned long arg, cpumask_t *mask)
if (put_user(cpus, &ctrset_read->no_cpus))
rc = -EFAULT;
out:
- debug_sprintf_event(cf_dbg, 4, "%s rc %d copied %ld\n", __func__, rc,
- uptr - (void __user *)ctrset_read->data);
return rc;
}
@@ -1565,8 +1545,6 @@ static void cfset_cpu_read(void *parm)
cpuhw->used += space;
cpuhw->sets += 1;
}
- debug_sprintf_event(cf_dbg, 4, "%s sets %d used %zd\n", __func__,
- cpuhw->sets, cpuhw->used);
}
}
@@ -1661,8 +1639,6 @@ static long cfset_ioctl_start(unsigned long arg, struct file *file)
if (!ret) {
cfset_session_add(preq);
file->private_data = preq;
- debug_sprintf_event(cf_dbg, 4, "%s set %#lx need %ld ret %d\n",
- __func__, preq->ctrset, need, ret);
} else {
kfree(preq);
}
@@ -1761,8 +1737,6 @@ static int cfset_offline_cpu(unsigned int cpu)
static void cfdiag_read(struct perf_event *event)
{
- debug_sprintf_event(cf_dbg, 3, "%s event %#llx count %ld\n", __func__,
- event->attr.config, local64_read(&event->count));
}
static int get_authctrsets(void)
@@ -1807,8 +1781,6 @@ static int cfdiag_event_init2(struct perf_event *event)
if (!event->hw.config_base)
err = -EINVAL;
- debug_sprintf_event(cf_dbg, 5, "%s err %d config_base %#lx\n",
- __func__, err, event->hw.config_base);
return err;
}
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 8ecfbce4ac92..06efad5b4f93 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -22,7 +22,7 @@
#include <asm/irq.h>
#include <asm/debug.h>
#include <asm/timex.h>
-#include <asm-generic/io.h>
+#include <linux/io.h>
/* Minimum number of sample-data-block-tables:
* At least one table is required for the sampling buffer structure.
@@ -43,7 +43,7 @@
#define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8)
static inline int require_table_link(const void *sdbt)
{
- return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
+ return ((unsigned long)sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
}
/* Minimum and maximum sampling buffer sizes:
@@ -192,7 +192,7 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
if (is_link_entry(curr)) {
curr = get_next_sdbt(curr);
if (sdbt)
- free_page((unsigned long) sdbt);
+ free_page((unsigned long)sdbt);
/* If the origin is reached, sampling buffer is freed */
if (curr == sfb->sdbt)
@@ -278,7 +278,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
for (i = 0; i < num_sdb; i++) {
/* Allocate a new SDB-table if it is full. */
if (require_table_link(tail)) {
- new = (unsigned long *) get_zeroed_page(gfp_flags);
+ new = (unsigned long *)get_zeroed_page(gfp_flags);
if (!new) {
rc = -ENOMEM;
break;
@@ -304,7 +304,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
*/
if (tail_prev) {
sfb->num_sdbt--;
- free_page((unsigned long) new);
+ free_page((unsigned long)new);
tail = tail_prev;
}
break;
@@ -343,7 +343,7 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
return -EINVAL;
/* Allocate the sample-data-block-table origin */
- sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!sfb->sdbt)
return -ENOMEM;
sfb->num_sdb = 0;
@@ -594,11 +594,10 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
#define PMC_FAILURE 2
static void setup_pmc_cpu(void *flags)
{
- int err;
struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
+ int err = 0;
- err = 0;
- switch (*((int *) flags)) {
+ switch (*((int *)flags)) {
case PMC_INIT:
memset(cpusf, 0, sizeof(*cpusf));
err = qsi(&cpusf->qsi);
@@ -606,22 +605,18 @@ static void setup_pmc_cpu(void *flags)
break;
cpusf->flags |= PMU_F_RESERVED;
err = sf_disable();
- if (err)
- pr_err("Switching off the sampling facility failed "
- "with rc %i\n", err);
break;
case PMC_RELEASE:
cpusf->flags &= ~PMU_F_RESERVED;
err = sf_disable();
- if (err) {
- pr_err("Switching off the sampling facility failed "
- "with rc %i\n", err);
- } else
+ if (!err)
deallocate_buffers(cpusf);
break;
}
- if (err)
- *((int *) flags) |= PMC_FAILURE;
+ if (err) {
+ *((int *)flags) |= PMC_FAILURE;
+ pr_err("Switching off the sampling facility failed with rc %i\n", err);
+ }
}
static void release_pmc_hardware(void)
@@ -963,10 +958,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
return -ENOENT;
}
- /* Check online status of the CPU to which the event is pinned */
- if (event->cpu >= 0 && !cpu_online(event->cpu))
- return -ENODEV;
-
/* Force reset of idle/hv excludes regardless of what the
* user requested.
*/
@@ -1026,8 +1017,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
err = lsctl(&cpuhw->lsctl);
if (err) {
cpuhw->flags &= ~PMU_F_ENABLED;
- pr_err("Loading sampling controls failed: op %i err %i\n",
- 1, err);
+ pr_err("Loading sampling controls failed: op 1 err %i\n", err);
return;
}
@@ -1061,8 +1051,7 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
err = lsctl(&inactive);
if (err) {
- pr_err("Loading sampling controls failed: op %i err %i\n",
- 2, err);
+ pr_err("Loading sampling controls failed: op 2 err %i\n", err);
return;
}
@@ -1221,7 +1210,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
te = trailer_entry_ptr((unsigned long)sdbt);
sample = (struct hws_basic_entry *)sdbt;
- while ((unsigned long *) sample < (unsigned long *) te) {
+ while ((unsigned long *)sample < (unsigned long *)te) {
/* Check for an empty sample */
if (!sample->def || sample->LS)
break;
@@ -1298,7 +1287,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
if (SAMPL_DIAG_MODE(&event->hw))
return;
- sdbt = (unsigned long *) TEAR_REG(hwc);
+ sdbt = (unsigned long *)TEAR_REG(hwc);
done = event_overflow = sampl_overflow = num_sdb = 0;
while (!done) {
/* Get the trailer entry of the sample-data-block */
@@ -1670,9 +1659,6 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
pr_err("The AUX buffer with %lu pages for the "
"diagnostic-sampling mode is full\n",
num_sdb);
- debug_sprintf_event(sfdbg, 1,
- "%s: AUX buffer used up\n",
- __func__);
break;
}
if (WARN_ON_ONCE(!aux))
@@ -1804,7 +1790,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
/* Allocate the first SDBT */
sfb->num_sdbt = 0;
- sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!sfb->sdbt)
goto no_sdbt;
aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt;
@@ -1816,7 +1802,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
*/
for (i = 0; i < nr_pages; i++, tail++) {
if (require_table_link(tail)) {
- new = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ new = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!new)
goto no_sdbt;
aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new;
@@ -1865,7 +1851,7 @@ static void cpumsf_pmu_read(struct perf_event *event)
/* Nothing to do ... updates are interrupt-driven */
}
-/* Check if the new sampling period/freqeuncy is appropriate.
+/* Check if the new sampling period/frequency is appropriate.
*
* Return non-zero on error and zero on passed checks.
*/
@@ -1973,8 +1959,8 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
if (!SAMPL_DIAG_MODE(&event->hw)) {
cpuhw->lsctl.tear = virt_to_phys(cpuhw->sfb.sdbt);
- cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
- TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt;
+ cpuhw->lsctl.dear = *(unsigned long *)cpuhw->sfb.sdbt;
+ TEAR_REG(&event->hw) = (unsigned long)cpuhw->sfb.sdbt;
}
/* Ensure sampling functions are in the disabled state. If disabled,
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index 3b4f384f77f7..c57c1a203256 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -84,7 +84,7 @@ static int paiext_root_alloc(void)
/* The memory is already zeroed. */
paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
if (!paiext_root.mapptr) {
- /* Returing without refcnt adjustment is ok. The
+ /* Returning without refcnt adjustment is ok. The
* error code is handled by paiext_alloc() which
* decrements refcnt when an event can not be
* created.
@@ -190,7 +190,7 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
: PAI_MODE_COUNTING;
} else {
- /* Multiple invocation, check whats active.
+ /* Multiple invocation, check what is active.
* Supported are multiple counter events or only one sampling
* event concurrently at any one time.
*/
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 87ca3a727604..258000417724 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -30,8 +30,8 @@
#include <linux/export.h>
#include <linux/init_task.h>
#include <linux/entry-common.h>
+#include <linux/io.h>
#include <asm/cpu_mf.h>
-#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index fe10da1a271e..00d76448319d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -529,7 +529,7 @@ static void __init setup_resources(void)
res->start = start;
/*
* In memblock, end points to the first byte after the
- * range while in resourses, end points to the last byte in
+ * range while in resources, end points to the last byte in
* the range.
*/
res->end = end - 1;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 726de4f4df01..f9a2b755f510 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -113,7 +113,7 @@ early_param("smt", early_smt);
/*
* The smp_cpu_state_mutex must be held when changing the state or polarization
- * member of a pcpu data structure within the pcpu_devices arreay.
+ * member of a pcpu data structure within the pcpu_devices array.
*/
DEFINE_MUTEX(smp_cpu_state_mutex);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 276278199c44..d34d3548c046 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -702,7 +702,7 @@ static void stp_work_fn(struct work_struct *work)
if (!check_sync_clock())
/*
- * There is a usable clock but the synchonization failed.
+ * There is a usable clock but the synchronization failed.
* Retry after a second.
*/
mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 3c62d1b218b1..66f0eb1c872b 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -23,12 +23,20 @@
int __bootdata_preserved(prot_virt_guest);
#endif
+/*
+ * uv_info contains both host and guest information but it's currently only
+ * expected to be used within modules, namely by the KVM module or by
+ * any PV guest module.
+ *
+ * The kernel itself will write these values once in uv_query_info()
+ * and then make some of them readable via a sysfs interface.
+ */
struct uv_info __bootdata_preserved(uv_info);
+EXPORT_SYMBOL(uv_info);
#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
-EXPORT_SYMBOL(uv_info);
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
@@ -462,13 +470,13 @@ EXPORT_SYMBOL_GPL(arch_make_page_accessible);
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
- uv_info.inst_calls_list[0],
- uv_info.inst_calls_list[1],
- uv_info.inst_calls_list[2],
- uv_info.inst_calls_list[3]);
+ return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
+ uv_info.inst_calls_list[0],
+ uv_info.inst_calls_list[1],
+ uv_info.inst_calls_list[2],
+ uv_info.inst_calls_list[3]);
}
static struct kobj_attribute uv_query_facilities_attr =
@@ -493,30 +501,27 @@ static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);
static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "%lx\n",
- uv_info.guest_cpu_stor_len);
+ return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}
static struct kobj_attribute uv_query_dump_cpu_len_attr =
__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);
static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "%lx\n",
- uv_info.conf_dump_storage_state_len);
+ return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}
static struct kobj_attribute uv_query_dump_storage_state_len_attr =
__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);
static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "%lx\n",
- uv_info.conf_dump_finalize_len);
+ return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}
static struct kobj_attribute uv_query_dump_finalize_len_attr =
@@ -532,53 +537,86 @@ static struct kobj_attribute uv_query_feature_indications_attr =
__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "%d\n",
- uv_info.max_guest_cpu_id + 1);
+ return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}
static struct kobj_attribute uv_query_max_guest_cpus_attr =
__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);
static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "%d\n",
- uv_info.max_num_sec_conf);
+ return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}
static struct kobj_attribute uv_query_max_guest_vms_attr =
__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);
static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "%lx\n",
- uv_info.max_sec_stor_addr);
+ return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}
static struct kobj_attribute uv_query_max_guest_addr_attr =
__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_req_hdr_ver);
+ return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}
static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);
static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
- return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_pflags);
+ return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}
static struct kobj_attribute uv_query_supp_att_pflags_attr =
__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);
+static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
+}
+
+static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
+ __ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);
+
+static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
+}
+
+static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
+ __ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);
+
+static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
+}
+
+static struct kobj_attribute uv_query_supp_secret_types_attr =
+ __ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);
+
+static ssize_t uv_query_max_secrets(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
+}
+
+static struct kobj_attribute uv_query_max_secrets_attr =
+ __ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);
+
static struct attribute *uv_query_attrs[] = {
&uv_query_facilities_attr.attr,
&uv_query_feature_indications_attr.attr,
@@ -592,6 +630,10 @@ static struct attribute *uv_query_attrs[] = {
&uv_query_dump_cpu_len_attr.attr,
&uv_query_supp_att_req_hdr_ver_attr.attr,
&uv_query_supp_att_pflags_attr.attr,
+ &uv_query_supp_add_secret_req_ver_attr.attr,
+ &uv_query_supp_add_secret_pcf_attr.attr,
+ &uv_query_supp_secret_types_attr.attr,
+ &uv_query_max_secrets_attr.attr,
NULL,
};
@@ -600,18 +642,18 @@ static struct attribute_group uv_query_attr_group = {
};
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
int val = 0;
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
val = prot_virt_guest;
#endif
- return scnprintf(page, PAGE_SIZE, "%d\n", val);
+ return sysfs_emit(buf, "%d\n", val);
}
static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+ struct kobj_attribute *attr, char *buf)
{
int val = 0;
@@ -619,7 +661,7 @@ static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
val = prot_virt_host;
#endif
- return scnprintf(page, PAGE_SIZE, "%d\n", val);
+ return sysfs_emit(buf, "%d\n", val);
}
static struct kobj_attribute uv_prot_virt_guest =
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index bafd3147eb4e..23e868b79a6c 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -19,6 +19,7 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_32 += -m31 -s
KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
@@ -40,8 +41,11 @@ KCSAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
+quiet_cmd_vdso_and_check = VDSO $@
+ cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check)
+
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
- $(call if_changed,ld)
+ $(call if_changed,vdso_and_check)
# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index a766d286e15f..fc1c6ff8178f 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -24,6 +24,7 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_64 += -m64
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
--hash-style=both --build-id=sha1 -T
@@ -44,9 +45,12 @@ KCSAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
+quiet_cmd_vdso_and_check = VDSO $@
+ cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check)
+
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE
- $(call if_changed,ld)
+ $(call if_changed,vdso_and_check)
# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 807fa9da1e72..3c65b8258ae6 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -166,6 +166,7 @@ static int diag9c_forwarding_overrun(void)
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *tcpu;
+ int tcpu_cpu;
int tid;
tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
@@ -181,14 +182,15 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
goto no_yield;
/* target guest VCPU already running */
- if (READ_ONCE(tcpu->cpu) >= 0) {
+ tcpu_cpu = READ_ONCE(tcpu->cpu);
+ if (tcpu_cpu >= 0) {
if (!diag9c_forwarding_hz || diag9c_forwarding_overrun())
goto no_yield;
/* target host CPU already running */
- if (!vcpu_is_preempted(tcpu->cpu))
+ if (!vcpu_is_preempted(tcpu_cpu))
goto no_yield;
- smp_yield_cpu(tcpu->cpu);
+ smp_yield_cpu(tcpu_cpu);
VCPU_EVENT(vcpu, 5,
"diag time slice end directed to %d: yield forwarded",
tid);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 3eb85f254881..6d6bc19b37dc 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -478,7 +478,7 @@ struct trans_exc_code_bits {
};
enum {
- FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
+ FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
FSI_STORE = 1, /* Exception was due to store operation */
FSI_FETCH = 2 /* Exception was due to fetch operation */
};
@@ -625,7 +625,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* Returns: - zero on success; @gpa contains the resulting absolute address
* - a negative value if guest access failed due to e.g. broken
* guest mapping
- * - a positve value if an access exception happened. In this case
+ * - a positive value if an access exception happened. In this case
* the returned value is the program interruption code as defined
* by the architecture
*/
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 2cda8d9d7c6e..954d39adf85c 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -630,7 +630,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
- /* process PER, also if the instrution is processed in user space */
+ /* process PER, also if the instruction is processed in user space */
if (vcpu->arch.sie_block->icptstatus & 0x02 &&
(!rc || rc == -EOPNOTSUPP))
per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 17b81659cdb2..d1e768bcfe1d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2156,6 +2156,10 @@ static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
ofs = 0;
}
+
+ if (cur_gfn < ms->base_gfn)
+ ofs = 0;
+
ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
@@ -4157,7 +4161,7 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
vcpu->run->s.regs.fpc = 0;
/*
* Do not reset these registers in the protected case, as some of
- * them are overlayed and they are not accessible in this case
+ * them are overlaid and they are not accessible in this case
* anyway.
*/
if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/pci.c
index 7dab00f1e833..ffa7739c7a28 100644
--- a/arch/s390/kvm/pci.c
+++ b/arch/s390/kvm/pci.c
@@ -427,7 +427,7 @@ static void kvm_s390_pci_dev_release(struct zpci_dev *zdev)
/*
- * Register device with the specified KVM. If interpetation facilities are
+ * Register device with the specified KVM. If interpretation facilities are
* available, enable them and let userspace indicate whether or not they will
* be used (specify SHM bit to disable).
*/
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 9f8a192bd750..dc4cfa8795c0 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -13,7 +13,7 @@
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>
-
+#include <linux/io.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
@@ -22,7 +22,6 @@
#include <asm/sysinfo.h>
#include <asm/page-states.h>
#include <asm/gmap.h>
-#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include <asm/ap.h>
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 3ce5f4351156..2f34c7c3c5ab 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -273,7 +273,7 @@ static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
uvcb.header.rc, uvcb.header.rrc);
WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
- /* Inteded memory leak on "impossible" error */
+ /* Intended memory leak on "impossible" error */
if (!cc)
kvm_s390_pv_dealloc_vm(kvm);
return cc ? -EIO : 0;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index cb747bf6c798..d9696b530064 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -469,7 +469,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
*
* This interception will occur at the source cpu when a source cpu sends an
* external call to a target cpu and the target cpu has the WAIT bit set in
- * its cpuflags. Interception will occurr after the interrupt indicator bits at
+ * its cpuflags. Interception will occur after the interrupt indicator bits at
* the target cpu have been set. All error cases will lead to instruction
* interception, therefore nothing is to be checked or prepared.
*/
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 8d6b765abf29..61499293c2ac 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -177,7 +177,8 @@ static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
- bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));
+ bitmap_and(apcb_s, apcb_s, apcb_h,
+ BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
return 0;
}
@@ -203,7 +204,8 @@ static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
sizeof(struct kvm_s390_apcb1)))
return -EFAULT;
- bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));
+ bitmap_and(apcb_s, apcb_s, apcb_h,
+ BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
return 0;
}
@@ -502,7 +504,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->mso = new_mso;
scb_s->prefix = new_prefix;
- /* We have to definetly flush the tlb if this scb never ran */
+ /* We have to definitely flush the tlb if this scb never ran */
if (scb_s->ihcpu != 0xffffU)
scb_s->ihcpu = scb_o->ihcpu;
@@ -899,7 +901,7 @@ static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
(vaddr & 0xfffffffffffff000UL) |
/* 52-53: store / fetch */
(((unsigned int) !write_flag) + 1) << 10,
- /* 62-63: asce id (alway primary == 0) */
+ /* 62-63: asce id (always primary == 0) */
.exc_access_id = 0, /* always primary */
.op_access_id = 0, /* not MVPG */
};
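
The two bitmap_and() fixes above correct a units bug: the length argument counts bits, while sizeof() yields bytes, so only the first handful of bits of the shadow APCB were actually masked against the host copy. A minimal standalone sketch of the difference (plain userspace C with a stand-in bitmap_and(), not the kernel implementation):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_BYTE CHAR_BIT
#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(unsigned long))

struct apcb_like { unsigned long w[2]; };	/* 16 bytes on a 64-bit build */

/* Stand-in for the kernel's bitmap_and(): AND only the first @nbits bits. */
static void bitmap_and(unsigned long *dst, const unsigned long *a,
		       const unsigned long *b, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits / BITS_PER_LONG; i++)
		dst[i] = a[i] & b[i];
	if (nbits % BITS_PER_LONG)
		dst[i] = a[i] & b[i] & ((1UL << (nbits % BITS_PER_LONG)) - 1);
}

int main(void)
{
	struct apcb_like shadow = { { ~0UL, ~0UL } };	/* guest asks for everything */
	struct apcb_like host   = { { 0, 0 } };		/* host grants nothing       */
	struct apcb_like out;

	out = shadow;
	bitmap_and(out.w, shadow.w, host.w, sizeof(shadow));	/* buggy: bytes as bits */
	printf("bytes as nbits: %lx %lx\n", out.w[0], out.w[1]);

	out = shadow;
	bitmap_and(out.w, shadow.w, host.w, BITS_PER_BYTE * sizeof(shadow));
	printf("bits as nbits:  %lx %lx\n", out.w[0], out.w[1]);
	return 0;
}

With bytes passed as bits, the second word of the "shadow" copy keeps the guest's bits unmasked; scaling by BITS_PER_BYTE masks the whole structure.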
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 04d4c6cf898e..81c53440b3e6 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -13,8 +13,8 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
+#include <linux/io.h>
#include <asm/alternative.h>
-#include <asm/io.h>
int spin_retry = -1;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index f4b6fc746fce..989ebd0912b4 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1740,7 +1740,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow);
* The r2t parameter specifies the address of the source table. The
* four pages of the source table are made read-only in the parent gmap
* address space. A write to the source table area @r2t will automatically
- * remove the shadow r2 table and all of its decendents.
+ * remove the shadow r2 table and all of its descendants.
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index d02a61620cfa..cbe1df1e9c18 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -13,9 +13,9 @@
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
+#include <linux/io.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
-#include <asm/io.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/maccess.h>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b9dcb4ae6c59..b26649233d12 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -481,6 +481,7 @@ static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
*/
static int vmem_add_range(unsigned long start, unsigned long size)
{
+ start = (unsigned long)__va(start);
return add_pagetable(start, start + size, true);
}
@@ -489,6 +490,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
*/
static void vmem_remove_range(unsigned long start, unsigned long size)
{
+ start = (unsigned long)__va(start);
remove_pagetable(start, start + size, true);
}
@@ -556,7 +558,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
* to any physical address. If missing, allocate segment- and region-
* table entries along. Meeting a large segment- or region-table entry
* while traversing is an error, since the function is expected to be
- * called against virtual regions reserverd for 4KB mappings only.
+ * called against virtual regions reserved for 4KB mappings only.
*/
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index f95d7e401b96..5e9371fbf3d5 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -523,12 +523,12 @@ extern const char bpf_plt_end[];
#define BPF_PLT_SIZE 32
asm(
".pushsection .rodata\n"
- " .align 8\n"
+ " .balign 8\n"
"bpf_plt:\n"
" lgrl %r0,bpf_plt_ret\n"
" lgrl %r1,bpf_plt_target\n"
" br %r1\n"
- " .align 8\n"
+ " .balign 8\n"
"bpf_plt_ret: .quad 0\n"
"bpf_plt_target: .quad 0\n"
"bpf_plt_end:\n"
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 4ab0cf829999..ff8f24854c64 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -163,7 +163,7 @@ static void zpci_handle_cpu_local_irq(bool rescan)
if (!rescan || irqs_on++)
/* End of second scan with interrupts on. */
break;
- /* First scan complete, reenable interrupts. */
+ /* First scan complete, re-enable interrupts. */
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &iib))
break;
bit = 0;
@@ -202,7 +202,7 @@ static void zpci_handle_fallback_irq(void)
if (irqs_on++)
/* End of second scan with interrupts on. */
break;
- /* First scan complete, reenable interrupts. */
+ /* First scan complete, re-enable interrupts. */
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
break;
cpu = 0;
@@ -247,7 +247,7 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
if (irqs_on++)
/* End of second scan with interrupts on. */
break;
- /* First scan complete, reenable interrupts. */
+ /* First scan complete, re-enable interrupts. */
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
break;
si = 0;
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index e5bd1a503528..0f93f2e72eba 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -100,7 +100,7 @@ SYM_CODE_START(purgatory_start)
* checksum verification only (%r2 = 0 -> verification only).
*
* Check now and preserve over C function call by storing in
- * %r10 whith
+ * %r10 with
* 1 -> checksum verification only
* 0 -> load new kernel
*/
diff --git a/arch/sh/Kbuild b/arch/sh/Kbuild
index be171880977e..056efec72c2a 100644
--- a/arch/sh/Kbuild
+++ b/arch/sh/Kbuild
@@ -3,5 +3,7 @@ obj-y += kernel/ mm/ boards/
obj-$(CONFIG_SH_FPU_EMU) += math-emu/
obj-$(CONFIG_USE_BUILTIN_DTB) += boot/dts/
+obj-$(CONFIG_HD6446X_SERIES) += cchips/hd6446x/
+
# for cleaning
subdir- += boot
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 5c8776482530..cab2f9c011a8 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -116,34 +116,15 @@ export ld-bfd
# Mach groups
machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se
-machdir-$(CONFIG_SH_HP6XX) += mach-hp6xx
machdir-$(CONFIG_SH_DREAMCAST) += mach-dreamcast
machdir-$(CONFIG_SH_SH03) += mach-sh03
-machdir-$(CONFIG_SH_RTS7751R2D) += mach-r2d
-machdir-$(CONFIG_SH_HIGHLANDER) += mach-highlander
machdir-$(CONFIG_SH_MIGOR) += mach-migor
-machdir-$(CONFIG_SH_AP325RXA) += mach-ap325rxa
machdir-$(CONFIG_SH_KFR2R09) += mach-kfr2r09
machdir-$(CONFIG_SH_ECOVEC) += mach-ecovec24
-machdir-$(CONFIG_SH_SDK7780) += mach-sdk7780
machdir-$(CONFIG_SH_SDK7786) += mach-sdk7786
machdir-$(CONFIG_SH_X3PROTO) += mach-x3proto
-machdir-$(CONFIG_SH_SH7763RDP) += mach-sh7763rdp
-machdir-$(CONFIG_SH_SH4202_MICRODEV) += mach-microdev
machdir-$(CONFIG_SH_LANDISK) += mach-landisk
-machdir-$(CONFIG_SH_LBOX_RE2) += mach-lboxre2
-machdir-$(CONFIG_SH_RSK) += mach-rsk
-
-ifneq ($(machdir-y),)
-core-y += $(addprefix arch/sh/boards/, \
- $(filter-out ., $(patsubst %,%/,$(machdir-y))))
-endif
-
-# Common machine type headers. Not part of the arch/sh/boards/ hierarchy.
-machdir-y += mach-common
-
-# Companion chips
-core-$(CONFIG_HD6446X_SERIES) += arch/sh/cchips/hd6446x/
+machdir-y += mach-common
#
# CPU header paths
@@ -164,11 +145,8 @@ cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
-cflags-y += $(foreach d, $(cpuincdir-y), -I $(srctree)/arch/sh/include/$(d)) \
- $(foreach d, $(machdir-y), -I $(srctree)/arch/sh/include/$(d))
-
+KBUILD_CPPFLAGS += $(addprefix -I $(srctree)/arch/sh/include/, $(cpuincdir-y) $(machdir-y))
KBUILD_CFLAGS += -pipe $(cflags-y)
-KBUILD_CPPFLAGS += $(cflags-y)
KBUILD_AFLAGS += $(cflags-y)
ifeq ($(CONFIG_MCOUNT),y)
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index 4002a22a7c40..b57219436ace 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -18,3 +18,22 @@ obj-$(CONFIG_SH_APSH4A3A) += board-apsh4a3a.o
obj-$(CONFIG_SH_APSH4AD0A) += board-apsh4ad0a.o
obj-$(CONFIG_SH_DEVICE_TREE) += of-generic.o
+
+obj-$(CONFIG_SOLUTION_ENGINE) += mach-se/
+obj-$(CONFIG_SH_HP6XX) += mach-hp6xx/
+obj-$(CONFIG_SH_DREAMCAST) += mach-dreamcast/
+obj-$(CONFIG_SH_SH03) += mach-sh03/
+obj-$(CONFIG_SH_RTS7751R2D) += mach-r2d/
+obj-$(CONFIG_SH_HIGHLANDER) += mach-highlander/
+obj-$(CONFIG_SH_MIGOR) += mach-migor/
+obj-$(CONFIG_SH_AP325RXA) += mach-ap325rxa/
+obj-$(CONFIG_SH_KFR2R09) += mach-kfr2r09/
+obj-$(CONFIG_SH_ECOVEC) += mach-ecovec24/
+obj-$(CONFIG_SH_SDK7780) += mach-sdk7780/
+obj-$(CONFIG_SH_SDK7786) += mach-sdk7786/
+obj-$(CONFIG_SH_X3PROTO) += mach-x3proto/
+obj-$(CONFIG_SH_SH7763RDP) += mach-sh7763rdp/
+obj-$(CONFIG_SH_SH4202_MICRODEV)+= mach-microdev/
+obj-$(CONFIG_SH_LANDISK) += mach-landisk/
+obj-$(CONFIG_SH_LBOX_RE2) += mach-lboxre2/
+obj-$(CONFIG_SH_RSK) += mach-rsk/
diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
index 7d54f284ce10..08d937a6d249 100644
--- a/arch/sh/drivers/dma/Kconfig
+++ b/arch/sh/drivers/dma/Kconfig
@@ -28,17 +28,19 @@ config SH_DMA_API
config NR_ONCHIP_DMA_CHANNELS
int
depends on SH_DMA
- default "4" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7751 || \
- CPU_SUBTYPE_SH7750S || CPU_SUBTYPE_SH7091
+ default "4" if CPU_SUBTYPE_SH7709 || CPU_SUBTYPE_SH7750 || \
+ CPU_SUBTYPE_SH7750S || CPU_SUBTYPE_SH7751 || \
+ CPU_SUBTYPE_SH7091
default "8" if CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R || \
CPU_SUBTYPE_SH7760
- default "12" if CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7780 || \
- CPU_SUBTYPE_SH7785 || CPU_SUBTYPE_SH7724
+ default "12" if CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7724 || \
+ CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785
default "6"
help
This allows you to specify the number of channels that the on-chip
- DMAC supports. This will be 4 for SH7750/SH7751/Sh7750S/SH7091 and 8 for the
- SH7750R/SH7751R/SH7760, 12 for the SH7723/SH7780/SH7785/SH7724, default is 6.
+ DMAC supports. This will be 4 for SH7709/SH7750/SH7750S/SH7751/SH7091,
+ 8 for SH7750R/SH7751R/SH7760, and 12 for SH7723/SH7724/SH7780/SH7785.
+ Default is 6.
config SH_DMABRG
bool "SH7760 DMABRG support"
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 96c626c2cd0a..306fba1564e5 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -19,6 +19,18 @@
#include <cpu/dma.h>
/*
+ * Some of the SoCs feature two DMAC modules. In such a case, the channels are
+ * distributed equally among them.
+ */
+#ifdef SH_DMAC_BASE1
+#define SH_DMAC_NR_MD_CH (CONFIG_NR_ONCHIP_DMA_CHANNELS / 2)
+#else
+#define SH_DMAC_NR_MD_CH CONFIG_NR_ONCHIP_DMA_CHANNELS
+#endif
+
+#define SH_DMAC_CH_SZ 0x10
+
+/*
* Define the default configuration for dual address memory-memory transfer.
* The 0x400 value represents auto-request, external->external.
*/
@@ -29,7 +41,7 @@ static unsigned long dma_find_base(unsigned int chan)
unsigned long base = SH_DMAC_BASE0;
#ifdef SH_DMAC_BASE1
- if (chan >= 6)
+ if (chan >= SH_DMAC_NR_MD_CH)
base = SH_DMAC_BASE1;
#endif
@@ -40,13 +52,13 @@ static unsigned long dma_base_addr(unsigned int chan)
{
unsigned long base = dma_find_base(chan);
- /* Normalize offset calculation */
- if (chan >= 9)
- chan -= 6;
- if (chan >= 4)
- base += 0x10;
+ chan = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ;
+
+ /* DMAOR is placed inside the channel register space. Step over it. */
+ if (chan >= DMAOR)
+ base += SH_DMAC_CH_SZ;
- return base + (chan * 0x10);
+ return base + chan;
}
#ifdef CONFIG_SH_DMA_IRQ_MULTI
@@ -250,12 +262,11 @@ static int sh_dmac_get_dma_residue(struct dma_channel *chan)
#define NR_DMAOR 1
#endif
-/*
- * DMAOR bases are broken out amongst channel groups. DMAOR0 manages
- * channels 0 - 5, DMAOR1 6 - 11 (optional).
- */
-#define dmaor_read_reg(n) __raw_readw(dma_find_base((n)*6))
-#define dmaor_write_reg(n, data) __raw_writew(data, dma_find_base(n)*6)
+#define dmaor_read_reg(n) __raw_readw(dma_find_base((n) * \
+ SH_DMAC_NR_MD_CH) + DMAOR)
+#define dmaor_write_reg(n, data) __raw_writew(data, \
+ dma_find_base((n) * \
+ SH_DMAC_NR_MD_CH) + DMAOR)
static inline int dmaor_reset(int no)
{
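
The reworked dma_base_addr() above encodes the register layout explicitly: channel register blocks are SH_DMAC_CH_SZ bytes apart, the shared DMAOR register sits inside that array so channels whose offset lands at or beyond DMAOR are pushed up by one block, and a second DMAC module (when SH_DMAC_BASE1 exists) takes the upper half of the channels. A standalone sketch of that address calculation (the DMAOR offset and second base address are assumed values, not the real SH register map):

#include <stdio.h>

#define NR_ONCHIP_DMA_CHANNELS	12		/* assumed config value          */
#define SH_DMAC_BASE0		0xffa00000UL	/* value taken from the diff     */
#define SH_DMAC_BASE1		0xfea00000UL	/* hypothetical second module    */
#define SH_DMAC_NR_MD_CH	(NR_ONCHIP_DMA_CHANNELS / 2)
#define SH_DMAC_CH_SZ		0x10
#define DMAOR			0x40		/* assumed offset in the block   */

static unsigned long dma_find_base(unsigned int chan)
{
	return chan >= SH_DMAC_NR_MD_CH ? SH_DMAC_BASE1 : SH_DMAC_BASE0;
}

static unsigned long dma_base_addr(unsigned int chan)
{
	unsigned long base = dma_find_base(chan);
	unsigned int off = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ;

	if (off >= DMAOR)		/* step over the shared DMAOR register */
		off += SH_DMAC_CH_SZ;
	return base + off;
}

int main(void)
{
	for (unsigned int ch = 0; ch < NR_ONCHIP_DMA_CHANNELS; ch++)
		printf("ch%-2u -> %#lx\n", ch, dma_base_addr(ch));
	return 0;
}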
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index fba90e670ed4..d8f3537ef57f 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -286,6 +286,7 @@ static inline void iounmap(volatile void __iomem *addr) { }
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
+#define unxlate_dev_mem_ptr(p, v) do { } while (0)
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/sh/include/cpu-sh4/cpu/dma.h b/arch/sh/include/cpu-sh4/cpu/dma.h
index 38187d06b234..e97fb2c79177 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma.h
@@ -13,6 +13,5 @@
#define DMAE0_IRQ evt2irq(0x6c0)
#define SH_DMAC_BASE0 0xffa00000
-#define SH_DMAC_BASE1 0xffa00070
#endif /* __ASM_CPU_SH4_DMA_H */
diff --git a/arch/sh/include/mach-common/mach/highlander.h b/arch/sh/include/mach-common/mach/highlander.h
index fb44c299d033..b12c79558422 100644
--- a/arch/sh/include/mach-common/mach/highlander.h
+++ b/arch/sh/include/mach-common/mach/highlander.h
@@ -176,7 +176,7 @@
#define IVDR_CK_ON 4 /* iVDR Clock ON */
#endif
-#define HL_FPGA_IRQ_BASE 200
+#define HL_FPGA_IRQ_BASE (200 + 16)
#define HL_NR_IRL 15
#define IRQ_AX88796 (HL_FPGA_IRQ_BASE + 0)
diff --git a/arch/sh/include/mach-common/mach/r2d.h b/arch/sh/include/mach-common/mach/r2d.h
index 0d7e483c7d3f..69bc1907c563 100644
--- a/arch/sh/include/mach-common/mach/r2d.h
+++ b/arch/sh/include/mach-common/mach/r2d.h
@@ -47,7 +47,7 @@
#define IRLCNTR1 (PA_BCR + 0) /* Interrupt Control Register1 */
-#define R2D_FPGA_IRQ_BASE 100
+#define R2D_FPGA_IRQ_BASE (100 + 16)
#define IRQ_VOYAGER (R2D_FPGA_IRQ_BASE + 0)
#define IRQ_EXT (R2D_FPGA_IRQ_BASE + 1)
diff --git a/arch/sh/include/mach-dreamcast/mach/sysasic.h b/arch/sh/include/mach-dreamcast/mach/sysasic.h
index ed69ce7f2030..3b27be9a527e 100644
--- a/arch/sh/include/mach-dreamcast/mach/sysasic.h
+++ b/arch/sh/include/mach-dreamcast/mach/sysasic.h
@@ -22,7 +22,7 @@
takes.
*/
-#define HW_EVENT_IRQ_BASE 48
+#define HW_EVENT_IRQ_BASE (48 + 16)
/* IRQ 13 */
#define HW_EVENT_VSYNC (HW_EVENT_IRQ_BASE + 5) /* VSync */
diff --git a/arch/sh/include/mach-se/mach/se7724.h b/arch/sh/include/mach-se/mach/se7724.h
index 1fe28820dfa9..ea6c46633b33 100644
--- a/arch/sh/include/mach-se/mach/se7724.h
+++ b/arch/sh/include/mach-se/mach/se7724.h
@@ -37,7 +37,7 @@
#define IRQ2_IRQ evt2irq(0x640)
/* Bits in IRQ012 registers */
-#define SE7724_FPGA_IRQ_BASE 220
+#define SE7724_FPGA_IRQ_BASE (220 + 16)
/* IRQ0 */
#define IRQ0_BASE SE7724_FPGA_IRQ_BASE
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index d342ea08843f..70a07f4f2142 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -21,7 +21,7 @@ static int __init scan_cache(unsigned long node, const char *uname,
if (!of_flat_dt_is_compatible(node, "jcore,cache"))
return 0;
- j2_ccr_base = (u32 __iomem *)of_flat_dt_translate_address(node);
+ j2_ccr_base = ioremap(of_flat_dt_translate_address(node), 4);
return 1;
}
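
The probe fix above replaces a raw cast with ioremap(): of_flat_dt_translate_address() returns a physical address, which has to be mapped before it can be dereferenced through an __iomem pointer. A minimal sketch of the general pattern (illustrative kernel-style code, not the J2 cache driver itself):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/types.h>

static u32 __iomem *ccr_base;

/* Map a 4-byte control register described by a physical address. */
static int __init map_ccr(phys_addr_t phys)
{
	ccr_base = ioremap(phys, 4);
	if (!ccr_base)
		return -ENOMEM;
	return 0;
}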
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index e48b3dd996f5..b1f5b3c58a01 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -470,9 +470,9 @@ ENTRY(handle_interrupt)
mov r4, r0 ! save vector->jmp table offset for later
shlr2 r4 ! vector to IRQ# conversion
- add #-0x10, r4
- cmp/pz r4 ! is it a valid IRQ?
+ mov #0x10, r5
+ cmp/hs r5, r4 ! is it a valid IRQ?
bt 10f
/*
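
One way to read the entry.S change above, together with the "+ 16" bumps to the board FPGA IRQ bases earlier in this series: the handler now keeps the IRQ number as vector/4 and rejects anything below 16 instead of subtracting 16, so hardware IRQ numbering starts at 16 and IRQ0 is never handed out. A standalone sketch of that reading (assumed interpretation, illustrative values):

#include <stdio.h>

static int vector_to_irq(unsigned int vec)
{
	unsigned int irq = vec >> 2;	/* shlr2: vector to IRQ# conversion      */

	if (irq < 0x10)			/* cmp/hs: below 16 is not a valid IRQ   */
		return -1;
	return irq;
}

int main(void)
{
	printf("%d %d\n", vector_to_irq(0x20), vector_to_irq(0x240));
	return 0;
}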
diff --git a/arch/sparc/video/fbdev.c b/arch/sparc/video/fbdev.c
index 25837f128132..bff66dd1909a 100644
--- a/arch/sparc/video/fbdev.c
+++ b/arch/sparc/video/fbdev.c
@@ -21,3 +21,6 @@ int fb_is_primary_device(struct fb_info *info)
return 0;
}
EXPORT_SYMBOL(fb_is_primary_device);
+
+MODULE_DESCRIPTION("Sparc fbdev helpers");
+MODULE_LICENSE("GPL");
diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
index 9b62a9d352b3..a310ae27b479 100644
--- a/arch/um/os-Linux/user_syms.c
+++ b/arch/um/os-Linux/user_syms.c
@@ -37,13 +37,6 @@ EXPORT_SYMBOL(vsyscall_ehdr);
EXPORT_SYMBOL(vsyscall_end);
#endif
-/* Export symbols used by GCC for the stack protector. */
-extern void __stack_smash_handler(void *) __attribute__((weak));
-EXPORT_SYMBOL(__stack_smash_handler);
-
-extern long __guard __attribute__((weak));
-EXPORT_SYMBOL(__guard);
-
#ifdef _FORTIFY_SOURCE
extern int __sprintf_chk(char *str, int flag, size_t len, const char *format);
EXPORT_SYMBOL(__sprintf_chk);
diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
index c17e3e96fc1d..6c98f4bb4228 100644
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -13,7 +13,6 @@ BUILD_BUG_ON(1)
* at the call sites.
*/
KVM_X86_PMU_OP(hw_event_available)
-KVM_X86_PMU_OP(pmc_is_enabled)
KVM_X86_PMU_OP(pmc_idx_to_pmc)
KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
KVM_X86_PMU_OP(msr_idx_to_pmc)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fb9d1f2d6136..28bd38303d70 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -523,7 +523,7 @@ struct kvm_pmu {
u64 global_status;
u64 counter_bitmask[2];
u64 global_ctrl_mask;
- u64 global_ovf_ctrl_mask;
+ u64 global_status_mask;
u64 reserved_bits;
u64 raw_event_mask;
struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0c9660a07b23..7f4d13383cf2 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -501,20 +501,15 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
- int r;
-
- r = -E2BIG;
if (cpuid->nent < vcpu->arch.cpuid_nent)
- goto out;
- r = -EFAULT;
+ return -E2BIG;
+
if (copy_to_user(entries, vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
- goto out;
- return 0;
+ return -EFAULT;
-out:
cpuid->nent = vcpu->arch.cpuid_nent;
- return r;
+ return 0;
}
/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
@@ -734,6 +729,10 @@ void kvm_set_cpu_caps(void)
F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */
);
+ kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
+ F(PERFMON_V2)
+ );
+
/*
* Synthesize "LFENCE is serializing" into the AMD-defined entry in
* KVM's supported CPUID if the feature is reported as supported by the
@@ -948,7 +947,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
union cpuid10_eax eax;
union cpuid10_edx edx;
- if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
+ if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
}
@@ -1128,7 +1127,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = 0;
break;
case 0x80000000:
- entry->eax = min(entry->eax, 0x80000021);
+ entry->eax = min(entry->eax, 0x80000022);
/*
* Serializing LFENCE is reported in a multitude of ways, and
* NullSegClearsBase is not reported in CPUID on Zen2; help
@@ -1233,6 +1232,28 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->ebx = entry->ecx = entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0021_EAX);
break;
+ /* AMD Extended Performance Monitoring and Debug */
+ case 0x80000022: {
+ union cpuid_0x80000022_ebx ebx;
+
+ entry->ecx = entry->edx = 0;
+ if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
+ entry->eax = entry->ebx;
+ break;
+ }
+
+ cpuid_entry_override(entry, CPUID_8000_0022_EAX);
+
+ if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
+ ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
+ else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
+ ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;
+ else
+ ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;
+
+ entry->ebx = ebx.full;
+ break;
+ }
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
/*Just support up to 0xC0000004 now*/
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 4756bcb5724f..8dec646e764b 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -411,7 +411,10 @@ static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
pic_clear_isr(s, ret);
if (addr1 >> 7 || ret != 2)
pic_update_irq(s->pics_state);
+ /* Bit 7 is 1, means there's an interrupt */
+ ret |= 0x80;
} else {
+ /* Bit 7 is 0, means there's no interrupt */
ret = 0x07;
pic_update_irq(s->pics_state);
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3c300a196bdf..113ca9661ab2 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -51,11 +51,6 @@
#define mod_64(x, y) ((x) % (y))
#endif
-#define PRId64 "d"
-#define PRIx64 "llx"
-#define PRIu64 "u"
-#define PRIo64 "o"
-
/* 14 is the version for Xeon and Pentium 8.4.8*/
#define APIC_VERSION 0x14UL
#define LAPIC_MMIO_LENGTH (1 << 12)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6eaa3d6994ae..ec169f5c7dce 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -58,6 +58,8 @@
extern bool itlb_multihit_kvm_mitigation;
+static bool nx_hugepage_mitigation_hard_disabled;
+
int __read_mostly nx_huge_pages = -1;
static uint __read_mostly nx_huge_pages_recovery_period_ms;
#ifdef CONFIG_PREEMPT_RT
@@ -67,12 +69,13 @@ static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif
+static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops nx_huge_pages_ops = {
.set = set_nx_huge_pages,
- .get = param_get_bool,
+ .get = get_nx_huge_pages,
};
static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
@@ -1600,6 +1603,10 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
if (tdp_mmu_enabled)
flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+ if (kvm_x86_ops.set_apic_access_page_addr &&
+ range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
+ kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+
return flush;
}
@@ -5797,6 +5804,14 @@ static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
vcpu_clear_mmio_info(vcpu, addr);
+ /*
+ * Walking and synchronizing SPTEs both assume they are operating in
+ * the context of the current MMU, and would need to be reworked if
+ * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT.
+ */
+ if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
+ return;
+
if (!VALID_PAGE(root_hpa))
return;
@@ -6844,6 +6859,14 @@ static void mmu_destroy_caches(void)
kmem_cache_destroy(mmu_page_header_cache);
}
+static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
+{
+ if (nx_hugepage_mitigation_hard_disabled)
+ return sprintf(buffer, "never\n");
+
+ return param_get_bool(buffer, kp);
+}
+
static bool get_nx_auto_mode(void)
{
/* Return true when CPU has the bug, and mitigations are ON */
@@ -6860,15 +6883,29 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
bool old_val = nx_huge_pages;
bool new_val;
+ if (nx_hugepage_mitigation_hard_disabled)
+ return -EPERM;
+
/* In "auto" mode deploy workaround only if CPU has the bug. */
- if (sysfs_streq(val, "off"))
+ if (sysfs_streq(val, "off")) {
new_val = 0;
- else if (sysfs_streq(val, "force"))
+ } else if (sysfs_streq(val, "force")) {
new_val = 1;
- else if (sysfs_streq(val, "auto"))
+ } else if (sysfs_streq(val, "auto")) {
new_val = get_nx_auto_mode();
- else if (kstrtobool(val, &new_val) < 0)
+ } else if (sysfs_streq(val, "never")) {
+ new_val = 0;
+
+ mutex_lock(&kvm_lock);
+ if (!list_empty(&vm_list)) {
+ mutex_unlock(&kvm_lock);
+ return -EBUSY;
+ }
+ nx_hugepage_mitigation_hard_disabled = true;
+ mutex_unlock(&kvm_lock);
+ } else if (kstrtobool(val, &new_val) < 0) {
return -EINVAL;
+ }
__set_nx_huge_pages(new_val);
@@ -7006,6 +7043,9 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
uint old_period, new_period;
int err;
+ if (nx_hugepage_mitigation_hard_disabled)
+ return -EPERM;
+
was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
err = param_set_uint(val, kp);
@@ -7164,6 +7204,9 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
{
int err;
+ if (nx_hugepage_mitigation_hard_disabled)
+ return 0;
+
err = kvm_vm_create_worker_thread(kvm, kvm_nx_huge_page_recovery_worker, 0,
"kvm-nx-lpage-recovery",
&kvm->arch.nx_huge_page_recovery_thread);
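
The module-parameter changes above add a "never" value that latches the NX huge page mitigation off: it is refused with -EBUSY once VMs exist, later writes fail with -EPERM, reads report "never", and new VMs skip creating the recovery worker. A minimal sketch of the latched-off parameter pattern (illustrative only; the names are made up and the VM-list check is omitted):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/types.h>

static bool feature_on = true;
static bool feature_hard_disabled;

static int feature_get(char *buffer, const struct kernel_param *kp)
{
	if (feature_hard_disabled)
		return sprintf(buffer, "never\n");
	return param_get_bool(buffer, kp);
}

static int feature_set(const char *val, const struct kernel_param *kp)
{
	if (feature_hard_disabled)
		return -EPERM;			/* latched: no way back */
	if (sysfs_streq(val, "never")) {
		feature_on = false;
		feature_hard_disabled = true;	/* real code also checks for live VMs */
		return 0;
	}
	return param_set_bool(val, kp);
}

static const struct kernel_param_ops feature_ops = {
	.set = feature_set,
	.get = feature_get,
};
module_param_cb(feature, &feature_ops, &feature_on, 0644);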
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 08340219c35a..512163d52194 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -592,7 +592,10 @@ static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
/*
* Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
- * does not hold the mmu_lock.
+ * does not hold the mmu_lock. On failure, i.e. if a different logical
+ * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
+ * the current value, so the caller operates on fresh data, e.g. if it
+ * retries tdp_mmu_set_spte_atomic()
*/
if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
return -EBUSY;
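
The expanded comment above relies on a property worth spelling out: try_cmpxchg64(), like the underlying compare-and-exchange builtins, writes the current value back into the "expected" argument on failure, so a caller that retries always operates on fresh data without re-reading. A standalone illustration using the equivalent GCC/Clang builtin (not kernel code):

#include <stdbool.h>
#include <stdint.h>

static inline bool try_cmpxchg64(uint64_t *ptr, uint64_t *old, uint64_t new)
{
	return __atomic_compare_exchange_n(ptr, old, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

/* Atomically set the low bit; retries operate on the refreshed value. */
static uint64_t set_low_bit(uint64_t *spte)
{
	uint64_t old = *spte;

	while (!try_cmpxchg64(spte, &old, old | 1))
		;		/* "old" now holds the value another CPU wrote */
	return old;
}

int main(void)
{
	uint64_t spte = 0x8;
	uint64_t prev = set_low_bit(&spte);

	return (prev == 0x8 && spte == 0x9) ? 0 : 1;
}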
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 9fac1ec03463..3eb6e7f47e96 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -25,10 +25,24 @@
#define IA32_MTRR_DEF_TYPE_FE (1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK (0xff)
+static bool is_mtrr_base_msr(unsigned int msr)
+{
+ /* MTRR base MSRs use even numbers, masks use odd numbers. */
+ return !(msr & 0x1);
+}
+
+static struct kvm_mtrr_range *var_mtrr_msr_to_range(struct kvm_vcpu *vcpu,
+ unsigned int msr)
+{
+ int index = (msr - MTRRphysBase_MSR(0)) / 2;
+
+ return &vcpu->arch.mtrr_state.var_ranges[index];
+}
+
static bool msr_mtrr_valid(unsigned msr)
{
switch (msr) {
- case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
+ case MTRRphysBase_MSR(0) ... MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1):
case MSR_MTRRfix64K_00000:
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
@@ -41,7 +55,6 @@ static bool msr_mtrr_valid(unsigned msr)
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
case MSR_MTRRdefType:
- case MSR_IA32_CR_PAT:
return true;
}
return false;
@@ -52,7 +65,7 @@ static bool valid_mtrr_type(unsigned t)
return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}
-bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
int i;
u64 mask;
@@ -60,9 +73,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (!msr_mtrr_valid(msr))
return false;
- if (msr == MSR_IA32_CR_PAT) {
- return kvm_pat_valid(data);
- } else if (msr == MSR_MTRRdefType) {
+ if (msr == MSR_MTRRdefType) {
if (data & ~0xcff)
return false;
return valid_mtrr_type(data & 0xff);
@@ -74,7 +85,8 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
}
/* variable MTRRs */
- WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
+ WARN_ON(!(msr >= MTRRphysBase_MSR(0) &&
+ msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1)));
mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
if ((msr & 1) == 0) {
@@ -88,7 +100,6 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return (data & mask) == 0;
}
-EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
@@ -308,10 +319,8 @@ static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
gfn_t start, end;
- int index;
- if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
- !kvm_arch_has_noncoherent_dma(vcpu->kvm))
+ if (!tdp_enabled || !kvm_arch_has_noncoherent_dma(vcpu->kvm))
return;
if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
@@ -326,8 +335,7 @@ static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
end = ~0ULL;
} else {
/* variable range MTRRs. */
- index = (msr - 0x200) / 2;
- var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
+ var_mtrr_range(var_mtrr_msr_to_range(vcpu, msr), &start, &end);
}
kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
@@ -342,21 +350,18 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
struct kvm_mtrr_range *tmp, *cur;
- int index, is_mtrr_mask;
- index = (msr - 0x200) / 2;
- is_mtrr_mask = msr - 0x200 - 2 * index;
- cur = &mtrr_state->var_ranges[index];
+ cur = var_mtrr_msr_to_range(vcpu, msr);
/* remove the entry if it's in the list. */
if (var_mtrr_range_is_valid(cur))
- list_del(&mtrr_state->var_ranges[index].node);
+ list_del(&cur->node);
/*
* Set all illegal GPA bits in the mask, since those bits must
* implicitly be 0. The bits are then cleared when reading them.
*/
- if (!is_mtrr_mask)
+ if (is_mtrr_base_msr(msr))
cur->base = data;
else
cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu);
@@ -382,8 +387,6 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
else if (msr == MSR_MTRRdefType)
vcpu->arch.mtrr_state.deftype = data;
- else if (msr == MSR_IA32_CR_PAT)
- vcpu->arch.pat = data;
else
set_var_mtrr_msr(vcpu, msr, data);
@@ -411,21 +414,16 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
return 1;
index = fixed_msr_to_range_index(msr);
- if (index >= 0)
+ if (index >= 0) {
*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
- else if (msr == MSR_MTRRdefType)
+ } else if (msr == MSR_MTRRdefType) {
*pdata = vcpu->arch.mtrr_state.deftype;
- else if (msr == MSR_IA32_CR_PAT)
- *pdata = vcpu->arch.pat;
- else { /* Variable MTRRs */
- int is_mtrr_mask;
-
- index = (msr - 0x200) / 2;
- is_mtrr_mask = msr - 0x200 - 2 * index;
- if (!is_mtrr_mask)
- *pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
+ } else {
+ /* Variable MTRRs */
+ if (is_mtrr_base_msr(msr))
+ *pdata = var_mtrr_msr_to_range(vcpu, msr)->base;
else
- *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
+ *pdata = var_mtrr_msr_to_range(vcpu, msr)->mask;
*pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
}
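
The new helpers above encode the variable-MTRR MSR layout: the MSRs come in base/mask pairs starting at MTRRphysBase_MSR(0), even numbers are PhysBase registers, odd numbers are the matching PhysMask registers, and the range index is (msr - MTRRphysBase_MSR(0)) / 2. A standalone illustration of that mapping:

#include <stdbool.h>
#include <stdio.h>

#define MTRRphysBase_MSR(n)	(0x200 + 2 * (n))
#define MTRRphysMask_MSR(n)	(0x200 + 2 * (n) + 1)

static bool is_mtrr_base_msr(unsigned int msr)
{
	return !(msr & 0x1);		/* even = base, odd = mask */
}

static unsigned int var_mtrr_msr_to_index(unsigned int msr)
{
	return (msr - MTRRphysBase_MSR(0)) / 2;
}

int main(void)
{
	for (unsigned int msr = 0x200; msr <= 0x205; msr++)
		printf("msr %#x -> range %u (%s)\n", msr,
		       var_mtrr_msr_to_index(msr),
		       is_mtrr_base_msr(msr) ? "base" : "mask");
	return 0;
}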
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 1690d41c1830..bf653df86112 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -93,11 +93,6 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
#undef __KVM_X86_PMU_OP
}
-static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
-{
- return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc);
-}
-
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
@@ -562,6 +557,14 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
+ switch (msr) {
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
+ default:
+ break;
+ }
return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
}
@@ -577,13 +580,86 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
- return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ u32 msr = msr_info->index;
+
+ switch (msr) {
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+ msr_info->data = pmu->global_status;
+ break;
+ case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ msr_info->data = pmu->global_ctrl;
+ break;
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ msr_info->data = 0;
+ break;
+ default:
+ return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+ }
+
+ return 0;
}
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
- kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
- return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ u32 msr = msr_info->index;
+ u64 data = msr_info->data;
+ u64 diff;
+
+ /*
+ * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
+ * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
+ */
+ switch (msr) {
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ if (!msr_info->host_initiated)
+ return 1; /* RO MSR */
+ fallthrough;
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+ /* Per PPR, Read-only MSR. Writes are ignored. */
+ if (!msr_info->host_initiated)
+ break;
+
+ if (data & pmu->global_status_mask)
+ return 1;
+
+ pmu->global_status = data;
+ break;
+ case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+ data &= ~pmu->global_ctrl_mask;
+ fallthrough;
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ if (!kvm_valid_perf_global_ctrl(pmu, data))
+ return 1;
+
+ if (pmu->global_ctrl != data) {
+ diff = pmu->global_ctrl ^ data;
+ pmu->global_ctrl = data;
+ reprogram_counters(pmu, diff);
+ }
+ break;
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ /*
+ * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
+ * GLOBAL_STATUS, and so the set of reserved bits is the same.
+ */
+ if (data & pmu->global_status_mask)
+ return 1;
+ fallthrough;
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+ if (!msr_info->host_initiated)
+ pmu->global_status &= ~data;
+ break;
+ default:
+ kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
+ return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+ }
+
+ return 0;
}
/* refresh PMU settings. This function generally is called when underlying
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 5c7bbf03b599..7d9ba301c090 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -20,7 +20,6 @@
struct kvm_pmu_ops {
bool (*hw_event_available)(struct kvm_pmc *pmc);
- bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask);
@@ -37,10 +36,25 @@ struct kvm_pmu_ops {
const u64 EVENTSEL_EVENT;
const int MAX_NR_GP_COUNTERS;
+ const int MIN_NR_GP_COUNTERS;
};
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
+static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
+{
+ /*
+ * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
+ * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
+ * greater than zero. However, KVM only exposes and emulates the MSR
+ * to/for the guest if the guest PMU supports at least "Architectural
+ * Performance Monitoring Version 2".
+ *
+ * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
+ */
+ return pmu->version > 1;
+}
+
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -161,6 +175,7 @@ extern struct x86_pmu_capability kvm_pmu_cap;
static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
+ int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
/*
* Hybrid PMUs don't play nice with virtualization without careful
@@ -175,11 +190,15 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
perf_get_x86_pmu_capability(&kvm_pmu_cap);
/*
- * For Intel, only support guest architectural pmu
- * on a host with architectural pmu.
+ * WARN if perf did NOT disable hardware PMU if the number of
+ * architecturally required GP counters aren't present, i.e. if
+ * there are a non-zero number of counters, but fewer than what
+ * is architecturally required.
*/
- if ((is_intel && !kvm_pmu_cap.version) ||
- !kvm_pmu_cap.num_counters_gp)
+ if (!kvm_pmu_cap.num_counters_gp ||
+ WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
+ enable_pmu = false;
+ else if (is_intel && !kvm_pmu_cap.version)
enable_pmu = false;
}
@@ -201,6 +220,33 @@ static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}
+static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
+{
+ int bit;
+
+ if (!diff)
+ return;
+
+ for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
+ set_bit(bit, pmu->reprogram_pmi);
+ kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
+}
+
+/*
+ * Check if a PMC is enabled by comparing it against global_ctrl bits.
+ *
+ * If the vPMU doesn't have global_ctrl MSR, all vPMCs are enabled.
+ */
+static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+ if (!kvm_pmu_has_perf_global_ctrl(pmu))
+ return true;
+
+ return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
+}
+
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
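
The reprogram_counters() helper moved into pmu.h above is driven by an XOR of the old and new GLOBAL_CTRL values, so only counters whose enable bit actually toggled get marked for reprogramming. A standalone illustration of the XOR-diff idea:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_ctrl = 0x0000000000000003ULL;	/* counters 0 and 1 enabled */
	uint64_t new_ctrl = 0x0000000000000006ULL;	/* counters 1 and 2 enabled */
	uint64_t diff = old_ctrl ^ new_ctrl;		/* bits 0 and 2 toggled     */

	for (int bit = 0; bit < 64; bit++)
		if (diff & (1ULL << bit))
			printf("reprogram counter %d\n", bit);
	return 0;
}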
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index a5717282bb9c..56cbdb24400a 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -15,6 +15,7 @@ enum kvm_only_cpuid_leafs {
CPUID_12_EAX = NCAPINTS,
CPUID_7_1_EDX,
CPUID_8000_0007_EDX,
+ CPUID_8000_0022_EAX,
NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -47,6 +48,9 @@ enum kvm_only_cpuid_leafs {
/* CPUID level 0x80000007 (EDX). */
#define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)
+/* CPUID level 0x80000022 (EAX) */
+#define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0)
+
struct cpuid_reg {
u32 function;
u32 index;
@@ -74,6 +78,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_7_1_EDX] = { 7, 1, CPUID_EDX},
[CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX},
[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
+ [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
};
/*
@@ -108,6 +113,8 @@ static __always_inline u32 __feature_translate(int x86_feature)
return KVM_X86_FEATURE_SGX_EDECCSSA;
else if (x86_feature == X86_FEATURE_CONSTANT_TSC)
return KVM_X86_FEATURE_CONSTANT_TSC;
+ else if (x86_feature == X86_FEATURE_PERFMON_V2)
+ return KVM_X86_FEATURE_PERFMON_V2;
return x86_feature;
}
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 5fa939e411d8..cef5a3d0abd0 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -78,14 +78,6 @@ static bool amd_hw_event_available(struct kvm_pmc *pmc)
return true;
}
-/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
- * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
- */
-static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
-{
- return true;
-}
-
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -102,12 +94,6 @@ static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
}
-static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
-{
- /* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
- return false;
-}
-
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -119,6 +105,29 @@ static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
return pmc;
}
+static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
+ switch (msr) {
+ case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
+ return pmu->version > 0;
+ case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
+ return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+ return pmu->version > 1;
+ default:
+ if (msr > MSR_F15H_PERF_CTR5 &&
+ msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
+ return pmu->version > 1;
+ break;
+ }
+
+ return amd_msr_idx_to_pmc(vcpu, msr);
+}
+
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -172,20 +181,39 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ union cpuid_0x80000022_ebx ebx;
- if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+ pmu->version = 1;
+ if (guest_cpuid_has(vcpu, X86_FEATURE_PERFMON_V2)) {
+ pmu->version = 2;
+ /*
+ * Note, PERFMON_V2 is also in 0x80000022.0x0, i.e. the guest
+ * CPUID entry is guaranteed to be non-NULL.
+ */
+ BUILD_BUG_ON(x86_feature_cpuid(X86_FEATURE_PERFMON_V2).function != 0x80000022 ||
+ x86_feature_cpuid(X86_FEATURE_PERFMON_V2).index);
+ ebx.full = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0)->ebx;
+ pmu->nr_arch_gp_counters = ebx.split.num_core_pmc;
+ } else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
- else
+ } else {
pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
+ }
+
+ pmu->nr_arch_gp_counters = min_t(unsigned int, pmu->nr_arch_gp_counters,
+ kvm_pmu_cap.num_counters_gp);
+
+ if (pmu->version > 1) {
+ pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
+ pmu->global_status_mask = pmu->global_ctrl_mask;
+ }
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
pmu->reserved_bits = 0xfffffff000280000ull;
pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
- pmu->version = 1;
/* not applicable to AMD; but clean them to prevent any fall out */
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->nr_arch_fixed_counters = 0;
- pmu->global_status = 0;
bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}
@@ -216,11 +244,12 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
pmc_stop_counter(pmc);
pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
}
+
+ pmu->global_ctrl = pmu->global_status = 0;
}
struct kvm_pmu_ops amd_pmu_ops __initdata = {
.hw_event_available = amd_hw_event_available,
- .pmc_is_enabled = amd_pmc_is_enabled,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = amd_msr_idx_to_pmc,
@@ -233,4 +262,5 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
.reset = amd_pmu_reset,
.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
.MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
+ .MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
};
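
With the AMD v2 vPMU enabled above, the reserved-bit masks follow directly from the counter count: with N general-purpose counters, bits [N-1:0] of PERF_CNTR_GLOBAL_CTL are writable and everything above is reserved, and the same mask is reused for GLOBAL_STATUS. A standalone illustration of the mask derivation (the counter count is an example value):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	unsigned int nr_gp = 6;				/* e.g. PerfCtrCore count */
	uint64_t global_ctrl_mask = ~((1ULL << nr_gp) - 1);

	printf("valid bits:    %#" PRIx64 "\n", (1ULL << nr_gp) - 1);
	printf("reserved mask: %#" PRIx64 "\n", global_ctrl_mask);
	return 0;
}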
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 69ae5e1b3120..07756b7348ae 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2216,10 +2216,7 @@ void __init sev_hardware_setup(void)
}
sev_asid_count = max_sev_asid - min_sev_asid + 1;
- if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
- goto out;
-
- pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
+ WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
sev_supported = true;
/* SEV-ES support requested? */
@@ -2244,13 +2241,19 @@ void __init sev_hardware_setup(void)
goto out;
sev_es_asid_count = min_sev_asid - 1;
- if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
- goto out;
-
- pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
+ WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
sev_es_supported = true;
out:
+ if (boot_cpu_has(X86_FEATURE_SEV))
+ pr_info("SEV %s (ASIDs %u - %u)\n",
+ sev_supported ? "enabled" : "disabled",
+ min_sev_asid, max_sev_asid);
+ if (boot_cpu_has(X86_FEATURE_SEV_ES))
+ pr_info("SEV-ES %s (ASIDs %u - %u)\n",
+ sev_es_supported ? "enabled" : "disabled",
+ min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
+
sev_enabled = sev_supported;
sev_es_enabled = sev_es_supported;
#endif
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 54089f990c8f..d381ad424554 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -244,15 +244,6 @@ static u8 rsm_ins_bytes[] = "\x0f\xaa";
static unsigned long iopm_base;
-struct kvm_ldttss_desc {
- u16 limit0;
- u16 base0;
- unsigned base1:8, type:5, dpl:2, p:1;
- unsigned limit1:4, zero0:3, g:1, base2:8;
- u32 base3;
- u32 zero1;
-} __attribute__((packed));
-
DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
/*
@@ -588,7 +579,6 @@ static int svm_hardware_enable(void)
struct svm_cpu_data *sd;
uint64_t efer;
- struct desc_struct *gdt;
int me = raw_smp_processor_id();
rdmsrl(MSR_EFER, efer);
@@ -601,9 +591,6 @@ static int svm_hardware_enable(void)
sd->next_asid = sd->max_asid + 1;
sd->min_asid = max_sev_asid + 1;
- gdt = get_current_gdt_rw();
- sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
-
wrmsrl(MSR_EFER, efer | EFER_SVME);
wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
@@ -752,7 +739,7 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
BUG_ON(offset == MSR_INVALID);
- return !!test_bit(bit_write, &tmp);
+ return test_bit(bit_write, &tmp);
}
static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
@@ -2939,9 +2926,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_IA32_CR_PAT:
- if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
- return 1;
- vcpu->arch.pat = data;
+ ret = kvm_set_msr_common(vcpu, msr);
+ if (ret)
+ break;
+
svm->vmcb01.ptr->save.g_pat = data;
if (is_guest_mode(vcpu))
nested_vmcb02_compute_g_pat(svm);
@@ -3418,8 +3406,6 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
struct kvm_run *kvm_run = vcpu->run;
u32 exit_code = svm->vmcb->control.exit_code;
- trace_kvm_exit(vcpu, KVM_ISA_SVM);
-
/* SEV-ES guests must use the CR write traps to track CR registers. */
if (!sev_es_guest(vcpu->kvm)) {
if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
@@ -3457,14 +3443,6 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return svm_invoke_exit_handler(vcpu, exit_code);
}
-static void reload_tss(struct kvm_vcpu *vcpu)
-{
- struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
-
- sd->tss_desc->type = 9; /* available 32/64-bit TSS */
- load_TR_desc();
-}
-
static void pre_svm_run(struct kvm_vcpu *vcpu)
{
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
@@ -4099,9 +4077,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);
- if (!sev_es_guest(vcpu->kvm))
- reload_tss(vcpu);
-
if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
@@ -4156,6 +4131,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(vcpu);
+ trace_kvm_exit(vcpu, KVM_ISA_SVM);
+
svm_complete_interrupts(vcpu);
if (is_guest_mode(vcpu))
@@ -5025,9 +5002,22 @@ static __init void svm_set_cpu_caps(void)
boot_cpu_has(X86_FEATURE_AMD_SSBD))
kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
- /* AMD PMU PERFCTR_CORE CPUID */
- if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
- kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);
+ if (enable_pmu) {
+ /*
+ * Enumerate support for PERFCTR_CORE if and only if KVM has
+ * access to enough counters to virtualize "core" support,
+ * otherwise limit vPMU support to the legacy number of counters.
+ */
+ if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
+ kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
+ kvm_pmu_cap.num_counters_gp);
+ else
+ kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);
+
+ if (kvm_pmu_cap.version != 2 ||
+ !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
+ kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
+ }
/* CPUID 0x8000001F (SME/SEV features) */
sev_set_cpu_caps();
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f44751dd8d5d..18af7e712a5a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -303,7 +303,6 @@ struct svm_cpu_data {
u32 max_asid;
u32 next_asid;
u32 min_asid;
- struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
unsigned long save_area_pa;
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 45162c1bcd8f..d0abee35d7ba 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -152,8 +152,8 @@ static inline bool cpu_has_vmx_ept(void)
static inline bool vmx_umip_emulated(void)
{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_DESC;
+ return !boot_cpu_has(X86_FEATURE_UMIP) &&
+ (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_DESC);
}
static inline bool cpu_has_vmx_rdtscp(void)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index e35cf0bd0df9..516391cc0d64 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2328,8 +2328,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
* Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
* will not have to rewrite the controls just for this bit.
*/
- if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
- (vmcs12->guest_cr4 & X86_CR4_UMIP))
+ if (vmx_umip_emulated() && (vmcs12->guest_cr4 & X86_CR4_UMIP))
exec_control |= SECONDARY_EXEC_DESC;
if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
@@ -2649,7 +2648,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
- intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
+ kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
vmcs12->guest_ia32_perf_global_ctrl))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
@@ -4524,7 +4523,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.pat = vmcs12->host_ia32_pat;
}
if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
- intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
+ kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
vmcs12->host_ia32_perf_global_ctrl));
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 741efe2c497b..80c769c58a87 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -73,18 +73,6 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
}
}
-static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
-{
- int bit;
-
- if (!diff)
- return;
-
- for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
- set_bit(bit, pmu->reprogram_pmi);
- kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
-}
-
static bool intel_hw_event_available(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -107,17 +95,6 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc)
return true;
}
-/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
-static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
-{
- struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-
- if (!intel_pmu_has_perf_global_ctrl(pmu))
- return true;
-
- return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
-}
-
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -198,11 +175,7 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
- case MSR_CORE_PERF_GLOBAL_STATUS:
- case MSR_CORE_PERF_GLOBAL_CTRL:
- case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- return intel_pmu_has_perf_global_ctrl(pmu);
- break;
+ return kvm_pmu_has_perf_global_ctrl(pmu);
case MSR_IA32_PEBS_ENABLE:
ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
break;
@@ -352,15 +325,6 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_CORE_PERF_FIXED_CTR_CTRL:
msr_info->data = pmu->fixed_ctr_ctrl;
break;
- case MSR_CORE_PERF_GLOBAL_STATUS:
- msr_info->data = pmu->global_status;
- break;
- case MSR_CORE_PERF_GLOBAL_CTRL:
- msr_info->data = pmu->global_ctrl;
- break;
- case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- msr_info->data = 0;
- break;
case MSR_IA32_PEBS_ENABLE:
msr_info->data = pmu->pebs_enable;
break;
@@ -410,29 +374,6 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (pmu->fixed_ctr_ctrl != data)
reprogram_fixed_counters(pmu, data);
break;
- case MSR_CORE_PERF_GLOBAL_STATUS:
- if (!msr_info->host_initiated)
- return 1; /* RO MSR */
-
- pmu->global_status = data;
- break;
- case MSR_CORE_PERF_GLOBAL_CTRL:
- if (!kvm_valid_perf_global_ctrl(pmu, data))
- return 1;
-
- if (pmu->global_ctrl != data) {
- diff = pmu->global_ctrl ^ data;
- pmu->global_ctrl = data;
- reprogram_counters(pmu, diff);
- }
- break;
- case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- if (data & pmu->global_ovf_ctrl_mask)
- return 1;
-
- if (!msr_info->host_initiated)
- pmu->global_status &= ~data;
- break;
case MSR_IA32_PEBS_ENABLE:
if (data & pmu->pebs_enable_mask)
return 1;
@@ -444,8 +385,6 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
case MSR_IA32_DS_AREA:
- if (msr_info->host_initiated && data && !guest_cpuid_has(vcpu, X86_FEATURE_DS))
- return 1;
if (is_noncanonical_address(data, vcpu))
return 1;
@@ -531,7 +470,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->reserved_bits = 0xffffffff00200000ull;
pmu->raw_event_mask = X86_RAW_EVENT_MASK;
pmu->global_ctrl_mask = ~0ull;
- pmu->global_ovf_ctrl_mask = ~0ull;
+ pmu->global_status_mask = ~0ull;
pmu->fixed_ctr_ctrl_mask = ~0ull;
pmu->pebs_enable_mask = ~0ull;
pmu->pebs_data_cfg_mask = ~0ull;
@@ -585,11 +524,17 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
pmu->global_ctrl_mask = counter_mask;
- pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
+
+ /*
+ * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
+ * share reserved bit definitions. The kernel just happens to use
+ * OVF_CTRL for the names.
+ */
+ pmu->global_status_mask = pmu->global_ctrl_mask
& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
if (vmx_pt_mode_is_host_guest())
- pmu->global_ovf_ctrl_mask &=
+ pmu->global_status_mask &=
~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
@@ -801,7 +746,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
pmc = intel_pmc_idx_to_pmc(pmu, bit);
if (!pmc || !pmc_speculative_in_use(pmc) ||
- !intel_pmc_is_enabled(pmc) || !pmc->perf_event)
+ !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
continue;
/*
@@ -816,7 +761,6 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
struct kvm_pmu_ops intel_pmu_ops __initdata = {
.hw_event_available = intel_hw_event_available,
- .pmc_is_enabled = intel_pmc_is_enabled,
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
@@ -831,4 +775,5 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
.cleanup = intel_pmu_cleanup,
.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
+ .MIN_NR_GP_COUNTERS = 1,
};
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index 2261b684a7d4..3e822e582497 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -357,11 +357,12 @@ static int handle_encls_einit(struct kvm_vcpu *vcpu)
static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf)
{
- if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX))
- return false;
-
+ /*
+ * ENCLS generates a #UD if SGX1 isn't supported, i.e. this point will
+ * be reached if and only if the SGX1 leafs are enabled.
+ */
if (leaf >= ECREATE && leaf <= ETRACK)
- return guest_cpuid_has(vcpu, X86_FEATURE_SGX1);
+ return true;
if (leaf >= EAUG && leaf <= EMODT)
return guest_cpuid_has(vcpu, X86_FEATURE_SGX2);
@@ -380,9 +381,11 @@ int handle_encls(struct kvm_vcpu *vcpu)
{
u32 leaf = (u32)kvm_rax_read(vcpu);
- if (!encls_leaf_enabled_in_guest(vcpu, leaf)) {
+ if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
+ !guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) {
kvm_queue_exception(vcpu, UD_VECTOR);
- } else if (!sgx_enabled_in_guest_bios(vcpu)) {
+ } else if (!encls_leaf_enabled_in_guest(vcpu, leaf) ||
+ !sgx_enabled_in_guest_bios(vcpu) || !is_paging(vcpu)) {
kvm_inject_gp(vcpu, 0);
} else {
if (leaf == ECREATE)
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 631fd7da2bc3..07e927d4d099 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -187,7 +187,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
_ASM_EXTABLE(.Lvmresume, .Lfixup)
_ASM_EXTABLE(.Lvmlaunch, .Lfixup)
-SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
+SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
UNWIND_HINT_RESTORE
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 44fb619803b8..0ecf4be2c6af 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2287,19 +2287,16 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
goto find_uret_msr;
case MSR_IA32_CR_PAT:
- if (!kvm_pat_valid(data))
- return 1;
+ ret = kvm_set_msr_common(vcpu, msr_info);
+ if (ret)
+ break;
if (is_guest_mode(vcpu) &&
get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
get_vmcs12(vcpu)->guest_ia32_pat = data;
- if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
vmcs_write64(GUEST_IA32_PAT, data);
- vcpu->arch.pat = data;
- break;
- }
- ret = kvm_set_msr_common(vcpu, msr_info);
break;
case MSR_IA32_MCG_EXT_CTL:
if ((!msr_info->host_initiated &&
@@ -3387,15 +3384,15 @@ static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- unsigned long old_cr4 = vcpu->arch.cr4;
+ unsigned long old_cr4 = kvm_read_cr4(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long hw_cr4;
+
/*
* Pass through host's Machine Check Enable value to hw_cr4, which
* is in force while we are in guest mode. Do not let guests control
* this bit, even if host CR4.MCE == 0.
*/
- unsigned long hw_cr4;
-
hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
if (is_unrestricted_guest(vcpu))
hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
@@ -3404,7 +3401,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
else
hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
- if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
+ if (vmx_umip_emulated()) {
if (cr4 & X86_CR4_UMIP) {
secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
hw_cr4 &= ~X86_CR4_UMIP;
@@ -5402,7 +5399,13 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
static int handle_desc(struct kvm_vcpu *vcpu)
{
- WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
+ /*
+ * UMIP emulation relies on intercepting writes to CR4.UMIP, i.e. this
+ * and other code needs to be updated if UMIP can be guest owned.
+ */
+ BUILD_BUG_ON(KVM_POSSIBLE_CR4_GUEST_BITS & X86_CR4_UMIP);
+
+ WARN_ON_ONCE(!kvm_is_cr4_bit_set(vcpu, X86_CR4_UMIP));
return kvm_emulate_instruction(vcpu, 0);
}
@@ -6708,7 +6711,12 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
{
- struct page *page;
+ const gfn_t gfn = APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ struct kvm_memory_slot *slot;
+ unsigned long mmu_seq;
+ kvm_pfn_t pfn;
/* Defer reload until vmcs01 is the current VMCS. */
if (is_guest_mode(vcpu)) {
@@ -6720,18 +6728,53 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
return;
- page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
- if (is_error_page(page))
+ /*
+ * Grab the memslot so that the hva lookup for the mmu_notifier retry
+ * is guaranteed to use the same memslot as the pfn lookup, i.e. rely
+ * on the pfn lookup's validation of the memslot to ensure a valid hva
+ * is used for the retry check.
+ */
+ slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
+ if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return;
- vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(page));
+ /*
+ * Ensure that the mmu_notifier sequence count is read before KVM
+ * retrieves the pfn from the primary MMU. Note, the memslot is
+ * protected by SRCU, not the mmu_notifier. Pairs with the smp_wmb()
+ * in kvm_mmu_invalidate_end().
+ */
+ mmu_seq = kvm->mmu_invalidate_seq;
+ smp_rmb();
+
+ /*
+ * No need to retry if the memslot does not exist or is invalid. KVM
+ * controls the APIC-access page memslot, and only deletes the memslot
+ * if APICv is permanently inhibited, i.e. the memslot won't reappear.
+ */
+ pfn = gfn_to_pfn_memslot(slot, gfn);
+ if (is_error_noslot_pfn(pfn))
+ return;
+
+ read_lock(&vcpu->kvm->mmu_lock);
+ if (mmu_invalidate_retry_hva(kvm, mmu_seq,
+ gfn_to_hva_memslot(slot, gfn))) {
+ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+ read_unlock(&vcpu->kvm->mmu_lock);
+ goto out;
+ }
+
+ vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
+ read_unlock(&vcpu->kvm->mmu_lock);
+
vmx_flush_tlb_current(vcpu);
+out:
/*
* Do not pin apic access page in memory, the MMU notifier
* will call us again if it is migrated or swapped out.
*/
- put_page(page);
+ kvm_release_pfn_clean(pfn);
}
static void vmx_hwapic_isr_update(int max_isr)
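The APIC-access page reload above follows KVM's usual invalidation-sequence pattern: snapshot the sequence count, perform the (possibly sleeping) pfn lookup, then recheck the count under the lock and retry if an invalidation raced in. Below is a rough single-threaded userspace sketch of that ordering; the helper names are illustrative placeholders, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong invalidate_seq;      /* bumped by the invalidator side */

static void invalidate(void)             /* stand-in for kvm_mmu_invalidate_end() */
{
	atomic_fetch_add_explicit(&invalidate_seq, 1, memory_order_release);
}

static bool map_page(unsigned long *out) /* stand-in for the sleepable pfn lookup */
{
	*out = 0x1234;                   /* pretend translation result */
	return true;
}

int main(void)
{
	unsigned long pfn = 0;

	invalidate();   /* pretend an earlier invalidation already happened */

	for (;;) {
		/* 1. snapshot the sequence before the lookup (smp_rmb() in the kernel) */
		unsigned long seq = atomic_load_explicit(&invalidate_seq,
							 memory_order_acquire);

		/* 2. potentially sleeping lookup in the primary MMU */
		if (!map_page(&pfn))
			return 1;

		/* 3. recheck; retry the whole lookup if an invalidation slipped in */
		if (atomic_load_explicit(&invalidate_seq,
					 memory_order_acquire) == seq)
			break;
	}
	printf("installed pfn %#lx\n", pfn);
	return 0;
}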
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 9e66531861cf..32384ba38499 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -93,18 +93,6 @@ union vmx_exit_reason {
u32 full;
};
-static inline bool intel_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
-{
- /*
- * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
- * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
- * greater than zero. However, KVM only exposes and emulates the MSR
- * to/for the guest if the guest PMU supports at least "Architectural
- * Performance Monitoring Version 2".
- */
- return pmu->version > 1;
-}
-
struct lbr_desc {
/* Basic info about guest LBR records. */
struct x86_pmu_lbr records;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7f70207e8689..a6b9bea62fb8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1017,13 +1017,11 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
}
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
- if (static_cpu_has(X86_FEATURE_PKU) &&
+ if (cpu_feature_enabled(X86_FEATURE_PKU) &&
vcpu->arch.pkru != vcpu->arch.host_pkru &&
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
write_pkru(vcpu->arch.pkru);
-#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
@@ -1032,15 +1030,13 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.guest_state_protected)
return;
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
- if (static_cpu_has(X86_FEATURE_PKU) &&
+ if (cpu_feature_enabled(X86_FEATURE_PKU) &&
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
vcpu->arch.pkru = rdpkru();
if (vcpu->arch.pkru != vcpu->arch.host_pkru)
write_pkru(vcpu->arch.host_pkru);
}
-#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
@@ -1427,15 +1423,14 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
/*
- * List of msr numbers which we expose to userspace through KVM_GET_MSRS
- * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
- *
- * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features)
- * extract the supported MSRs from the related const lists.
- * msrs_to_save is selected from the msrs_to_save_all to reflect the
- * capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
- * may depend on host virtualization features rather than host cpu features.
+ * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features) track
+ * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
+ * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
+ * require host support, i.e. should be probed via RDMSR. emulated_msrs holds
+ * MSRs that KVM emulates without strictly requiring host support.
+ * msr_based_features holds MSRs that enumerate features, i.e. are effectively
+ * CPUID leafs. Note, msr_based_features isn't mutually exclusive with
+ * msrs_to_save and emulated_msrs.
*/
static const u32 msrs_to_save_base[] = {
@@ -1483,6 +1478,10 @@ static const u32 msrs_to_save_pmu[] = {
MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
+
+ MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
+ MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
+ MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
};
static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
@@ -1531,11 +1530,11 @@ static const u32 emulated_msrs_all[] = {
MSR_IA32_UCODE_REV,
/*
- * The following list leaves out MSRs whose values are determined
- * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
- * We always support the "true" VMX control MSRs, even if the host
- * processor does not, so I am putting these registers here rather
- * than in msrs_to_save_all.
+ * KVM always supports the "true" VMX control MSRs, even if the host
+ * does not. The VMX MSRs as a whole are considered "emulated" as KVM
+ * doesn't strictly require them to exist in the host (ignoring that
+ * KVM would refuse to load in the first place if the core set of MSRs
+ * aren't supported).
*/
MSR_IA32_VMX_BASIC,
MSR_IA32_VMX_TRUE_PINBASED_CTLS,
@@ -1631,7 +1630,7 @@ static u64 kvm_get_arch_capabilities(void)
* If we're doing cache flushes (either "always" or "cond")
* we will do one whenever the guest does a vmlaunch/vmresume.
* If an outer hypervisor is doing the cache flush for us
- * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
+ * (ARCH_CAP_SKIP_VMENTRY_L1DFLUSH), we can safely pass that
* capability to the guest too, and if EPT is disabled we're not
* vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will
* require a nested hypervisor to do a flush of its own.
@@ -1809,7 +1808,7 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
unsigned long *bitmap = ranges[i].bitmap;
if ((index >= start) && (index < end) && (flags & type)) {
- allowed = !!test_bit(index - start, bitmap);
+ allowed = test_bit(index - start, bitmap);
break;
}
}
@@ -3701,8 +3700,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
}
break;
- case 0x200 ... MSR_IA32_MC0_CTL2 - 1:
- case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff:
+ case MSR_IA32_CR_PAT:
+ if (!kvm_pat_valid(data))
+ return 1;
+
+ vcpu->arch.pat = data;
+ break;
+ case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
+ case MSR_MTRRdefType:
return kvm_mtrr_set_msr(vcpu, msr, data);
case MSR_IA32_APICBASE:
return kvm_set_apic_base(vcpu, msr_info);
@@ -4109,9 +4114,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
break;
}
+ case MSR_IA32_CR_PAT:
+ msr_info->data = vcpu->arch.pat;
+ break;
case MSR_MTRRcap:
- case 0x200 ... MSR_IA32_MC0_CTL2 - 1:
- case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff:
+ case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
+ case MSR_MTRRdefType:
return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
case 0xcd: /* fsb frequency */
msr_info->data = 3;
@@ -7149,6 +7157,12 @@ static void kvm_probe_msr_to_save(u32 msr_index)
kvm_pmu_cap.num_counters_fixed)
return;
break;
+ case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+ if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
+ return;
+ break;
case MSR_IA32_XFD:
case MSR_IA32_XFD_ERR:
if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
@@ -10434,20 +10448,6 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
}
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
-{
- unsigned long apic_address;
-
- /*
- * The physical address of apic access page is stored in the VMCS.
- * Update it when it becomes invalid.
- */
- apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
- if (start <= apic_address && apic_address < end)
- kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
-}
-
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
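With MSR_IA32_CR_PAT handled in kvm_set_msr_common(), the common code becomes the single place that validates the eight PAT entries before caching vcpu->arch.pat. A simplified per-byte version of such a check follows; it is not the kernel's kvm_pat_valid() implementation, just the same rule spelled out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* valid PAT memory types: UC, WC, WT, WP, WB, UC- (2 and 3 are reserved) */
static bool pat_entry_valid(uint8_t type)
{
	return type == 0 || type == 1 || type == 4 ||
	       type == 5 || type == 6 || type == 7;
}

static bool pat_valid(uint64_t pat)
{
	for (int i = 0; i < 8; i++)
		if (!pat_entry_valid((pat >> (i * 8)) & 0xff))
			return false;
	return true;
}

int main(void)
{
	printf("%d\n", pat_valid(0x0007040600070406ull)); /* 1: power-on default */
	printf("%d\n", pat_valid(0x0000000000000002ull)); /* 0: reserved type 2 */
	return 0;
}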
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index c544602d07a3..82e3dafc5453 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -309,7 +309,6 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
-bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 232acf418cfb..77f7ac3668cb 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -853,9 +853,9 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
/* Disable interrupts around EFI calls: */
local_irq_save(flags);
- status = efi_call(efi.runtime->set_virtual_address_map,
- memory_map_size, descriptor_size,
- descriptor_version, virtual_map);
+ status = arch_efi_call_virt(efi.runtime, set_virtual_address_map,
+ memory_map_size, descriptor_size,
+ descriptor_version, virtual_map);
local_irq_restore(flags);
efi_fpu_end();
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index aaf9903ad7b2..fc49be622e05 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -2086,6 +2086,9 @@ void blk_cgroup_bio_start(struct bio *bio)
struct blkg_iostat_set *bis;
unsigned long flags;
+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
+ return;
+
/* Root-level stats are sourced from system-wide IO stats */
if (!cgroup_parent(blkcg->css.cgroup))
return;
@@ -2116,8 +2119,7 @@ void blk_cgroup_bio_start(struct bio *bio)
}
u64_stats_update_end_irqrestore(&bis->sync, flags);
- if (cgroup_subsys_on_dfl(io_cgrp_subsys))
- cgroup_rstat_updated(blkcg->css.cgroup, cpu);
+ cgroup_rstat_updated(blkcg->css.cgroup, cpu);
put_cpu();
}
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 6084a9519883..9dfcf540f400 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -3301,11 +3301,9 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
blk_stat_enable_accounting(disk->queue);
blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
ioc->enabled = true;
- wbt_disable_default(disk);
} else {
blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
ioc->enabled = false;
- wbt_enable_default(disk);
}
if (user) {
@@ -3318,6 +3316,11 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
ioc_refresh_params(ioc, true);
spin_unlock_irq(&ioc->lock);
+ if (enable)
+ wbt_disable_default(disk);
+ else
+ wbt_enable_default(disk);
+
blk_mq_unquiesce_queue(disk->queue);
blk_mq_unfreeze_queue(disk->queue);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index decb6ab2d508..5504719b970d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -49,17 +49,8 @@ static void blk_mq_request_bypass_insert(struct request *rq,
blk_insert_t flags);
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);
-
-static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
- blk_qc_t qc)
-{
- return xa_load(&q->hctx_table, qc);
-}
-
-static inline blk_qc_t blk_rq_to_qc(struct request *rq)
-{
- return rq->mq_hctx->queue_num;
-}
+static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ struct io_comp_batch *iob, unsigned int flags);
/*
* Check if any of the ctx, dispatch list or elevator
@@ -1248,7 +1239,7 @@ void blk_mq_start_request(struct request *rq)
q->integrity.profile->prepare_fn(rq);
#endif
if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
- WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
+ WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
}
EXPORT_SYMBOL(blk_mq_start_request);
@@ -1280,7 +1271,11 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
if (!plug->multiple_queues && last && last->q != rq->q)
plug->multiple_queues = true;
- if (!plug->has_elevator && (rq->rq_flags & RQF_USE_SCHED))
+ /*
+ * Any request allocated from sched tags can't be issued to
+ * ->queue_rqs() directly
+ */
+ if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
plug->has_elevator = true;
rq->rq_next = NULL;
rq_list_add(&plug->mq_list, rq);
@@ -1350,7 +1345,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
do {
- blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
+ blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
cond_resched();
} while (!completion_done(wait));
}
@@ -4745,10 +4740,9 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
-int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
- unsigned int flags)
+static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ struct io_comp_batch *iob, unsigned int flags)
{
- struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
long state = get_current_state();
int ret;
@@ -4773,6 +4767,32 @@ int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *
return 0;
}
+int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
+ struct io_comp_batch *iob, unsigned int flags)
+{
+ struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
+
+ return blk_hctx_poll(q, hctx, iob, flags);
+}
+
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct request_queue *q = rq->q;
+ int ret;
+
+ if (!blk_rq_is_poll(rq))
+ return 0;
+ if (!percpu_ref_tryget(&q->q_usage_counter))
+ return 0;
+
+ ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
+ blk_queue_exit(q);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blk_rq_poll);
+
unsigned int blk_mq_rq_cpu(struct request *rq)
{
return rq->mq_ctx->cpu;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a64208583853..afc797fb0dfc 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -47,19 +47,6 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
-static ssize_t queue_var_store64(s64 *var, const char *page)
-{
- int err;
- s64 v;
-
- err = kstrtos64(page, 10, &v);
- if (err < 0)
- return err;
-
- *var = v;
- return 0;
-}
-
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
return queue_var_show(q->nr_requests, page);
@@ -451,61 +438,6 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
return count;
}
-static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
-{
- if (!wbt_rq_qos(q))
- return -EINVAL;
-
- if (wbt_disabled(q))
- return sprintf(page, "0\n");
-
- return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
-}
-
-static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
- size_t count)
-{
- struct rq_qos *rqos;
- ssize_t ret;
- s64 val;
-
- ret = queue_var_store64(&val, page);
- if (ret < 0)
- return ret;
- if (val < -1)
- return -EINVAL;
-
- rqos = wbt_rq_qos(q);
- if (!rqos) {
- ret = wbt_init(q->disk);
- if (ret)
- return ret;
- }
-
- if (val == -1)
- val = wbt_default_latency_nsec(q);
- else if (val >= 0)
- val *= 1000ULL;
-
- if (wbt_get_min_lat(q) == val)
- return count;
-
- /*
- * Ensure that the queue is idled, in case the latency update
- * ends up either enabling or disabling wbt completely. We can't
- * have IO inflight if that happens.
- */
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
-
- wbt_set_min_lat(q, val);
-
- blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
-
- return count;
-}
-
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -598,7 +530,6 @@ QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
-QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
@@ -617,8 +548,79 @@ QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
+#ifdef CONFIG_BLK_WBT
+static ssize_t queue_var_store64(s64 *var, const char *page)
+{
+ int err;
+ s64 v;
+
+ err = kstrtos64(page, 10, &v);
+ if (err < 0)
+ return err;
+
+ *var = v;
+ return 0;
+}
+
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+ if (!wbt_rq_qos(q))
+ return -EINVAL;
+
+ if (wbt_disabled(q))
+ return sprintf(page, "0\n");
+
+ return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ struct rq_qos *rqos;
+ ssize_t ret;
+ s64 val;
+
+ ret = queue_var_store64(&val, page);
+ if (ret < 0)
+ return ret;
+ if (val < -1)
+ return -EINVAL;
+
+ rqos = wbt_rq_qos(q);
+ if (!rqos) {
+ ret = wbt_init(q->disk);
+ if (ret)
+ return ret;
+ }
+
+ if (val == -1)
+ val = wbt_default_latency_nsec(q);
+ else if (val >= 0)
+ val *= 1000ULL;
+
+ if (wbt_get_min_lat(q) == val)
+ return count;
+
+ /*
+ * Ensure that the queue is idled, in case the latency update
+ * ends up either enabling or disabling wbt completely. We can't
+ * have IO inflight if that happens.
+ */
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
+
+ wbt_set_min_lat(q, val);
+
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
+
+ return count;
+}
+
+QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
+#endif
+
static struct attribute *queue_attrs[] = {
- &queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
@@ -626,7 +628,6 @@ static struct attribute *queue_attrs[] = {
&queue_max_discard_segments_entry.attr,
&queue_max_integrity_segments_entry.attr,
&queue_max_segment_size_entry.attr,
- &elv_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
&queue_logical_block_size_entry.attr,
&queue_physical_block_size_entry.attr,
@@ -647,7 +648,6 @@ static struct attribute *queue_attrs[] = {
&queue_max_open_zones_entry.attr,
&queue_max_active_zones_entry.attr,
&queue_nomerges_entry.attr,
- &queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
&queue_stable_writes_entry.attr,
&queue_random_entry.attr,
@@ -655,9 +655,7 @@ static struct attribute *queue_attrs[] = {
&queue_wc_entry.attr,
&queue_fua_entry.attr,
&queue_dax_entry.attr,
- &queue_wb_lat_entry.attr,
&queue_poll_delay_entry.attr,
- &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
&blk_throtl_sample_time_entry.attr,
#endif
@@ -666,16 +664,23 @@ static struct attribute *queue_attrs[] = {
NULL,
};
+static struct attribute *blk_mq_queue_attrs[] = {
+ &queue_requests_entry.attr,
+ &elv_iosched_entry.attr,
+ &queue_rq_affinity_entry.attr,
+ &queue_io_timeout_entry.attr,
+#ifdef CONFIG_BLK_WBT
+ &queue_wb_lat_entry.attr,
+#endif
+ NULL,
+};
+
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
int n)
{
struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
struct request_queue *q = disk->queue;
- if (attr == &queue_io_timeout_entry.attr &&
- (!q->mq_ops || !q->mq_ops->timeout))
- return 0;
-
if ((attr == &queue_max_open_zones_entry.attr ||
attr == &queue_max_active_zones_entry.attr) &&
!blk_queue_is_zoned(q))
@@ -684,11 +689,30 @@ static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
return attr->mode;
}
+static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
+ struct request_queue *q = disk->queue;
+
+ if (!queue_is_mq(q))
+ return 0;
+
+ if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
+ return 0;
+
+ return attr->mode;
+}
+
static struct attribute_group queue_attr_group = {
.attrs = queue_attrs,
.is_visible = queue_attr_visible,
};
+static struct attribute_group blk_mq_queue_attr_group = {
+ .attrs = blk_mq_queue_attrs,
+ .is_visible = blk_mq_queue_attr_visible,
+};
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
@@ -733,6 +757,7 @@ static const struct sysfs_ops queue_sysfs_ops = {
static const struct attribute_group *blk_queue_attr_groups[] = {
&queue_attr_group,
+ &blk_mq_queue_attr_group,
NULL
};
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9d010d867fbf..7397ff199d66 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2178,12 +2178,6 @@ bool __blk_throtl_bio(struct bio *bio)
rcu_read_lock();
- if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
- blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
- bio->bi_iter.bi_size);
- blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
- }
-
spin_lock_irq(&q->queue_lock);
throtl_update_latency_buckets(td);
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index ef4b7a4de987..d1ccbfe9f797 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -185,6 +185,15 @@ static inline bool blk_should_throtl(struct bio *bio)
struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
int rw = bio_data_dir(bio);
+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
+ if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
+ bio_set_flag(bio, BIO_CGROUP_ACCT);
+ blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
+ bio->bi_iter.bi_size);
+ }
+ blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
+ }
+
/* iops limit is always counted */
if (tg->has_rules_iops[rw])
return true;
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 7a87506ff8e1..0bb613139bec 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -146,7 +146,7 @@ enum {
static inline bool rwb_enabled(struct rq_wb *rwb)
{
return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
- rwb->wb_normal != 0;
+ rwb->enable_state != WBT_STATE_OFF_MANUAL;
}
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
@@ -201,15 +201,6 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
inflight = atomic_dec_return(&rqw->inflight);
/*
- * wbt got disabled with IO in flight. Wake up any potential
- * waiters, we don't have to do more than that.
- */
- if (unlikely(!rwb_enabled(rwb))) {
- rwb_wake_all(rwb);
- return;
- }
-
- /*
* For discards, our limit is always the background. For writes, if
* the device does write back caching, drop further down before we
* wake people up.
@@ -503,8 +494,7 @@ bool wbt_disabled(struct request_queue *q)
{
struct rq_qos *rqos = wbt_rq_qos(q);
- return !rqos || RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT ||
- RQWB(rqos)->enable_state == WBT_STATE_OFF_MANUAL;
+ return !rqos || !rwb_enabled(RQWB(rqos));
}
u64 wbt_get_min_lat(struct request_queue *q)
@@ -545,13 +535,6 @@ static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{
unsigned int limit;
- /*
- * If we got disabled, just return UINT_MAX. This ensures that
- * we'll properly inc a new IO, and dec+wakeup at the end.
- */
- if (!rwb_enabled(rwb))
- return UINT_MAX;
-
if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
return rwb->wb_background;
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index ba6cca5849a6..8a029e138f7a 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -18,10 +18,6 @@ u64 wbt_default_latency_nsec(struct request_queue *);
#else
-static inline int wbt_init(struct gendisk *disk)
-{
- return -EINVAL;
-}
static inline void wbt_disable_default(struct gendisk *disk)
{
}
@@ -31,21 +27,6 @@ static inline void wbt_enable_default(struct gendisk *disk)
static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
{
}
-static inline u64 wbt_get_min_lat(struct request_queue *q)
-{
- return 0;
-}
-static inline void wbt_set_min_lat(struct request_queue *q, u64 val)
-{
-}
-static inline u64 wbt_default_latency_nsec(struct request_queue *q)
-{
- return 0;
-}
-static inline bool wbt_disabled(struct request_queue *q)
-{
- return true;
-}
#endif /* CONFIG_BLK_WBT */
diff --git a/drivers/accessibility/speakup/Kconfig b/drivers/accessibility/speakup/Kconfig
index 07ecbbde0384..e84fb617acc4 100644
--- a/drivers/accessibility/speakup/Kconfig
+++ b/drivers/accessibility/speakup/Kconfig
@@ -46,6 +46,7 @@ if SPEAKUP
config SPEAKUP_SERIALIO
def_bool y
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
config SPEAKUP_SYNTH_ACNTSA
tristate "Accent SA synthesizer support"
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 56c073103cbb..1fbc9b921c4f 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -1287,7 +1287,7 @@ static struct var_t spk_vars[NB_ID] = {
[PUNC_LEVEL_ID] = { PUNC_LEVEL, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
[READING_PUNC_ID] = { READING_PUNC, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
[CURSOR_TIME_ID] = { CURSOR_TIME, .u.n = {NULL, 120, 50, 600, 0, 0, NULL} },
- [SAY_CONTROL_ID] { SAY_CONTROL, TOGGLE_0},
+ [SAY_CONTROL_ID] = { SAY_CONTROL, TOGGLE_0},
[SAY_WORD_CTL_ID] = {SAY_WORD_CTL, TOGGLE_0},
[NO_INTERRUPT_ID] = { NO_INTERRUPT, TOGGLE_0},
[KEY_ECHO_ID] = { KEY_ECHO, .u.n = {NULL, 1, 0, 2, 0, 0, NULL} },
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index fe00a5783f53..48d15dd785f6 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -9,6 +9,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -19,13 +20,16 @@
#include "internal.h"
+/* Exclude devices that have no _CRS resources provided */
+#define ACPI_ALLOW_WO_RESOURCES BIT(0)
+
static const struct acpi_device_id forbidden_id_list[] = {
{"ACPI0009", 0}, /* IOxAPIC */
{"ACPI000A", 0}, /* IOAPIC */
{"PNP0000", 0}, /* PIC */
{"PNP0100", 0}, /* Timer */
{"PNP0200", 0}, /* AT DMA Controller */
- {"SMB0001", 0}, /* ACPI SMBUS virtual device */
+ {ACPI_SMBUS_MS_HID, ACPI_ALLOW_WO_RESOURCES}, /* ACPI SMBUS virtual device */
{ }
};
@@ -83,6 +87,15 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
dest->parent = pci_find_resource(to_pci_dev(parent), dest);
}
+static unsigned int acpi_platform_resource_count(struct acpi_resource *ares, void *data)
+{
+ bool *has_resources = data;
+
+ *has_resources = true;
+
+ return AE_CTRL_TERMINATE;
+}
+
/**
* acpi_create_platform_device - Create platform device for ACPI device node
* @adev: ACPI device node to create a platform device for.
@@ -100,6 +113,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
struct acpi_device *parent = acpi_dev_parent(adev);
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
+ const struct acpi_device_id *match;
struct resource_entry *rentry;
struct list_head resource_list;
struct resource *resources = NULL;
@@ -109,8 +123,19 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
if (adev->physical_node_count)
return NULL;
- if (!acpi_match_device_ids(adev, forbidden_id_list))
- return ERR_PTR(-EINVAL);
+ match = acpi_match_acpi_device(forbidden_id_list, adev);
+ if (match) {
+ if (match->driver_data & ACPI_ALLOW_WO_RESOURCES) {
+ bool has_resources = false;
+
+ acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
+ acpi_platform_resource_count, &has_resources);
+ if (has_resources)
+ return ERR_PTR(-EINVAL);
+ } else {
+ return ERR_PTR(-EINVAL);
+ }
+ }
INIT_LIST_HEAD(&resource_list);
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e3e0bd0c5a50..2fc2b43a4ed3 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -682,7 +682,7 @@ bool acpi_device_is_first_physical_node(struct acpi_device *adev,
* resources available from it but they will be matched normally using functions
* provided by their bus types (and analogously for their modalias).
*/
-struct acpi_device *acpi_companion_match(const struct device *dev)
+const struct acpi_device *acpi_companion_match(const struct device *dev)
{
struct acpi_device *adev;
@@ -706,7 +706,7 @@ struct acpi_device *acpi_companion_match(const struct device *dev)
* identifiers and a _DSD object with the "compatible" property, use that
* property to match against the given list of identifiers.
*/
-static bool acpi_of_match_device(struct acpi_device *adev,
+static bool acpi_of_match_device(const struct acpi_device *adev,
const struct of_device_id *of_match_table,
const struct of_device_id **of_id)
{
@@ -808,7 +808,7 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id,
return true;
}
-static bool __acpi_match_device(struct acpi_device *device,
+static bool __acpi_match_device(const struct acpi_device *device,
const struct acpi_device_id *acpi_ids,
const struct of_device_id *of_ids,
const struct acpi_device_id **acpi_id,
@@ -851,6 +851,26 @@ out_acpi_match:
}
/**
+ * acpi_match_acpi_device - Match an ACPI device against a given list of ACPI IDs
+ * @ids: Array of struct acpi_device_id objects to match against.
+ * @adev: The ACPI device pointer to match.
+ *
+ * Match the ACPI device @adev against a given list of ACPI IDs @ids.
+ *
+ * Return:
+ * a pointer to the first matching ACPI ID on success or %NULL on failure.
+ */
+const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids,
+ const struct acpi_device *adev)
+{
+ const struct acpi_device_id *id = NULL;
+
+ __acpi_match_device(adev, ids, NULL, &id, NULL);
+ return id;
+}
+EXPORT_SYMBOL_GPL(acpi_match_acpi_device);
+
+/**
* acpi_match_device - Match a struct device against a given list of ACPI IDs
* @ids: Array of struct acpi_device_id object to match against.
* @dev: The device structure to match.
@@ -864,10 +884,7 @@ out_acpi_match:
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev)
{
- const struct acpi_device_id *id = NULL;
-
- __acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL);
- return id;
+ return acpi_match_acpi_device(ids, acpi_companion_match(dev));
}
EXPORT_SYMBOL_GPL(acpi_match_device);
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 0fbfbaa8d8e3..b9bbf0746199 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -283,7 +283,7 @@ int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env
}
EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
-static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
+static int __acpi_device_modalias(const struct acpi_device *adev, char *buf, int size)
{
int len, count;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 06ad497067ac..f4148dc50b9c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -11,6 +11,8 @@
#include <linux/idr.h>
+extern struct acpi_device *acpi_root;
+
int early_acpi_osi_init(void);
int acpi_osi_init(void);
acpi_status acpi_os_initialize1(void);
@@ -119,7 +121,7 @@ int acpi_bus_register_early_device(int type);
/* --------------------------------------------------------------------------
Device Matching and Notification
-------------------------------------------------------------------------- */
-struct acpi_device *acpi_companion_match(const struct device *dev);
+const struct acpi_device *acpi_companion_match(const struct device *dev);
int __acpi_device_uevent_modalias(const struct acpi_device *adev,
struct kobj_uevent_env *env);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1c3e1e2bb0b5..5b145f1aaa1b 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -23,8 +23,7 @@
#include <linux/dma-direct.h>
#include "internal.h"
-
-extern struct acpi_device *acpi_root;
+#include "sleep.h"
#define ACPI_BUS_CLASS "system_bus"
#define ACPI_BUS_HID "LNXSYBUS"
@@ -930,26 +929,29 @@ static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
return err;
}
+/* Do not use a button for S5 wakeup */
+#define ACPI_AVOID_WAKE_FROM_S5 BIT(0)
+
static bool acpi_wakeup_gpe_init(struct acpi_device *device)
{
static const struct acpi_device_id button_device_ids[] = {
- {"PNP0C0C", 0}, /* Power button */
- {"PNP0C0D", 0}, /* Lid */
- {"PNP0C0E", 0}, /* Sleep button */
+ {"PNP0C0C", 0}, /* Power button */
+ {"PNP0C0D", ACPI_AVOID_WAKE_FROM_S5}, /* Lid */
+ {"PNP0C0E", ACPI_AVOID_WAKE_FROM_S5}, /* Sleep button */
{"", 0},
};
struct acpi_device_wakeup *wakeup = &device->wakeup;
+ const struct acpi_device_id *match;
acpi_status status;
wakeup->flags.notifier_present = 0;
/* Power button, Lid switch always enable wakeup */
- if (!acpi_match_device_ids(device, button_device_ids)) {
- if (!acpi_match_device_ids(device, &button_device_ids[1])) {
- /* Do not use Lid/sleep button for S5 wakeup */
- if (wakeup->sleep_state == ACPI_STATE_S5)
- wakeup->sleep_state = ACPI_STATE_S4;
- }
+ match = acpi_match_acpi_device(button_device_ids, device);
+ if (match) {
+ if ((match->driver_data & ACPI_AVOID_WAKE_FROM_S5) &&
+ wakeup->sleep_state == ACPI_STATE_S5)
+ wakeup->sleep_state = ACPI_STATE_S4;
acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
device_set_wakeup_capable(&device->dev, true);
return true;
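acpi_match_acpi_device() lets callers attach per-ID policy through driver_data, as the scan and platform code now do with ACPI_AVOID_WAKE_FROM_S5 and ACPI_ALLOW_WO_RESOURCES. A standalone sketch of that table-plus-flag pattern; the struct name and flag value here are illustrative, not the ACPI core's types.

#include <stdio.h>
#include <string.h>

#define AVOID_WAKE_FROM_S5  (1u << 0)   /* illustrative flag, mirrors BIT(0) */

struct id_entry {
	const char *hid;
	unsigned long driver_data;
};

static const struct id_entry button_ids[] = {
	{ "PNP0C0C", 0 },                    /* power button */
	{ "PNP0C0D", AVOID_WAKE_FROM_S5 },   /* lid */
	{ "PNP0C0E", AVOID_WAKE_FROM_S5 },   /* sleep button */
	{ NULL, 0 },
};

static const struct id_entry *match_id(const struct id_entry *ids, const char *hid)
{
	for (; ids->hid; ids++)
		if (!strcmp(ids->hid, hid))
			return ids;
	return NULL;
}

int main(void)
{
	const struct id_entry *m = match_id(button_ids, "PNP0C0D");

	if (m && (m->driver_data & AVOID_WAKE_FROM_S5))
		printf("%s: demote S5 wakeup to S4\n", m->hid);
	return 0;
}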
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8fb7672021ee..486c8271cab7 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -66,6 +66,7 @@
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
+#include <linux/ktime.h>
#include <uapi/linux/android/binder.h>
@@ -2918,6 +2919,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
+ ktime_t t_start_time = ktime_get();
char *secctx = NULL;
u32 secctx_sz = 0;
struct list_head sgc_head;
@@ -3159,6 +3161,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = t_debug_id;
+ t->start_time = t_start_time;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -3183,6 +3186,8 @@ static void binder_transaction(struct binder_proc *proc,
t->from = thread;
else
t->from = NULL;
+ t->from_pid = proc->pid;
+ t->from_tid = thread->pid;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
@@ -5944,17 +5949,19 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
{
struct binder_proc *to_proc;
struct binder_buffer *buffer = t->buffer;
+ ktime_t current_time = ktime_get();
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
prefix, t->debug_id, t,
- t->from ? t->from->proc->pid : 0,
- t->from ? t->from->pid : 0,
+ t->from_pid,
+ t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply);
+ t->code, t->flags, t->priority, t->need_reply,
+ ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
if (proc != to_proc) {
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 28ef5b3704b1..7270d4d22207 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -515,6 +515,8 @@ struct binder_transaction {
int debug_id;
struct binder_work work;
struct binder_thread *from;
+ pid_t from_pid;
+ pid_t from_tid;
struct binder_transaction *from_parent;
struct binder_proc *to_proc;
struct binder_thread *to_thread;
@@ -528,6 +530,7 @@ struct binder_transaction {
long priority;
long saved_priority;
kuid_t sender_euid;
+ ktime_t start_time;
struct list_head fd_fixups;
binder_uintptr_t security_ctx;
/**
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index ab30c7138d73..81fc63f6b008 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -9,14 +9,14 @@
*/
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/device.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
-#include <linux/acpi.h>
#include <linux/pci_ids.h>
#include "ahci.h"
@@ -56,10 +56,10 @@ static int ahci_probe(struct platform_device *pdev)
if (rc)
return rc;
- if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
+ if (device_is_compatible(dev, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
- port = acpi_device_get_match_data(dev);
+ port = device_get_match_data(dev);
if (!port)
port = &ahci_port_info;
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 55e3ee2da98f..675ad3139224 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -149,11 +149,8 @@ int isa_register_driver(struct isa_driver *isa_driver, unsigned int ndev)
break;
}
- if (isa_dev->dev.platform_data) {
- isa_dev->next = isa_driver->devices;
- isa_driver->devices = &isa_dev->dev;
- } else
- device_unregister(&isa_dev->dev);
+ isa_dev->next = isa_driver->devices;
+ isa_driver->devices = &isa_dev->dev;
}
if (!error && !isa_driver->devices)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 655975946ef6..9de524e56307 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -162,15 +162,15 @@ free:
}
#ifdef CONFIG_HMEM_REPORTING
-#define ACCESS_ATTR(name) \
-static ssize_t name##_show(struct device *dev, \
+#define ACCESS_ATTR(property) \
+static ssize_t property##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return sysfs_emit(buf, "%u\n", \
- to_access_nodes(dev)->hmem_attrs.name); \
+ to_access_nodes(dev)->hmem_attrs.property); \
} \
-static DEVICE_ATTR_RO(name)
+static DEVICE_ATTR_RO(property)
ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f6117ec9805c..8c40abed7852 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -987,12 +987,18 @@ EXPORT_SYMBOL(fwnode_iomap);
* @fwnode: Pointer to the firmware node
* @index: Zero-based index of the IRQ
*
- * Return: Linux IRQ number on success. Other values are determined
- * according to acpi_irq_get() or of_irq_get() operation.
+ * Return: Linux IRQ number on success. Negative errno on failure.
*/
int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
{
- return fwnode_call_int_op(fwnode, irq_get, index);
+ int ret;
+
+ ret = fwnode_call_int_op(fwnode, irq_get, index);
+ /* We treat mapping errors as invalid case */
+ if (ret == 0)
+ return -EINVAL;
+
+ return ret;
}
EXPORT_SYMBOL(fwnode_irq_get);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index fd0941fe8608..e7e58a956d15 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -637,7 +637,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
snprintf(config.fwname, sizeof(config.fwname),
"qca/%s", firmware_name);
else if (qca_is_wcn399x(soc_type)) {
- if (ver.soc_id == QCA_WCN3991_SOC_ID) {
+ if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02xu.bin", rom_ver);
} else {
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 2915c82d719d..d978e7cea873 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -1367,14 +1367,30 @@ MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723d_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723d_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761b_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761b_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761bu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761bu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821c_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821c_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821cs_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821cs_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822cs_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822cs_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822cu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822cu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8851bu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8851bu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bs_fw.bin");
@@ -1383,5 +1399,3 @@ MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
-MODULE_FIRMWARE("rtl_bt/rtl8851bu_fw.bin");
-MODULE_FIRMWARE("rtl_bt/rtl8851bu_config.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2a8e2bb038f5..5ec4ad0a5c86 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -613,6 +613,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -655,6 +658,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x8771), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x6655, 0x8771), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x7392, 0xc611), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2b89, 0x8761), .driver_info = BTUSB_REALTEK |
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 83bf5d4330c4..874d23089b39 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,7 +643,8 @@ static int bcm_setup(struct hci_uart *hu)
* Allow the bootloader to set a valid address through the
* device tree.
*/
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks);
+ if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks))
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks);
if (!bcm_request_irq(bcm))
err = bcm_setup_sleep(hu);
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index c570c45d1480..2ac70b560c46 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -79,6 +79,7 @@ static int virtbt_close_vdev(struct virtio_bluetooth *vbt)
while ((skb = virtqueue_detach_unused_buf(vq)))
kfree_skb(skb);
+ cond_resched();
}
return 0;
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 595d4cecd041..4b68c84ef485 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -45,6 +45,9 @@ static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
struct fsl_mc_child_objs *objs;
struct fsl_mc_device *mc_dev;
+ if (!dev_is_fsl_mc(dev))
+ return 0;
+
mc_dev = to_fsl_mc_device(dev);
objs = data;
@@ -64,6 +67,9 @@ static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
static int __fsl_mc_device_remove(struct device *dev, void *data)
{
+ if (!dev_is_fsl_mc(dev))
+ return 0;
+
fsl_mc_device_remove(to_fsl_mc_device(dev));
return 0;
}
diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
index f5ba6bee6fd8..320cf307db05 100644
--- a/drivers/bus/intel-ixp4xx-eb.c
+++ b/drivers/bus/intel-ixp4xx-eb.c
@@ -33,7 +33,7 @@
#define IXP4XX_EXP_TIMING_STRIDE 0x04
#define IXP4XX_EXP_CS_EN BIT(31)
#define IXP456_EXP_PAR_EN BIT(30) /* Only on IXP45x and IXP46x */
-#define IXP4XX_EXP_T1_MASK GENMASK(28, 27)
+#define IXP4XX_EXP_T1_MASK GENMASK(29, 28)
#define IXP4XX_EXP_T1_SHIFT 28
#define IXP4XX_EXP_T2_MASK GENMASK(27, 26)
#define IXP4XX_EXP_T2_SHIFT 26
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 3a46e27479ff..d668b174ace9 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -481,7 +481,7 @@ static int gdrom_bdops_open(struct gendisk *disk, blk_mode_t mode)
disk_check_media_change(disk);
mutex_lock(&gdrom_mutex);
- ret = cdrom_open(gd.cd_info);
+ ret = cdrom_open(gd.cd_info, mode);
mutex_unlock(&gdrom_mutex);
return ret;
}
@@ -489,7 +489,7 @@ static int gdrom_bdops_open(struct gendisk *disk, blk_mode_t mode)
static void gdrom_bdops_release(struct gendisk *disk)
{
mutex_lock(&gdrom_mutex);
- cdrom_release(gd.cd_info, mode);
+ cdrom_release(gd.cd_info);
mutex_unlock(&gdrom_mutex);
}
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 38511fd36325..d2cad4c670a0 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -62,6 +62,8 @@
#include <linux/mm.h>
#include <linux/xarray.h>
#include <linux/cdx/cdx_bus.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
#include "cdx.h"
/* Default DMA mask for devices on a CDX bus */
@@ -257,6 +259,7 @@ static void cdx_shutdown(struct device *dev)
static int cdx_dma_configure(struct device *dev)
{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
struct cdx_device *cdx_dev = to_cdx_device(dev);
u32 input_id = cdx_dev->req_id;
int ret;
@@ -267,9 +270,23 @@ static int cdx_dma_configure(struct device *dev)
return ret;
}
+ if (!ret && !cdx_drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
return 0;
}
+static void cdx_dma_cleanup(struct device *dev)
+{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
+
+ if (!cdx_drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
+}
+
/* show configuration fields */
#define cdx_config_attr(field, format_string) \
static ssize_t \
@@ -405,6 +422,7 @@ struct bus_type cdx_bus_type = {
.remove = cdx_remove,
.shutdown = cdx_shutdown,
.dma_configure = cdx_dma_configure,
+ .dma_cleanup = cdx_dma_cleanup,
.bus_groups = cdx_bus_groups,
.dev_groups = cdx_dev_groups,
};
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
index c3e3b9ff8dfe..61bf17fbe433 100644
--- a/drivers/cdx/controller/Kconfig
+++ b/drivers/cdx/controller/Kconfig
@@ -18,14 +18,4 @@ config CDX_CONTROLLER
If unsure, say N.
-config MCDI_LOGGING
- bool "MCDI Logging for the CDX controller"
- depends on CDX_CONTROLLER
- help
- Enable MCDI Logging for
- the CDX Controller for debug
- purpose.
-
- If unsure, say N.
-
endif
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
index a211a2ca762e..1eedc5eeb315 100644
--- a/drivers/cdx/controller/mcdi.c
+++ b/drivers/cdx/controller/mcdi.c
@@ -31,10 +31,6 @@ struct cdx_mcdi_copy_buffer {
struct cdx_dword buffer[DIV_ROUND_UP(MCDI_CTL_SDU_LEN_MAX, 4)];
};
-#ifdef CONFIG_MCDI_LOGGING
-#define LOG_LINE_MAX (1024 - 32)
-#endif
-
static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
static int cdx_mcdi_rpc_async_internal(struct cdx_mcdi *cdx,
@@ -119,14 +115,9 @@ int cdx_mcdi_init(struct cdx_mcdi *cdx)
mcdi = cdx_mcdi_if(cdx);
mcdi->cdx = cdx;
-#ifdef CONFIG_MCDI_LOGGING
- mcdi->logging_buffer = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
- if (!mcdi->logging_buffer)
- goto fail2;
-#endif
mcdi->workqueue = alloc_ordered_workqueue("mcdi_wq", 0);
if (!mcdi->workqueue)
- goto fail3;
+ goto fail2;
mutex_init(&mcdi->iface_lock);
mcdi->mode = MCDI_MODE_EVENTS;
INIT_LIST_HEAD(&mcdi->cmd_list);
@@ -135,11 +126,7 @@ int cdx_mcdi_init(struct cdx_mcdi *cdx)
mcdi->new_epoch = true;
return 0;
-fail3:
-#ifdef CONFIG_MCDI_LOGGING
- kfree(mcdi->logging_buffer);
fail2:
-#endif
kfree(cdx->mcdi);
cdx->mcdi = NULL;
fail:
@@ -156,10 +143,6 @@ void cdx_mcdi_finish(struct cdx_mcdi *cdx)
cdx_mcdi_wait_for_cleanup(cdx);
-#ifdef CONFIG_MCDI_LOGGING
- kfree(mcdi->logging_buffer);
-#endif
-
destroy_workqueue(mcdi->workqueue);
kfree(cdx->mcdi);
cdx->mcdi = NULL;
@@ -246,15 +229,9 @@ static void cdx_mcdi_send_request(struct cdx_mcdi *cdx,
size_t hdr_len;
bool not_epoch;
u32 xflags;
-#ifdef CONFIG_MCDI_LOGGING
- char *buf;
-#endif
if (!mcdi)
return;
-#ifdef CONFIG_MCDI_LOGGING
- buf = mcdi->logging_buffer; /* page-sized */
-#endif
mcdi->prev_seq = cmd->seq;
mcdi->seq_held_by[cmd->seq] = cmd;
@@ -281,39 +258,12 @@ static void cdx_mcdi_send_request(struct cdx_mcdi *cdx,
MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM);
hdr_len = 8;
-#ifdef CONFIG_MCDI_LOGGING
- if (!WARN_ON_ONCE(!buf)) {
- const struct cdx_dword *frags[] = { hdr, inbuf };
- const size_t frag_len[] = { hdr_len, round_up(inlen, 4) };
- int bytes = 0;
- int i, j;
-
- for (j = 0; j < ARRAY_SIZE(frags); j++) {
- const struct cdx_dword *frag;
-
- frag = frags[j];
- for (i = 0;
- i < frag_len[j] / 4;
- i++) {
- /*
- * Do not exceed the internal printk limit.
- * The string before that is just over 70 bytes.
- */
- if ((bytes + 75) > LOG_LINE_MAX) {
- pr_info("MCDI RPC REQ:%s \\\n", buf);
- bytes = 0;
- }
- bytes += snprintf(buf + bytes,
- LOG_LINE_MAX - bytes, " %08x",
- le32_to_cpu(frag[i].cdx_u32));
- }
- }
-
- pr_info("MCDI RPC REQ:%s\n", buf);
- }
-#endif
hdr[0].cdx_u32 |= (__force __le32)(cdx_mcdi_payload_csum(hdr, hdr_len, inbuf, inlen) <<
MCDI_HEADER_XFLAGS_LBN);
+
+ print_hex_dump_debug("MCDI REQ HEADER: ", DUMP_PREFIX_NONE, 32, 4, hdr, hdr_len, false);
+ print_hex_dump_debug("MCDI REQ PAYLOAD: ", DUMP_PREFIX_NONE, 32, 4, inbuf, inlen, false);
+
cdx->mcdi_ops->mcdi_request(cdx, hdr, hdr_len, inbuf, inlen);
mcdi->new_epoch = false;
@@ -700,28 +650,10 @@ static bool cdx_mcdi_complete_cmd(struct cdx_mcdi_iface *mcdi,
resp_data_len = 0;
}
-#ifdef CONFIG_MCDI_LOGGING
- if (!WARN_ON_ONCE(!mcdi->logging_buffer)) {
- char *log = mcdi->logging_buffer;
- int i, bytes = 0;
- size_t rlen;
-
- WARN_ON_ONCE(resp_hdr_len % 4);
-
- rlen = resp_hdr_len / 4 + DIV_ROUND_UP(resp_data_len, 4);
-
- for (i = 0; i < rlen; i++) {
- if ((bytes + 75) > LOG_LINE_MAX) {
- pr_info("MCDI RPC RESP:%s \\\n", log);
- bytes = 0;
- }
- bytes += snprintf(log + bytes, LOG_LINE_MAX - bytes,
- " %08x", le32_to_cpu(outbuf[i].cdx_u32));
- }
-
- pr_info("MCDI RPC RESP:%s\n", log);
- }
-#endif
+ print_hex_dump_debug("MCDI RESP HEADER: ", DUMP_PREFIX_NONE, 32, 4,
+ outbuf, resp_hdr_len, false);
+ print_hex_dump_debug("MCDI RESP PAYLOAD: ", DUMP_PREFIX_NONE, 32, 4,
+ outbuf + (resp_hdr_len / 4), resp_data_len, false);
if (error && resp_data_len == 0) {
/* MC rebooted during command */
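The hand-rolled MCDI request/response tracing above is replaced by print_hex_dump_debug(), which already groups output into 32-bit words under dynamic debug control. A rough userspace equivalent of dumping dwords, purely for illustration; it is not the dynamic-debug machinery itself.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void dump_dwords(const char *prefix, const uint32_t *buf, size_t len_bytes)
{
	for (size_t i = 0; i < len_bytes / 4; i++) {
		if (i % 8 == 0)
			printf("%s%s", i ? "\n" : "", prefix);
		printf(" %08" PRIx32, buf[i]);
	}
	printf("\n");
}

int main(void)
{
	uint32_t hdr[2] = { 0xdeadbeef, 0x00c0ffee };

	dump_dwords("MCDI REQ HEADER: ", hdr, sizeof(hdr));
	return 0;
}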
diff --git a/drivers/cdx/controller/mcdi.h b/drivers/cdx/controller/mcdi.h
index 0bfbeab04e43..54a65e9760ae 100644
--- a/drivers/cdx/controller/mcdi.h
+++ b/drivers/cdx/controller/mcdi.h
@@ -153,8 +153,6 @@ struct cdx_mcdi_cmd {
* @mode: Poll for mcdi completion, or wait for an mcdi_event
* @prev_seq: The last used sequence number
* @new_epoch: Indicates start of day or start of MC reboot recovery
- * @logging_buffer: Buffer that may be used to build MCDI tracing messages
- * @logging_enabled: Whether to trace MCDI
*/
struct cdx_mcdi_iface {
struct cdx_mcdi *cdx;
@@ -170,10 +168,6 @@ struct cdx_mcdi_iface {
enum cdx_mcdi_mode mode;
u8 prev_seq;
bool new_epoch;
-#ifdef CONFIG_MCDI_LOGGING
- bool logging_enabled;
- char *logging_buffer;
-#endif
};
/**
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 801d6c83f896..625af75833fc 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -34,6 +34,7 @@ config TTY_PRINTK_LEVEL
config PRINTER
tristate "Parallel printer support"
depends on PARPORT
+ depends on HAS_IOPORT || PARPORT_NOT_PC
help
If you intend to attach a printer to the parallel port of your Linux
box (as opposed to using a serial printer; if the connector at the
@@ -340,7 +341,7 @@ config NVRAM
config DEVPORT
bool "/dev/port character device"
- depends on ISA || PCI
+ depends on HAS_IOPORT
default y
help
Say Y here if you want to support the /dev/port device. The /dev/port
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index ff429ba02fa4..12143854aeac 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -61,7 +61,6 @@ struct bsr_dev {
static unsigned total_bsr_devs;
static LIST_HEAD(bsr_devs);
-static struct class *bsr_class;
static int bsr_major;
enum {
@@ -108,6 +107,11 @@ static struct attribute *bsr_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(bsr_dev);
+static const struct class bsr_class = {
+ .name = "bsr",
+ .dev_groups = bsr_dev_groups,
+};
+
static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long size = vma->vm_end - vma->vm_start;
@@ -244,7 +248,7 @@ static int bsr_add_node(struct device_node *bn)
goto out_err;
}
- cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
+ cur->bsr_device = device_create(&bsr_class, NULL, cur->bsr_dev,
cur, "%s", cur->bsr_name);
if (IS_ERR(cur->bsr_device)) {
printk(KERN_ERR "device_create failed for %s\n",
@@ -293,13 +297,9 @@ static int __init bsr_init(void)
if (!np)
goto out_err;
- bsr_class = class_create("bsr");
- if (IS_ERR(bsr_class)) {
- printk(KERN_ERR "class_create() failed for bsr_class\n");
- ret = PTR_ERR(bsr_class);
+ ret = class_register(&bsr_class);
+ if (ret)
goto out_err_1;
- }
- bsr_class->dev_groups = bsr_dev_groups;
ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
bsr_major = MAJOR(bsr_dev);
@@ -320,7 +320,7 @@ static int __init bsr_init(void)
unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);
out_err_2:
- class_destroy(bsr_class);
+ class_unregister(&bsr_class);
out_err_1:
of_node_put(np);
@@ -335,8 +335,7 @@ static void __exit bsr_exit(void)
bsr_cleanup_devs();
- if (bsr_class)
- class_destroy(bsr_class);
+ class_unregister(&bsr_class);
if (bsr_major)
unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
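
The bsr change is the first of several conversions in this series from a run-time class_create() pointer to a statically allocated, const struct class. A generic sketch of the pattern, with made-up "foo" names rather than any driver's own (error handling for device_create() elided):

#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/module.h>

/* Illustrative "foo" class: defined at build time, registered at init. */
static const struct class foo_class = {
	.name = "foo",
};

static int __init foo_init(void)
{
	int ret;

	ret = class_register(&foo_class);	/* replaces class_create("foo") */
	if (ret)
		return ret;

	/* device_create() and friends now take a pointer to the static class */
	device_create(&foo_class, NULL, MKDEV(0, 0), NULL, "foo0");
	return 0;
}

static void __exit foo_exit(void)
{
	device_destroy(&foo_class, MKDEV(0, 0));
	class_unregister(&foo_class);		/* replaces class_destroy() */
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
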
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index b3eaf3e5ef2e..bda27e595da1 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -101,7 +101,9 @@ static struct dsp56k_device {
int tx_wsize, rx_wsize;
} dsp56k;
-static struct class *dsp56k_class;
+static const struct class dsp56k_class = {
+ .name = "dsp56k",
+};
static int dsp56k_reset(void)
{
@@ -493,7 +495,7 @@ static const char banner[] __initconst = KERN_INFO "DSP56k driver installed\n";
static int __init dsp56k_init_driver(void)
{
- int err = 0;
+ int err;
if(!MACH_IS_ATARI || !ATARIHW_PRESENT(DSP56K)) {
printk("DSP56k driver: Hardware not present\n");
@@ -504,12 +506,10 @@ static int __init dsp56k_init_driver(void)
printk("DSP56k driver: Unable to register driver\n");
return -ENODEV;
}
- dsp56k_class = class_create("dsp56k");
- if (IS_ERR(dsp56k_class)) {
- err = PTR_ERR(dsp56k_class);
+ err = class_register(&dsp56k_class);
+ if (err)
goto out_chrdev;
- }
- device_create(dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), NULL,
+ device_create(&dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), NULL,
"dsp56k");
printk(banner);
@@ -524,8 +524,8 @@ module_init(dsp56k_init_driver);
static void __exit dsp56k_cleanup_driver(void)
{
- device_destroy(dsp56k_class, MKDEV(DSP56K_MAJOR, 0));
- class_destroy(dsp56k_class);
+ device_destroy(&dsp56k_class, MKDEV(DSP56K_MAJOR, 0));
+ class_unregister(&dsp56k_class);
unregister_chrdev(DSP56K_MAJOR, "dsp56k");
}
module_exit(dsp56k_cleanup_driver);
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 70cfc5140c2c..2f171d14b9b5 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -145,7 +145,9 @@ static struct lp_struct lp_table[LP_NO];
static int port_num[LP_NO];
static unsigned int lp_count = 0;
-static struct class *lp_class;
+static const struct class lp_class = {
+ .name = "printer",
+};
#ifdef CONFIG_LP_CONSOLE
static struct parport *console_registered;
@@ -932,7 +934,7 @@ static int lp_register(int nr, struct parport *port)
if (reset)
lp_reset(nr);
- device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL,
+ device_create(&lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL,
"lp%d", nr);
printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name,
@@ -1004,7 +1006,7 @@ static void lp_detach(struct parport *port)
if (port_num[n] == port->number) {
port_num[n] = -1;
lp_count--;
- device_destroy(lp_class, MKDEV(LP_MAJOR, n));
+ device_destroy(&lp_class, MKDEV(LP_MAJOR, n));
parport_unregister_device(lp_table[n].dev);
}
}
@@ -1049,11 +1051,9 @@ static int __init lp_init(void)
return -EIO;
}
- lp_class = class_create("printer");
- if (IS_ERR(lp_class)) {
- err = PTR_ERR(lp_class);
+ err = class_register(&lp_class);
+ if (err)
goto out_reg;
- }
if (parport_register_driver(&lp_driver)) {
printk(KERN_ERR "lp: unable to register with parport\n");
@@ -1072,7 +1072,7 @@ static int __init lp_init(void)
return 0;
out_class:
- class_destroy(lp_class);
+ class_unregister(&lp_class);
out_reg:
unregister_chrdev(LP_MAJOR, "lp");
return err;
@@ -1115,7 +1115,7 @@ static void lp_cleanup_module(void)
#endif
unregister_chrdev(LP_MAJOR, "lp");
- class_destroy(lp_class);
+ class_unregister(&lp_class);
}
__setup("lp=", lp_setup);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 94eff6a2a7b6..0fcc8615fb4f 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -746,20 +746,23 @@ static char *mem_devnode(const struct device *dev, umode_t *mode)
return NULL;
}
-static struct class *mem_class;
+static const struct class mem_class = {
+ .name = "mem",
+ .devnode = mem_devnode,
+};
static int __init chr_dev_init(void)
{
+ int retval;
int minor;
if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
printk("unable to get major %d for memory devs\n", MEM_MAJOR);
- mem_class = class_create("mem");
- if (IS_ERR(mem_class))
- return PTR_ERR(mem_class);
+ retval = class_register(&mem_class);
+ if (retval)
+ return retval;
- mem_class->devnode = mem_devnode;
for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
if (!devlist[minor].name)
continue;
@@ -770,7 +773,7 @@ static int __init chr_dev_init(void)
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;
- device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
+ device_create(&mem_class, NULL, MKDEV(MEM_MAJOR, minor),
NULL, devlist[minor].name);
}
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 1c44c29a666e..541edc26ec89 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -168,7 +168,21 @@ fail:
return err;
}
-static struct class *misc_class;
+static char *misc_devnode(const struct device *dev, umode_t *mode)
+{
+ const struct miscdevice *c = dev_get_drvdata(dev);
+
+ if (mode && c->mode)
+ *mode = c->mode;
+ if (c->nodename)
+ return kstrdup(c->nodename, GFP_KERNEL);
+ return NULL;
+}
+
+static const struct class misc_class = {
+ .name = "misc",
+ .devnode = misc_devnode,
+};
static const struct file_operations misc_fops = {
.owner = THIS_MODULE,
@@ -226,7 +240,7 @@ int misc_register(struct miscdevice *misc)
dev = MKDEV(MISC_MAJOR, misc->minor);
misc->this_device =
- device_create_with_groups(misc_class, misc->parent, dev,
+ device_create_with_groups(&misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
if (is_dynamic) {
@@ -263,43 +277,30 @@ void misc_deregister(struct miscdevice *misc)
mutex_lock(&misc_mtx);
list_del(&misc->list);
- device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
+ device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor));
misc_minor_free(misc->minor);
mutex_unlock(&misc_mtx);
}
EXPORT_SYMBOL(misc_deregister);
-static char *misc_devnode(const struct device *dev, umode_t *mode)
-{
- const struct miscdevice *c = dev_get_drvdata(dev);
-
- if (mode && c->mode)
- *mode = c->mode;
- if (c->nodename)
- return kstrdup(c->nodename, GFP_KERNEL);
- return NULL;
-}
-
static int __init misc_init(void)
{
int err;
struct proc_dir_entry *ret;
ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
- misc_class = class_create("misc");
- err = PTR_ERR(misc_class);
- if (IS_ERR(misc_class))
+ err = class_register(&misc_class);
+ if (err)
goto fail_remove;
err = -EIO;
if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
goto fail_printk;
- misc_class->devnode = misc_devnode;
return 0;
fail_printk:
pr_err("unable to get major %d for misc devices\n", MISC_MAJOR);
- class_destroy(misc_class);
+ class_unregister(&misc_class);
fail_remove:
if (ret)
remove_proc_entry("misc", NULL);
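
The mem.c and misc.c hunks show the other half of that conversion: because the class object is now const, callbacks such as .devnode can no longer be assigned after creation and have to go into the initializer. A short sketch with placeholder names and an illustrative mode:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Placeholder devnode callback; name and mode are illustrative only. */
static char *foo_devnode(const struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kstrdup("foo/ctl", GFP_KERNEL);
}

static const struct class foo_class = {
	.name = "foo",
	.devnode = foo_devnode,	/* set here, not after class_create() */
};
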
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 81ed58157b15..4c188e9e477c 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -773,7 +773,9 @@ static __poll_t pp_poll(struct file *file, poll_table *wait)
return mask;
}
-static struct class *ppdev_class;
+static const struct class ppdev_class = {
+ .name = CHRDEV,
+};
static const struct file_operations pp_fops = {
.owner = THIS_MODULE,
@@ -794,7 +796,7 @@ static void pp_attach(struct parport *port)
if (devices[port->number])
return;
- ret = device_create(ppdev_class, port->dev,
+ ret = device_create(&ppdev_class, port->dev,
MKDEV(PP_MAJOR, port->number), NULL,
"parport%d", port->number);
if (IS_ERR(ret)) {
@@ -810,7 +812,7 @@ static void pp_detach(struct parport *port)
if (!devices[port->number])
return;
- device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
+ device_destroy(&ppdev_class, MKDEV(PP_MAJOR, port->number));
devices[port->number] = NULL;
}
@@ -841,11 +843,10 @@ static int __init ppdev_init(void)
pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR);
return -EIO;
}
- ppdev_class = class_create(CHRDEV);
- if (IS_ERR(ppdev_class)) {
- err = PTR_ERR(ppdev_class);
+ err = class_register(&ppdev_class);
+ if (err)
goto out_chrdev;
- }
+
err = parport_register_driver(&pp_driver);
if (err < 0) {
pr_warn(CHRDEV ": unable to register with parport\n");
@@ -856,7 +857,7 @@ static int __init ppdev_init(void)
goto out;
out_class:
- class_destroy(ppdev_class);
+ class_unregister(&ppdev_class);
out_chrdev:
unregister_chrdev(PP_MAJOR, CHRDEV);
out:
@@ -867,7 +868,7 @@ static void __exit ppdev_cleanup(void)
{
/* Clean up all parport stuff */
parport_unregister_driver(&pp_driver);
- class_destroy(ppdev_class);
+ class_unregister(&ppdev_class);
unregister_chrdev(PP_MAJOR, CHRDEV);
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index b65c809a4e97..680d1ef2a217 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -40,9 +40,6 @@
* across multiple devices and multiple ports per device.
*/
struct ports_driver_data {
- /* Used for registering chardevs */
- struct class *class;
-
/* Used for exporting per-port information to debugfs */
struct dentry *debugfs_dir;
@@ -55,6 +52,10 @@ struct ports_driver_data {
static struct ports_driver_data pdrvdata;
+static const struct class port_class = {
+ .name = "virtio-ports",
+};
+
static DEFINE_SPINLOCK(pdrvdata_lock);
static DECLARE_COMPLETION(early_console_added);
@@ -1399,7 +1400,7 @@ static int add_port(struct ports_device *portdev, u32 id)
"Error %d adding cdev for port %u\n", err, id);
goto free_cdev;
}
- port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+ port->dev = device_create(&port_class, &port->portdev->vdev->dev,
devt, port, "vport%up%u",
port->portdev->vdev->index, id);
if (IS_ERR(port->dev)) {
@@ -1465,7 +1466,7 @@ static int add_port(struct ports_device *portdev, u32 id)
free_inbufs:
free_device:
- device_destroy(pdrvdata.class, port->dev->devt);
+ device_destroy(&port_class, port->dev->devt);
free_cdev:
cdev_del(port->cdev);
free_port:
@@ -1540,7 +1541,7 @@ static void unplug_port(struct port *port)
port->portdev = NULL;
sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
- device_destroy(pdrvdata.class, port->dev->devt);
+ device_destroy(&port_class, port->dev->devt);
cdev_del(port->cdev);
debugfs_remove(port->debugfs_file);
@@ -1935,6 +1936,7 @@ static void remove_vqs(struct ports_device *portdev)
flush_bufs(vq, true);
while ((buf = virtqueue_detach_unused_buf(vq)))
free_buf(buf, true);
+ cond_resched();
}
portdev->vdev->config->del_vqs(portdev->vdev);
kfree(portdev->in_vqs);
@@ -2244,12 +2246,9 @@ static int __init virtio_console_init(void)
{
int err;
- pdrvdata.class = class_create("virtio-ports");
- if (IS_ERR(pdrvdata.class)) {
- err = PTR_ERR(pdrvdata.class);
- pr_err("Error %d creating virtio-ports class\n", err);
+ err = class_register(&port_class);
+ if (err)
return err;
- }
pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
INIT_LIST_HEAD(&pdrvdata.consoles);
@@ -2271,7 +2270,7 @@ unregister:
unregister_virtio_driver(&virtio_console);
free:
debugfs_remove_recursive(pdrvdata.debugfs_dir);
- class_destroy(pdrvdata.class);
+ class_unregister(&port_class);
return err;
}
@@ -2282,7 +2281,7 @@ static void __exit virtio_console_fini(void)
unregister_virtio_driver(&virtio_console);
unregister_virtio_driver(&virtio_rproc_serial);
- class_destroy(pdrvdata.class);
+ class_unregister(&port_class);
debugfs_remove_recursive(pdrvdata.debugfs_dir);
}
module_init(virtio_console_init);
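
Besides the class conversion, remove_vqs() gains a cond_resched() per virtqueue so that draining many queues does not monopolize the CPU. Roughly the shape of that loop, simplified for illustration (the real code frees port buffers via free_buf(), not plain kfree()):

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/virtio.h>

/* Simplified drain loop; buffer ownership details of the real driver omitted. */
static void drain_vqs(struct virtqueue *vqs[], unsigned int nr_vqs)
{
	unsigned int i;
	void *buf;

	for (i = 0; i < nr_vqs; i++) {
		while ((buf = virtqueue_detach_unused_buf(vqs[i])))
			kfree(buf);
		cond_resched();	/* give other tasks a chance between queues */
	}
}
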
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index a46f637da959..f60bb6151402 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -113,7 +113,9 @@ static DEFINE_MUTEX(hwicap_mutex);
static bool probed_devices[HWICAP_DEVICES];
static struct mutex icap_sem;
-static struct class *icap_class;
+static const struct class icap_class = {
+ .name = "xilinx_config",
+};
#define UNIMPLEMENTED 0xFFFF
@@ -687,7 +689,7 @@ static int hwicap_setup(struct device *dev, int id,
goto failed3;
}
- device_create(icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id);
+ device_create(&icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id);
return 0; /* success */
failed3:
@@ -721,27 +723,6 @@ static struct hwicap_driver_config fifo_icap_config = {
.reset = fifo_icap_reset,
};
-static int hwicap_remove(struct device *dev)
-{
- struct hwicap_drvdata *drvdata;
-
- drvdata = dev_get_drvdata(dev);
-
- if (!drvdata)
- return 0;
-
- device_destroy(icap_class, drvdata->devt);
- cdev_del(&drvdata->cdev);
- iounmap(drvdata->base_address);
- release_mem_region(drvdata->mem_start, drvdata->mem_size);
- kfree(drvdata);
-
- mutex_lock(&icap_sem);
- probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
- mutex_unlock(&icap_sem);
- return 0; /* success */
-}
-
#ifdef CONFIG_OF
static int hwicap_of_probe(struct platform_device *op,
const struct hwicap_driver_config *config)
@@ -825,9 +806,22 @@ static int hwicap_drv_probe(struct platform_device *pdev)
&buffer_icap_config, regs);
}
-static int hwicap_drv_remove(struct platform_device *pdev)
+static void hwicap_drv_remove(struct platform_device *pdev)
{
- return hwicap_remove(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct hwicap_drvdata *drvdata;
+
+ drvdata = dev_get_drvdata(dev);
+
+ device_destroy(&icap_class, drvdata->devt);
+ cdev_del(&drvdata->cdev);
+ iounmap(drvdata->base_address);
+ release_mem_region(drvdata->mem_start, drvdata->mem_size);
+ kfree(drvdata);
+
+ mutex_lock(&icap_sem);
+ probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
+ mutex_unlock(&icap_sem);
}
#ifdef CONFIG_OF
@@ -844,7 +838,7 @@ MODULE_DEVICE_TABLE(of, hwicap_of_match);
static struct platform_driver hwicap_platform_driver = {
.probe = hwicap_drv_probe,
- .remove = hwicap_drv_remove,
+ .remove_new = hwicap_drv_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = hwicap_of_match,
@@ -856,7 +850,9 @@ static int __init hwicap_module_init(void)
dev_t devt;
int retval;
- icap_class = class_create("xilinx_config");
+ retval = class_register(&icap_class);
+ if (retval)
+ return retval;
mutex_init(&icap_sem);
devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
@@ -882,7 +878,7 @@ static void __exit hwicap_module_cleanup(void)
{
dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
- class_destroy(icap_class);
+ class_unregister(&icap_class);
platform_driver_unregister(&hwicap_platform_driver);
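
xilinx_hwicap also moves its platform driver to the void-returning remove callback: the old int return value was ignored by the driver core, so the cleanup is folded directly into a .remove_new handler. A skeletal example with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_drv_probe(struct platform_device *pdev)
{
	return 0;	/* resource setup elided */
}

/* The remove callback returns void; failures must be handled internally. */
static void foo_drv_remove(struct platform_device *pdev)
{
	/* undo whatever foo_drv_probe() set up */
}

static struct platform_driver foo_driver = {
	.probe = foo_drv_probe,
	.remove_new = foo_drv_remove,	/* transitional field for void remove */
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);
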
diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c
index 89926fe9d813..c92a628e389e 100644
--- a/drivers/char/xillybus/xillybus_class.c
+++ b/drivers/char/xillybus/xillybus_class.c
@@ -23,7 +23,9 @@ MODULE_LICENSE("GPL v2");
static DEFINE_MUTEX(unit_mutex);
static LIST_HEAD(unit_list);
-static struct class *xillybus_class;
+static const struct class xillybus_class = {
+ .name = "xillybus",
+};
#define UNITNAMELEN 16
@@ -121,7 +123,7 @@ int xillybus_init_chrdev(struct device *dev,
len -= namelen + 1;
idt += namelen + 1;
- device = device_create(xillybus_class,
+ device = device_create(&xillybus_class,
NULL,
MKDEV(unit->major,
i + unit->lowest_minor),
@@ -152,7 +154,7 @@ int xillybus_init_chrdev(struct device *dev,
unroll_device_create:
for (i--; i >= 0; i--)
- device_destroy(xillybus_class, MKDEV(unit->major,
+ device_destroy(&xillybus_class, MKDEV(unit->major,
i + unit->lowest_minor));
cdev_del(unit->cdev);
@@ -193,7 +195,7 @@ void xillybus_cleanup_chrdev(void *private_data,
for (minor = unit->lowest_minor;
minor < (unit->lowest_minor + unit->num_nodes);
minor++)
- device_destroy(xillybus_class, MKDEV(unit->major, minor));
+ device_destroy(&xillybus_class, MKDEV(unit->major, minor));
cdev_del(unit->cdev);
@@ -242,19 +244,12 @@ EXPORT_SYMBOL(xillybus_find_inode);
static int __init xillybus_class_init(void)
{
- xillybus_class = class_create("xillybus");
-
- if (IS_ERR(xillybus_class)) {
- pr_warn("Failed to register xillybus class\n");
-
- return PTR_ERR(xillybus_class);
- }
- return 0;
+ return class_register(&xillybus_class);
}
static void __exit xillybus_class_exit(void)
{
- class_destroy(xillybus_class);
+ class_unregister(&xillybus_class);
}
module_init(xillybus_class_init);
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index 0b860126d589..3f19e737ae4d 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -108,12 +108,12 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL,
bypass);
if (IS_ERR(hw))
goto err_free;
- hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc");
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc", NULL);
if (IS_ERR(hw))
goto err_free;
@@ -140,7 +140,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
parent_names[2] = "pllack";
parent_names[3] = "pllbck";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
- parent_names,
+ parent_names, NULL,
&at91rm9200_master_layout,
&rm9200_mck_characteristics,
&rm9200_mck_lock);
@@ -148,7 +148,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres",
+ "masterck_pres", NULL,
&at91rm9200_master_layout,
&rm9200_mck_characteristics,
&rm9200_mck_lock, CLK_SET_RATE_GATE, 0);
@@ -171,7 +171,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 4, i,
+ parent_names, NULL, 4, i,
&at91rm9200_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -182,7 +182,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91rm9200_systemck); i++) {
hw = at91_clk_register_system(regmap, at91rm9200_systemck[i].n,
- at91rm9200_systemck[i].p,
+ at91rm9200_systemck[i].p, NULL,
at91rm9200_systemck[i].id, 0);
if (IS_ERR(hw))
goto err_free;
@@ -193,7 +193,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91rm9200_periphck); i++) {
hw = at91_clk_register_peripheral(regmap,
at91rm9200_periphck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
at91rm9200_periphck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index b521f470428f..0799a13060ea 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -363,12 +363,12 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL,
bypass);
if (IS_ERR(hw))
goto err_free;
- hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc");
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc", NULL);
if (IS_ERR(hw))
goto err_free;
@@ -416,7 +416,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
parent_names[2] = "pllack";
parent_names[3] = "pllbck";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
- parent_names,
+ parent_names, NULL,
&at91rm9200_master_layout,
data->mck_characteristics,
&at91sam9260_mck_lock);
@@ -424,7 +424,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres",
+ "masterck_pres", NULL,
&at91rm9200_master_layout,
data->mck_characteristics,
&at91sam9260_mck_lock,
@@ -448,7 +448,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 4, i,
+ parent_names, NULL, 4, i,
&at91rm9200_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -459,7 +459,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
for (i = 0; i < data->num_sck; i++) {
hw = at91_clk_register_system(regmap, data->sck[i].n,
- data->sck[i].p,
+ data->sck[i].p, NULL,
data->sck[i].id, 0);
if (IS_ERR(hw))
goto err_free;
@@ -470,7 +470,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
for (i = 0; i < data->num_pck; i++) {
hw = at91_clk_register_peripheral(regmap,
data->pck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
data->pck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index 5099669ddcbd..f45a7b80f7d8 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -123,12 +123,12 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL,
bypass);
if (IS_ERR(hw))
goto err_free;
- hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc");
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc", NULL);
if (IS_ERR(hw))
goto err_free;
@@ -145,7 +145,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
at91sam9g45_pmc->chws[PMC_PLLACK] = hw;
- hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck", NULL);
if (IS_ERR(hw))
goto err_free;
@@ -156,7 +156,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
- parent_names,
+ parent_names, NULL,
&at91rm9200_master_layout,
&mck_characteristics,
&at91sam9g45_mck_lock);
@@ -164,7 +164,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres",
+ "masterck_pres", NULL,
&at91rm9200_master_layout,
&mck_characteristics,
&at91sam9g45_mck_lock,
@@ -191,7 +191,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 5, i,
+ parent_names, NULL, 5, i,
&at91sam9g45_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -202,7 +202,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9g45_systemck); i++) {
hw = at91_clk_register_system(regmap, at91sam9g45_systemck[i].n,
- at91sam9g45_systemck[i].p,
+ at91sam9g45_systemck[i].p, NULL,
at91sam9g45_systemck[i].id,
at91sam9g45_systemck[i].flags);
if (IS_ERR(hw))
@@ -214,7 +214,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9g45_periphck); i++) {
hw = at91_clk_register_peripheral(regmap,
at91sam9g45_periphck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
at91sam9g45_periphck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index 08a10e12d08d..751786184ae2 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -147,14 +147,14 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL,
bypass);
if (IS_ERR(hw))
goto err_free;
parent_names[0] = "main_rc_osc";
parent_names[1] = "main_osc";
- hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2);
if (IS_ERR(hw))
goto err_free;
@@ -183,7 +183,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
parent_names[2] = "plladivck";
parent_names[3] = "pllbck";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
- parent_names,
+ parent_names, NULL,
&at91sam9x5_master_layout,
&mck_characteristics,
&at91sam9n12_mck_lock);
@@ -191,7 +191,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres",
+ "masterck_pres", NULL,
&at91sam9x5_master_layout,
&mck_characteristics,
&at91sam9n12_mck_lock,
@@ -216,7 +216,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 5, i,
+ parent_names, NULL, 5, i,
&at91sam9x5_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -227,7 +227,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9n12_systemck); i++) {
hw = at91_clk_register_system(regmap, at91sam9n12_systemck[i].n,
- at91sam9n12_systemck[i].p,
+ at91sam9n12_systemck[i].p, NULL,
at91sam9n12_systemck[i].id,
at91sam9n12_systemck[i].flags);
if (IS_ERR(hw))
@@ -240,7 +240,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&at91sam9n12_pcr_layout,
at91sam9n12_periphck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
at91sam9n12_periphck[i].id,
&range, INT_MIN, 0);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index 1a1b6b2bb0e3..969f809e7d65 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -95,7 +95,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
if (!at91sam9rl_pmc)
return;
- hw = at91_clk_register_rm9200_main(regmap, "mainck", mainxtal_name);
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", mainxtal_name, NULL);
if (IS_ERR(hw))
goto err_free;
@@ -109,7 +109,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
at91sam9rl_pmc->chws[PMC_PLLACK] = hw;
- hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck", NULL);
if (IS_ERR(hw))
goto err_free;
@@ -120,7 +120,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
parent_names[2] = "pllack";
parent_names[3] = "utmick";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
- parent_names,
+ parent_names, NULL,
&at91rm9200_master_layout,
&sam9rl_mck_characteristics,
&sam9rl_mck_lock);
@@ -128,7 +128,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres",
+ "masterck_pres", NULL,
&at91rm9200_master_layout,
&sam9rl_mck_characteristics,
&sam9rl_mck_lock, CLK_SET_RATE_GATE, 0);
@@ -148,7 +148,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 5, i,
+ parent_names, NULL, 5, i,
&at91rm9200_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -159,7 +159,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9rl_systemck); i++) {
hw = at91_clk_register_system(regmap, at91sam9rl_systemck[i].n,
- at91sam9rl_systemck[i].p,
+ at91sam9rl_systemck[i].p, NULL,
at91sam9rl_systemck[i].id, 0);
if (IS_ERR(hw))
goto err_free;
@@ -170,7 +170,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9rl_periphck); i++) {
hw = at91_clk_register_peripheral(regmap,
at91sam9rl_periphck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
at91sam9rl_periphck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 13e589c95907..3b801d12fac0 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -169,14 +169,14 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL,
bypass);
if (IS_ERR(hw))
goto err_free;
parent_names[0] = "main_rc_osc";
parent_names[1] = "main_osc";
- hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2);
if (IS_ERR(hw))
goto err_free;
@@ -193,7 +193,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
at91sam9x5_pmc->chws[PMC_PLLACK] = hw;
- hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck", NULL);
if (IS_ERR(hw))
goto err_free;
@@ -204,14 +204,14 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
- parent_names,
+ parent_names, NULL,
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock);
if (IS_ERR(hw))
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres",
+ "masterck_pres", NULL,
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
CLK_SET_RATE_GATE, 0);
@@ -241,7 +241,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 5, i,
+ parent_names, NULL, 5, i,
&at91sam9x5_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -252,7 +252,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
for (i = 0; i < ARRAY_SIZE(at91sam9x5_systemck); i++) {
hw = at91_clk_register_system(regmap, at91sam9x5_systemck[i].n,
- at91sam9x5_systemck[i].p,
+ at91sam9x5_systemck[i].p, NULL,
at91sam9x5_systemck[i].id,
at91sam9x5_systemck[i].flags);
if (IS_ERR(hw))
@@ -263,7 +263,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
if (has_lcdck) {
hw = at91_clk_register_system(regmap, "lcdck", "masterck_div",
- 3, 0);
+ NULL, 3, 0);
if (IS_ERR(hw))
goto err_free;
@@ -274,7 +274,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&at91sam9x5_pcr_layout,
at91sam9x5_periphck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
at91sam9x5_periphck[i].id,
&range, INT_MIN, 0);
if (IS_ERR(hw))
@@ -287,7 +287,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&at91sam9x5_pcr_layout,
extra_pcks[i].n,
- "masterck_div",
+ "masterck_div", NULL,
extra_pcks[i].id,
&range, INT_MIN, 0);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 943ea67bf135..4b4edeecc889 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -319,22 +319,29 @@ struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char **parent_names,
+ struct clk_hw **parent_hws,
u32 *mux_table, u8 num_parents, u8 id,
const struct clk_range *range,
int chg_pid)
{
struct clk_generated *gck;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk_hw *hw;
int ret;
+ if (!(parent_names || parent_hws))
+ return ERR_PTR(-ENOMEM);
+
gck = kzalloc(sizeof(*gck), GFP_KERNEL);
if (!gck)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &generated_ops;
- init.parent_names = parent_names;
+ if (parent_hws)
+ init.parent_hws = (const struct clk_hw **)parent_hws;
+ else
+ init.parent_names = parent_names;
init.num_parents = num_parents;
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
if (chg_pid >= 0)
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 4966e0f9e92c..9b462becc693 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -152,14 +152,15 @@ struct clk_hw * __init
at91_clk_register_main_osc(struct regmap *regmap,
const char *name,
const char *parent_name,
+ struct clk_parent_data *parent_data,
bool bypass)
{
struct clk_main_osc *osc;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk_hw *hw;
int ret;
- if (!name || !parent_name)
+ if (!name || !(parent_name || parent_data))
return ERR_PTR(-EINVAL);
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
@@ -168,7 +169,10 @@ at91_clk_register_main_osc(struct regmap *regmap,
init.name = name;
init.ops = &main_osc_ops;
- init.parent_names = &parent_name;
+ if (parent_data)
+ init.parent_data = (const struct clk_parent_data *)parent_data;
+ else
+ init.parent_names = &parent_name;
init.num_parents = 1;
init.flags = CLK_IGNORE_UNUSED;
@@ -397,17 +401,18 @@ static const struct clk_ops rm9200_main_ops = {
struct clk_hw * __init
at91_clk_register_rm9200_main(struct regmap *regmap,
const char *name,
- const char *parent_name)
+ const char *parent_name,
+ struct clk_hw *parent_hw)
{
struct clk_rm9200_main *clkmain;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk_hw *hw;
int ret;
if (!name)
return ERR_PTR(-EINVAL);
- if (!parent_name)
+ if (!(parent_name || parent_hw))
return ERR_PTR(-EINVAL);
clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL);
@@ -416,7 +421,10 @@ at91_clk_register_rm9200_main(struct regmap *regmap,
init.name = name;
init.ops = &rm9200_main_ops;
- init.parent_names = &parent_name;
+ if (parent_hw)
+ init.parent_hws = (const struct clk_hw **)&parent_hw;
+ else
+ init.parent_names = &parent_name;
init.num_parents = 1;
init.flags = 0;
@@ -544,10 +552,11 @@ struct clk_hw * __init
at91_clk_register_sam9x5_main(struct regmap *regmap,
const char *name,
const char **parent_names,
+ struct clk_hw **parent_hws,
int num_parents)
{
struct clk_sam9x5_main *clkmain;
- struct clk_init_data init;
+ struct clk_init_data init = {};
unsigned int status;
struct clk_hw *hw;
int ret;
@@ -555,7 +564,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
if (!name)
return ERR_PTR(-EINVAL);
- if (!parent_names || !num_parents)
+ if (!(parent_hws || parent_names) || !num_parents)
return ERR_PTR(-EINVAL);
clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL);
@@ -564,7 +573,10 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
init.name = name;
init.ops = &sam9x5_main_ops;
- init.parent_names = parent_names;
+ if (parent_hws)
+ init.parent_hws = (const struct clk_hw **)parent_hws;
+ else
+ init.parent_names = parent_names;
init.num_parents = num_parents;
init.flags = CLK_SET_PARENT_GATE;
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index b7cd1924de52..15c46489ba85 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -473,18 +473,19 @@ static struct clk_hw * __init
at91_clk_register_master_internal(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
+ struct clk_hw **parent_hws,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
const struct clk_ops *ops, spinlock_t *lock, u32 flags)
{
struct clk_master *master;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk_hw *hw;
unsigned int mckr;
unsigned long irqflags;
int ret;
- if (!name || !num_parents || !parent_names || !lock)
+ if (!name || !num_parents || !(parent_names || parent_hws) || !lock)
return ERR_PTR(-EINVAL);
master = kzalloc(sizeof(*master), GFP_KERNEL);
@@ -493,7 +494,10 @@ at91_clk_register_master_internal(struct regmap *regmap,
init.name = name;
init.ops = ops;
- init.parent_names = parent_names;
+ if (parent_hws)
+ init.parent_hws = (const struct clk_hw **)parent_hws;
+ else
+ init.parent_names = parent_names;
init.num_parents = num_parents;
init.flags = flags;
@@ -527,12 +531,13 @@ struct clk_hw * __init
at91_clk_register_master_pres(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
+ struct clk_hw **parent_hws,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
spinlock_t *lock)
{
return at91_clk_register_master_internal(regmap, name, num_parents,
- parent_names, layout,
+ parent_names, parent_hws, layout,
characteristics,
&master_pres_ops,
lock, CLK_SET_RATE_GATE);
@@ -541,7 +546,7 @@ at91_clk_register_master_pres(struct regmap *regmap,
struct clk_hw * __init
at91_clk_register_master_div(struct regmap *regmap,
const char *name, const char *parent_name,
- const struct clk_master_layout *layout,
+ struct clk_hw *parent_hw, const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
spinlock_t *lock, u32 flags, u32 safe_div)
{
@@ -554,7 +559,8 @@ at91_clk_register_master_div(struct regmap *regmap,
ops = &master_div_ops_chg;
hw = at91_clk_register_master_internal(regmap, name, 1,
- &parent_name, layout,
+ parent_name ? &parent_name : NULL,
+ parent_hw ? &parent_hw : NULL, layout,
characteristics, ops,
lock, flags);
@@ -806,18 +812,19 @@ struct clk_hw * __init
at91_clk_sama7g5_register_master(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
+ struct clk_hw **parent_hws,
u32 *mux_table,
spinlock_t *lock, u8 id,
bool critical, int chg_pid)
{
struct clk_master *master;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
unsigned long flags;
unsigned int val;
int ret;
- if (!name || !num_parents || !parent_names || !mux_table ||
+ if (!name || !num_parents || !(parent_names || parent_hws) || !mux_table ||
!lock || id > MASTER_MAX_ID)
return ERR_PTR(-EINVAL);
@@ -827,7 +834,10 @@ at91_clk_sama7g5_register_master(struct regmap *regmap,
init.name = name;
init.ops = &sama7g5_master_ops;
- init.parent_names = parent_names;
+ if (parent_hws)
+ init.parent_hws = (const struct clk_hw **)parent_hws;
+ else
+ init.parent_names = parent_names;
init.num_parents = num_parents;
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
if (chg_pid >= 0)
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 93ea685e27f6..c173a44c800a 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -97,14 +97,15 @@ static const struct clk_ops peripheral_ops = {
struct clk_hw * __init
at91_clk_register_peripheral(struct regmap *regmap, const char *name,
- const char *parent_name, u32 id)
+ const char *parent_name, struct clk_hw *parent_hw,
+ u32 id)
{
struct clk_peripheral *periph;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk_hw *hw;
int ret;
- if (!name || !parent_name || id > PERIPHERAL_ID_MAX)
+ if (!name || !(parent_name || parent_hw) || id > PERIPHERAL_ID_MAX)
return ERR_PTR(-EINVAL);
periph = kzalloc(sizeof(*periph), GFP_KERNEL);
@@ -113,7 +114,10 @@ at91_clk_register_peripheral(struct regmap *regmap, const char *name,
init.name = name;
init.ops = &peripheral_ops;
- init.parent_names = &parent_name;
+ if (parent_hw)
+ init.parent_hws = (const struct clk_hw **)&parent_hw;
+ else
+ init.parent_names = &parent_name;
init.num_parents = 1;
init.flags = 0;
@@ -444,15 +448,16 @@ struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
+ struct clk_hw *parent_hw,
u32 id, const struct clk_range *range,
int chg_pid, unsigned long flags)
{
struct clk_sam9x5_peripheral *periph;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk_hw *hw;
int ret;
- if (!name || !parent_name)
+ if (!name || !(parent_name || parent_hw))
return ERR_PTR(-EINVAL);
periph = kzalloc(sizeof(*periph), GFP_KERNEL);
@@ -460,7 +465,10 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.parent_names = &parent_name;
+ if (parent_hw)
+ init.parent_hws = (const struct clk_hw **)&parent_hw;
+ else
+ init.parent_names = &parent_name;
init.num_parents = 1;
init.flags = flags;
if (chg_pid < 0) {
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 6c4b259d31d3..1195fb405503 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -215,16 +215,16 @@ static const struct clk_ops programmable_ops = {
struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap,
const char *name, const char **parent_names,
- u8 num_parents, u8 id,
+ struct clk_hw **parent_hws, u8 num_parents, u8 id,
const struct clk_programmable_layout *layout,
u32 *mux_table)
{
struct clk_programmable *prog;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
- if (id > PROG_ID_MAX)
+ if (id > PROG_ID_MAX || !(parent_names || parent_hws))
return ERR_PTR(-EINVAL);
prog = kzalloc(sizeof(*prog), GFP_KERNEL);
@@ -233,7 +233,10 @@ at91_clk_register_programmable(struct regmap *regmap,
init.name = name;
init.ops = &programmable_ops;
- init.parent_names = parent_names;
+ if (parent_hws)
+ init.parent_hws = (const struct clk_hw **)parent_hws;
+ else
+ init.parent_names = parent_names;
init.num_parents = num_parents;
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index 0882ed01d5c2..ff65f7b916f0 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -616,7 +616,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
{
struct sam9x60_frac *frac;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
unsigned long parent_rate, irqflags;
unsigned int val;
int ret;
@@ -629,7 +629,10 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.parent_names = &parent_name;
+ if (parent_name)
+ init.parent_names = &parent_name;
+ else
+ init.parent_hws = (const struct clk_hw **)&parent_hw;
init.num_parents = 1;
if (flags & CLK_SET_RATE_GATE)
init.ops = &sam9x60_frac_pll_ops;
@@ -692,14 +695,15 @@ free:
struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
- const char *name, const char *parent_name, u8 id,
+ const char *name, const char *parent_name,
+ struct clk_hw *parent_hw, u8 id,
const struct clk_pll_characteristics *characteristics,
const struct clk_pll_layout *layout, u32 flags,
u32 safe_div)
{
struct sam9x60_div *div;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
unsigned long irqflags;
unsigned int val;
int ret;
@@ -716,7 +720,10 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.parent_names = &parent_name;
+ if (parent_hw)
+ init.parent_hws = (const struct clk_hw **)&parent_hw;
+ else
+ init.parent_names = &parent_name;
init.num_parents = 1;
if (flags & CLK_SET_RATE_GATE)
init.ops = &sam9x60_div_pll_ops;
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 10193650429e..90eed39d0785 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -105,14 +105,15 @@ static const struct clk_ops system_ops = {
struct clk_hw * __init
at91_clk_register_system(struct regmap *regmap, const char *name,
- const char *parent_name, u8 id, unsigned long flags)
+ const char *parent_name, struct clk_hw *parent_hw, u8 id,
+ unsigned long flags)
{
struct clk_system *sys;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
- if (!parent_name || id > SYSTEM_MAX_ID)
+ if (!(parent_name || parent_hw) || id > SYSTEM_MAX_ID)
return ERR_PTR(-EINVAL);
sys = kzalloc(sizeof(*sys), GFP_KERNEL);
@@ -121,7 +122,10 @@ at91_clk_register_system(struct regmap *regmap, const char *name,
init.name = name;
init.ops = &system_ops;
- init.parent_names = &parent_name;
+ if (parent_hw)
+ init.parent_hws = (const struct clk_hw **)&parent_hw;
+ else
+ init.parent_names = &parent_name;
init.num_parents = 1;
init.flags = CLK_SET_RATE_PARENT | flags;
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index a22c10d9a1b9..40c84f5af5e8 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -144,21 +144,30 @@ static struct clk_hw * __init
at91_clk_register_utmi_internal(struct regmap *regmap_pmc,
struct regmap *regmap_sfr,
const char *name, const char *parent_name,
+ struct clk_hw *parent_hw,
const struct clk_ops *ops, unsigned long flags)
{
struct clk_utmi *utmi;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
+ if (!(parent_name || parent_hw))
+ return ERR_PTR(-EINVAL);
+
utmi = kzalloc(sizeof(*utmi), GFP_KERNEL);
if (!utmi)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = ops;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
+ if (parent_hw) {
+ init.parent_hws = parent_hw ? (const struct clk_hw **)&parent_hw : NULL;
+ init.num_parents = parent_hw ? 1 : 0;
+ } else {
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ }
init.flags = flags;
utmi->hw.init = &init;
@@ -177,10 +186,11 @@ at91_clk_register_utmi_internal(struct regmap *regmap_pmc,
struct clk_hw * __init
at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
- const char *name, const char *parent_name)
+ const char *name, const char *parent_name,
+ struct clk_hw *parent_hw)
{
return at91_clk_register_utmi_internal(regmap_pmc, regmap_sfr, name,
- parent_name, &utmi_ops, CLK_SET_RATE_GATE);
+ parent_name, parent_hw, &utmi_ops, CLK_SET_RATE_GATE);
}
static int clk_utmi_sama7g5_prepare(struct clk_hw *hw)
@@ -279,8 +289,8 @@ static const struct clk_ops sama7g5_utmi_ops = {
struct clk_hw * __init
at91_clk_sama7g5_register_utmi(struct regmap *regmap_pmc, const char *name,
- const char *parent_name)
+ const char *parent_name, struct clk_hw *parent_hw)
{
return at91_clk_register_utmi_internal(regmap_pmc, NULL, name,
- parent_name, &sama7g5_utmi_ops, 0);
+ parent_name, parent_hw, &sama7g5_utmi_ops, 0);
}
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index 97f67e23ef80..a32dc2111b90 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -171,7 +171,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&dt_pcr_layout, name,
- parent_names, NULL,
+ parent_names, NULL, NULL,
num_parents, id, &range,
chg_pid);
if (IS_ERR(hw))
@@ -269,7 +269,7 @@ static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- hw = at91_clk_register_main_osc(regmap, name, parent_name, bypass);
+ hw = at91_clk_register_main_osc(regmap, name, parent_name, NULL, bypass);
if (IS_ERR(hw))
return;
@@ -323,7 +323,7 @@ static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- hw = at91_clk_register_rm9200_main(regmap, name, parent_name);
+ hw = at91_clk_register_rm9200_main(regmap, name, parent_name, NULL);
if (IS_ERR(hw))
return;
@@ -354,7 +354,7 @@ static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- hw = at91_clk_register_sam9x5_main(regmap, name, parent_names,
+ hw = at91_clk_register_sam9x5_main(regmap, name, parent_names, NULL,
num_parents);
if (IS_ERR(hw))
return;
@@ -420,12 +420,12 @@ of_at91_clk_master_setup(struct device_node *np,
return;
hw = at91_clk_register_master_pres(regmap, "masterck_pres", num_parents,
- parent_names, layout,
+ parent_names, NULL, layout,
characteristics, &mck_lock);
if (IS_ERR(hw))
goto out_free_characteristics;
- hw = at91_clk_register_master_div(regmap, name, "masterck_pres",
+ hw = at91_clk_register_master_div(regmap, name, "masterck_pres", NULL,
layout, characteristics,
&mck_lock, CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
@@ -490,7 +490,7 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
if (type == PERIPHERAL_AT91RM9200) {
hw = at91_clk_register_peripheral(regmap, name,
- parent_name, id);
+ parent_name, NULL, id);
} else {
struct clk_range range = CLK_RANGE(0, 0);
unsigned long flags = 0;
@@ -512,6 +512,7 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
&dt_pcr_layout,
name,
parent_name,
+ NULL,
id, &range,
INT_MIN,
flags);
@@ -769,7 +770,7 @@ of_at91_clk_prog_setup(struct device_node *np,
name = progclknp->name;
hw = at91_clk_register_programmable(regmap, name,
- parent_names, num_parents,
+ parent_names, NULL, num_parents,
id, layout, mux_table);
if (IS_ERR(hw))
continue;
@@ -907,8 +908,8 @@ static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
if (!strcmp(sysclknp->name, "ddrck"))
flags = CLK_IS_CRITICAL;
- hw = at91_clk_register_system(regmap, name, parent_name, id,
- flags);
+ hw = at91_clk_register_system(regmap, name, parent_name, NULL,
+ id, flags);
if (IS_ERR(hw))
continue;
@@ -1054,7 +1055,7 @@ static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
regmap_sfr = NULL;
}
- hw = at91_clk_register_utmi(regmap_pmc, regmap_sfr, name, parent_name);
+ hw = at91_clk_register_utmi(regmap_pmc, regmap_sfr, name, parent_name, NULL);
if (IS_ERR(hw))
return;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 1b3ca7dd9b57..0f52e80bcd49 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -144,7 +144,8 @@ struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char **parent_names,
- u32 *mux_table, u8 num_parents, u8 id,
+ struct clk_hw **parent_hws, u32 *mux_table,
+ u8 num_parents, u8 id,
const struct clk_range *range, int chg_pid);
struct clk_hw * __init
@@ -161,25 +162,29 @@ at91_clk_register_main_rc_osc(struct regmap *regmap, const char *name,
u32 frequency, u32 accuracy);
struct clk_hw * __init
at91_clk_register_main_osc(struct regmap *regmap, const char *name,
- const char *parent_name, bool bypass);
+ const char *parent_name,
+ struct clk_parent_data *parent_data, bool bypass);
struct clk_hw * __init
at91_clk_register_rm9200_main(struct regmap *regmap,
const char *name,
- const char *parent_name);
+ const char *parent_name,
+ struct clk_hw *parent_hw);
struct clk_hw * __init
at91_clk_register_sam9x5_main(struct regmap *regmap, const char *name,
- const char **parent_names, int num_parents);
+ const char **parent_names,
+ struct clk_hw **parent_hws, int num_parents);
struct clk_hw * __init
at91_clk_register_master_pres(struct regmap *regmap, const char *name,
int num_parents, const char **parent_names,
+ struct clk_hw **parent_hws,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
spinlock_t *lock);
struct clk_hw * __init
at91_clk_register_master_div(struct regmap *regmap, const char *name,
- const char *parent_names,
+ const char *parent_names, struct clk_hw *parent_hw,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
spinlock_t *lock, u32 flags, u32 safe_div);
@@ -187,17 +192,20 @@ at91_clk_register_master_div(struct regmap *regmap, const char *name,
struct clk_hw * __init
at91_clk_sama7g5_register_master(struct regmap *regmap,
const char *name, int num_parents,
- const char **parent_names, u32 *mux_table,
+ const char **parent_names,
+ struct clk_hw **parent_hws, u32 *mux_table,
spinlock_t *lock, u8 id, bool critical,
int chg_pid);
struct clk_hw * __init
at91_clk_register_peripheral(struct regmap *regmap, const char *name,
- const char *parent_name, u32 id);
+ const char *parent_name, struct clk_hw *parent_hw,
+ u32 id);
struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
+ struct clk_hw *parent_hw,
u32 id, const struct clk_range *range,
int chg_pid, unsigned long flags);
@@ -212,7 +220,8 @@ at91_clk_register_plldiv(struct regmap *regmap, const char *name,
struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
- const char *name, const char *parent_name, u8 id,
+ const char *name, const char *parent_name,
+ struct clk_hw *parent_hw, u8 id,
const struct clk_pll_characteristics *characteristics,
const struct clk_pll_layout *layout, u32 flags,
u32 safe_div);
@@ -226,7 +235,8 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap, const char *name,
- const char **parent_names, u8 num_parents, u8 id,
+ const char **parent_names, struct clk_hw **parent_hws,
+ u8 num_parents, u8 id,
const struct clk_programmable_layout *layout,
u32 *mux_table);
@@ -242,7 +252,8 @@ at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
struct clk_hw * __init
at91_clk_register_system(struct regmap *regmap, const char *name,
- const char *parent_name, u8 id, unsigned long flags);
+ const char *parent_name, struct clk_hw *parent_hw,
+ u8 id, unsigned long flags);
struct clk_hw * __init
at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
@@ -259,10 +270,11 @@ at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
struct clk_hw * __init
at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
- const char *name, const char *parent_name);
+ const char *name, const char *parent_name,
+ struct clk_hw *parent_hw);
struct clk_hw * __init
at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name,
- const char *parent_name);
+ const char *parent_name, struct clk_hw *parent_hw);
#endif /* __PMC_H_ */
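
The pmc.h prototype changes above all follow the same scheme: each registration helper now accepts struct clk_hw parents (or clk_parent_data for the main oscillator) alongside the legacy name strings, and wires exactly one of the clk_init_data parent fields. A condensed sketch of that pattern, using placeholder "foo" names rather than a real at91 clock type:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>

struct clk_foo {
	struct clk_hw hw;
};

/* Empty ops for illustration only; a real clock supplies recalc/enable ops. */
static const struct clk_ops foo_ops;

static struct clk_hw *clk_register_foo(const char *name,
					const char **parent_names,
					struct clk_hw **parent_hws,
					int num_parents)
{
	struct clk_init_data init = {};	/* zero-init: unused parent fields stay NULL */
	struct clk_foo *foo;
	int ret;

	if (!name || !(parent_names || parent_hws) || !num_parents)
		return ERR_PTR(-EINVAL);

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &foo_ops;
	if (parent_hws)
		init.parent_hws = (const struct clk_hw **)parent_hws;
	else
		init.parent_names = parent_names;
	init.num_parents = num_parents;

	foo->hw.init = &init;
	ret = clk_hw_register(NULL, &foo->hw);
	if (ret) {
		kfree(foo);
		return ERR_PTR(ret);
	}

	return &foo->hw;
}
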
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index ac070db58195..e309cbf3cb9a 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -219,14 +219,14 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, 0);
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL, 0);
if (IS_ERR(hw))
goto err_free;
main_osc_hw = hw;
parent_names[0] = "main_rc_osc";
parent_names[1] = "main_osc";
- hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2);
if (IS_ERR(hw))
goto err_free;
@@ -246,7 +246,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
goto err_free;
hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "pllack_divck",
- "pllack_fracck", 0, &plla_characteristics,
+ "pllack_fracck", NULL, 0, &plla_characteristics,
&pll_div_layout,
/*
* This feeds CPU. It should not
@@ -266,7 +266,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
goto err_free;
hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "upllck_divck",
- "upllck_fracck", 1, &upll_characteristics,
+ "upllck_fracck", NULL, 1, &upll_characteristics,
&pll_div_layout,
CLK_SET_RATE_GATE |
CLK_SET_PARENT_GATE |
@@ -280,13 +280,13 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "pllack_divck";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 3,
- parent_names, &sam9x60_master_layout,
+ parent_names, NULL, &sam9x60_master_layout,
&mck_characteristics, &mck_lock);
if (IS_ERR(hw))
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres", &sam9x60_master_layout,
+ "masterck_pres", NULL, &sam9x60_master_layout,
&mck_characteristics, &mck_lock,
CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
@@ -313,7 +313,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 6, i,
+ parent_names, NULL, 6, i,
&sam9x60_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -324,7 +324,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sam9x60_systemck); i++) {
hw = at91_clk_register_system(regmap, sam9x60_systemck[i].n,
- sam9x60_systemck[i].p,
+ sam9x60_systemck[i].p, NULL,
sam9x60_systemck[i].id,
sam9x60_systemck[i].flags);
if (IS_ERR(hw))
@@ -337,7 +337,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sam9x60_pcr_layout,
sam9x60_periphck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
sam9x60_periphck[i].id,
&range, INT_MIN,
sam9x60_periphck[i].flags);
@@ -351,7 +351,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&sam9x60_pcr_layout,
sam9x60_gck[i].n,
- parent_names, NULL, 6,
+ parent_names, NULL, NULL, 6,
sam9x60_gck[i].id,
&sam9x60_gck[i].r, INT_MIN);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index c0e3e1a4bbf3..c16594fce90c 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -202,14 +202,14 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL,
bypass);
if (IS_ERR(hw))
goto err_free;
parent_names[0] = "main_rc_osc";
parent_names[1] = "main_osc";
- hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2);
if (IS_ERR(hw))
goto err_free;
@@ -249,7 +249,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
if (IS_ERR(regmap_sfr))
regmap_sfr = NULL;
- hw = at91_clk_register_utmi(regmap, regmap_sfr, "utmick", "mainck");
+ hw = at91_clk_register_utmi(regmap, regmap_sfr, "utmick", "mainck", NULL);
if (IS_ERR(hw))
goto err_free;
@@ -260,14 +260,14 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
- parent_names,
+ parent_names, NULL,
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock);
if (IS_ERR(hw))
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres",
+ "masterck_pres", NULL,
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
CLK_SET_RATE_GATE, 0);
@@ -300,7 +300,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 6, i,
+ parent_names, NULL, 6, i,
&sama5d2_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -311,7 +311,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d2_systemck); i++) {
hw = at91_clk_register_system(regmap, sama5d2_systemck[i].n,
- sama5d2_systemck[i].p,
+ sama5d2_systemck[i].p, NULL,
sama5d2_systemck[i].id,
sama5d2_systemck[i].flags);
if (IS_ERR(hw))
@@ -324,7 +324,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d2_pcr_layout,
sama5d2_periphck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
sama5d2_periphck[i].id,
&range, INT_MIN,
sama5d2_periphck[i].flags);
@@ -338,7 +338,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d2_pcr_layout,
sama5d2_periph32ck[i].n,
- "h32mxck",
+ "h32mxck", NULL,
sama5d2_periph32ck[i].id,
&sama5d2_periph32ck[i].r,
INT_MIN, 0);
@@ -358,7 +358,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&sama5d2_pcr_layout,
sama5d2_gck[i].n,
- parent_names, NULL, 6,
+ parent_names, NULL, NULL, 6,
sama5d2_gck[i].id,
&sama5d2_gck[i].r,
sama5d2_gck[i].chg_pid);
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index ad6068b884de..522ce6031446 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -150,14 +150,14 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL,
bypass);
if (IS_ERR(hw))
goto err_free;
parent_names[0] = "main_rc_osc";
parent_names[1] = "main_osc";
- hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2);
if (IS_ERR(hw))
goto err_free;
@@ -172,7 +172,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
sama5d3_pmc->chws[PMC_PLLACK] = hw;
- hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck", NULL);
if (IS_ERR(hw))
goto err_free;
@@ -183,14 +183,14 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
- parent_names,
+ parent_names, NULL,
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock);
if (IS_ERR(hw))
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres",
+ "masterck_pres", NULL,
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
CLK_SET_RATE_GATE, 0);
@@ -220,7 +220,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 5, i,
+ parent_names, NULL, 5, i,
&at91sam9x5_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -231,7 +231,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d3_systemck); i++) {
hw = at91_clk_register_system(regmap, sama5d3_systemck[i].n,
- sama5d3_systemck[i].p,
+ sama5d3_systemck[i].p, NULL,
sama5d3_systemck[i].id,
sama5d3_systemck[i].flags);
if (IS_ERR(hw))
@@ -244,7 +244,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d3_pcr_layout,
sama5d3_periphck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
sama5d3_periphck[i].id,
&sama5d3_periphck[i].r,
INT_MIN,
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index e876ec971a39..160c0bddb6a3 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -165,14 +165,14 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL,
bypass);
if (IS_ERR(hw))
goto err_free;
parent_names[0] = "main_rc_osc";
parent_names[1] = "main_osc";
- hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2);
if (IS_ERR(hw))
goto err_free;
@@ -187,7 +187,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
sama5d4_pmc->chws[PMC_PLLACK] = hw;
- hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck", NULL);
if (IS_ERR(hw))
goto err_free;
@@ -198,14 +198,14 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
- parent_names,
+ parent_names, NULL,
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock);
if (IS_ERR(hw))
goto err_free;
hw = at91_clk_register_master_div(regmap, "masterck_div",
- "masterck_pres",
+ "masterck_pres", NULL,
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
CLK_SET_RATE_GATE, 0);
@@ -243,7 +243,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 5, i,
+ parent_names, NULL, 5, i,
&at91sam9x5_programmable_layout,
NULL);
if (IS_ERR(hw))
@@ -254,7 +254,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d4_systemck); i++) {
hw = at91_clk_register_system(regmap, sama5d4_systemck[i].n,
- sama5d4_systemck[i].p,
+ sama5d4_systemck[i].p, NULL,
sama5d4_systemck[i].id,
sama5d4_systemck[i].flags);
if (IS_ERR(hw))
@@ -267,7 +267,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d4_pcr_layout,
sama5d4_periphck[i].n,
- "masterck_div",
+ "masterck_div", NULL,
sama5d4_periphck[i].id,
&range, INT_MIN,
sama5d4_periphck[i].flags);
@@ -281,7 +281,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d4_pcr_layout,
sama5d4_periph32ck[i].n,
- "h32mxck",
+ "h32mxck", NULL,
sama5d4_periph32ck[i].id,
&range, INT_MIN, 0);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index f135b662f1ff..91b5c6f14819 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -57,6 +57,18 @@ enum pll_ids {
};
/*
+ * PLL component identifier
+ * @PLL_COMPID_FRAC: Fractional PLL component identifier
+ * @PLL_COMPID_DIV0: 1st PLL divider component identifier
+ * @PLL_COMPID_DIV1: 2nd PLL divider component identifier
+ */
+enum pll_component_id {
+ PLL_COMPID_FRAC,
+ PLL_COMPID_DIV0,
+ PLL_COMPID_DIV1,
+};
+
+/*
* PLL type identifiers
* @PLL_TYPE_FRAC: fractional PLL identifier
* @PLL_TYPE_DIV: divider PLL identifier
@@ -119,185 +131,233 @@ static const struct clk_pll_characteristics pll_characteristics = {
};
/*
+ * SAMA7G5 PLL possible parents
+ * @SAMA7G5_PLL_PARENT_MAINCK: MAINCK is a PLL parent
+ * @SAMA7G5_PLL_PARENT_MAIN_XTAL: MAIN XTAL is a PLL parent
+ * @SAMA7G5_PLL_PARENT_FRACCK: Frac PLL is a PLL parent (for PLL dividers)
+ */
+enum sama7g5_pll_parent {
+ SAMA7G5_PLL_PARENT_MAINCK,
+ SAMA7G5_PLL_PARENT_MAIN_XTAL,
+ SAMA7G5_PLL_PARENT_FRACCK,
+};
+
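A minimal sketch, for illustration only (these are not lines from the patch), of how a fractional PLL's .p value is resolved to a parent clk_hw; it mirrors the switch added later in sama7g5_pmc_setup(), with pll, pmc and main_xtal_hw assumed to be locals:

	/* FRAC PLLs: pick the hw parent according to .p */
	switch (pll->p) {
	case SAMA7G5_PLL_PARENT_MAINCK:
		parent_hw = pmc->chws[PMC_MAIN];
		break;
	case SAMA7G5_PLL_PARENT_MAIN_XTAL:
		parent_hw = main_xtal_hw;
		break;
	default:
		/* SAMA7G5_PLL_PARENT_FRACCK: dividers take the frac PLL hw directly */
		parent_hw = NULL;
		break;
	}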
+/*
* PLL clocks description
* @n: clock name
- * @p: clock parent
* @l: clock layout
* @c: clock characteristics
+ * @hw: pointer to clk_hw
* @t: clock type
* @f: clock flags
+ * @p: clock parent
* @eid: export index in sama7g5->chws[] array
 * @safe_div: intermediate divider needs to be set on PRE_RATE_CHANGE
* notification
*/
-static const struct {
+static struct sama7g5_pll {
const char *n;
- const char *p;
const struct clk_pll_layout *l;
const struct clk_pll_characteristics *c;
+ struct clk_hw *hw;
unsigned long f;
+ enum sama7g5_pll_parent p;
u8 t;
u8 eid;
u8 safe_div;
} sama7g5_plls[][PLL_ID_MAX] = {
[PLL_ID_CPU] = {
- { .n = "cpupll_fracck",
- .p = "mainck",
- .l = &pll_layout_frac,
- .c = &cpu_pll_characteristics,
- .t = PLL_TYPE_FRAC,
- /*
- * This feeds cpupll_divpmcck which feeds CPU. It should
- * not be disabled.
- */
- .f = CLK_IS_CRITICAL, },
-
- { .n = "cpupll_divpmcck",
- .p = "cpupll_fracck",
- .l = &pll_layout_divpmc,
- .c = &cpu_pll_characteristics,
- .t = PLL_TYPE_DIV,
- /* This feeds CPU. It should not be disabled. */
- .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .eid = PMC_CPUPLL,
- /*
- * Safe div=15 should be safe even for switching b/w 1GHz and
- * 90MHz (frac pll might go up to 1.2GHz).
- */
- .safe_div = 15, },
+ [PLL_COMPID_FRAC] = {
+ .n = "cpupll_fracck",
+ .p = SAMA7G5_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds cpupll_divpmcck which feeds CPU. It should
+ * not be disabled.
+ */
+ .f = CLK_IS_CRITICAL,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "cpupll_divpmcck",
+ .p = SAMA7G5_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .eid = PMC_CPUPLL,
+ /*
+ * Safe div=15 should be safe even for switching b/w 1GHz and
+ * 90MHz (frac pll might go up to 1.2GHz).
+ */
+ .safe_div = 15,
+ },
},
[PLL_ID_SYS] = {
- { .n = "syspll_fracck",
- .p = "mainck",
- .l = &pll_layout_frac,
- .c = &pll_characteristics,
- .t = PLL_TYPE_FRAC,
- /*
- * This feeds syspll_divpmcck which may feed critical parts
- * of the systems like timers. Therefore it should not be
- * disabled.
- */
- .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, },
-
- { .n = "syspll_divpmcck",
- .p = "syspll_fracck",
- .l = &pll_layout_divpmc,
- .c = &pll_characteristics,
- .t = PLL_TYPE_DIV,
- /*
- * This may feed critical parts of the systems like timers.
- * Therefore it should not be disabled.
- */
- .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
- .eid = PMC_SYSPLL, },
+ [PLL_COMPID_FRAC] = {
+ .n = "syspll_fracck",
+ .p = SAMA7G5_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds syspll_divpmcck which may feed critical parts
+ * of the systems like timers. Therefore it should not be
+ * disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "syspll_divpmcck",
+ .p = SAMA7G5_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /*
+ * This may feed critical parts of the systems like timers.
+ * Therefore it should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_SYSPLL,
+ },
},
[PLL_ID_DDR] = {
- { .n = "ddrpll_fracck",
- .p = "mainck",
- .l = &pll_layout_frac,
- .c = &pll_characteristics,
- .t = PLL_TYPE_FRAC,
- /*
- * This feeds ddrpll_divpmcck which feeds DDR. It should not
- * be disabled.
- */
- .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, },
-
- { .n = "ddrpll_divpmcck",
- .p = "ddrpll_fracck",
- .l = &pll_layout_divpmc,
- .c = &pll_characteristics,
- .t = PLL_TYPE_DIV,
- /* This feeds DDR. It should not be disabled. */
- .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, },
+ [PLL_COMPID_FRAC] = {
+ .n = "ddrpll_fracck",
+ .p = SAMA7G5_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds ddrpll_divpmcck which feeds DDR. It should not
+ * be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ddrpll_divpmcck",
+ .p = SAMA7G5_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds DDR. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
},
[PLL_ID_IMG] = {
- { .n = "imgpll_fracck",
- .p = "mainck",
- .l = &pll_layout_frac,
- .c = &pll_characteristics,
- .t = PLL_TYPE_FRAC,
- .f = CLK_SET_RATE_GATE, },
-
- { .n = "imgpll_divpmcck",
- .p = "imgpll_fracck",
- .l = &pll_layout_divpmc,
- .c = &pll_characteristics,
- .t = PLL_TYPE_DIV,
- .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT, },
+ [PLL_COMPID_FRAC] = {
+ .n = "imgpll_fracck",
+ .p = SAMA7G5_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "imgpll_divpmcck",
+ .p = SAMA7G5_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ },
},
[PLL_ID_BAUD] = {
- { .n = "baudpll_fracck",
- .p = "mainck",
- .l = &pll_layout_frac,
- .c = &pll_characteristics,
- .t = PLL_TYPE_FRAC,
- .f = CLK_SET_RATE_GATE, },
-
- { .n = "baudpll_divpmcck",
- .p = "baudpll_fracck",
- .l = &pll_layout_divpmc,
- .c = &pll_characteristics,
- .t = PLL_TYPE_DIV,
- .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT, },
+ [PLL_COMPID_FRAC] = {
+ .n = "baudpll_fracck",
+ .p = SAMA7G5_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE, },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "baudpll_divpmcck",
+ .p = SAMA7G5_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ },
},
[PLL_ID_AUDIO] = {
- { .n = "audiopll_fracck",
- .p = "main_xtal",
- .l = &pll_layout_frac,
- .c = &pll_characteristics,
- .t = PLL_TYPE_FRAC,
- .f = CLK_SET_RATE_GATE, },
-
- { .n = "audiopll_divpmcck",
- .p = "audiopll_fracck",
- .l = &pll_layout_divpmc,
- .c = &pll_characteristics,
- .t = PLL_TYPE_DIV,
- .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT,
- .eid = PMC_AUDIOPMCPLL, },
-
- { .n = "audiopll_diviock",
- .p = "audiopll_fracck",
- .l = &pll_layout_divio,
- .c = &pll_characteristics,
- .t = PLL_TYPE_DIV,
- .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT,
- .eid = PMC_AUDIOIOPLL, },
+ [PLL_COMPID_FRAC] = {
+ .n = "audiopll_fracck",
+ .p = SAMA7G5_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "audiopll_divpmcck",
+ .p = SAMA7G5_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOPMCPLL,
+ },
+
+ [PLL_COMPID_DIV1] = {
+ .n = "audiopll_diviock",
+ .p = SAMA7G5_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divio,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOIOPLL,
+ },
},
[PLL_ID_ETH] = {
- { .n = "ethpll_fracck",
- .p = "main_xtal",
- .l = &pll_layout_frac,
- .c = &pll_characteristics,
- .t = PLL_TYPE_FRAC,
- .f = CLK_SET_RATE_GATE, },
-
- { .n = "ethpll_divpmcck",
- .p = "ethpll_fracck",
- .l = &pll_layout_divpmc,
- .c = &pll_characteristics,
- .t = PLL_TYPE_DIV,
- .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT, },
+ [PLL_COMPID_FRAC] = {
+ .n = "ethpll_fracck",
+ .p = SAMA7G5_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ethpll_divpmcck",
+ .p = SAMA7G5_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ },
},
};
+/* Used to create an array entry identifying a PLL by its components. */
+#define PLL_IDS_TO_ARR_ENTRY(_id, _comp) { PLL_ID_##_id, PLL_COMPID_##_comp }
+
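A minimal usage sketch (illustrative, not part of the patch) of how the macro expands and how such an entry is resolved back to a clk_hw once the PLLs have been registered:

	/* PLL_IDS_TO_ARR_ENTRY(SYS, DIV0) expands to { PLL_ID_SYS, PLL_COMPID_DIV0 } */
	struct clk_hw *parent_hw = sama7g5_plls[PLL_ID_SYS][PLL_COMPID_DIV0].hw;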
/*
* Master clock (MCK[1..4]) description
* @n: clock name
- * @ep: extra parents names array
- * @ep_chg_chg_id: index in parents array that specifies the changeable
+ * @ep: extra parents array (each entry is formed from PLL component
+ *	identifiers, see enum pll_component_id)
+ * @hw: pointer to clk_hw
+ * @ep_chg_id: index in parents array that specifies the changeable
* parent
* @ep_count: extra parents count
* @ep_mux_table: mux table for extra parents
@@ -305,9 +365,13 @@ static const struct {
* @eid: export index in sama7g5->chws[] array
* @c: true if clock is critical and cannot be disabled
*/
-static const struct {
+static struct {
const char *n;
- const char *ep[4];
+ struct {
+ int pll_id;
+ int pll_compid;
+ } ep[4];
+ struct clk_hw *hw;
int ep_chg_id;
u8 ep_count;
u8 ep_mux_table[4];
@@ -315,9 +379,10 @@ static const struct {
u8 eid;
u8 c;
} sama7g5_mckx[] = {
+ { .n = "mck0", }, /* Dummy entry for MCK0 to store hw in probe. */
{ .n = "mck1",
.id = 1,
- .ep = { "syspll_divpmcck", },
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
.ep_mux_table = { 5, },
.ep_count = 1,
.ep_chg_id = INT_MIN,
@@ -326,7 +391,7 @@ static const struct {
{ .n = "mck2",
.id = 2,
- .ep = { "ddrpll_divpmcck", },
+ .ep = { PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
.ep_mux_table = { 6, },
.ep_count = 1,
.ep_chg_id = INT_MIN,
@@ -334,14 +399,15 @@ static const struct {
{ .n = "mck3",
.id = 3,
- .ep = { "syspll_divpmcck", "ddrpll_divpmcck", "imgpll_divpmcck", },
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(IMG, DIV0), },
.ep_mux_table = { 5, 6, 7, },
.ep_count = 3,
.ep_chg_id = 5, },
{ .n = "mck4",
.id = 4,
- .ep = { "syspll_divpmcck", },
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
.ep_mux_table = { 5, },
.ep_count = 1,
.ep_chg_id = INT_MIN,
@@ -351,120 +417,137 @@ static const struct {
/*
* System clock description
* @n: clock name
- * @p: clock parent name
* @id: clock id
*/
static const struct {
const char *n;
- const char *p;
u8 id;
} sama7g5_systemck[] = {
- { .n = "pck0", .p = "prog0", .id = 8, },
- { .n = "pck1", .p = "prog1", .id = 9, },
- { .n = "pck2", .p = "prog2", .id = 10, },
- { .n = "pck3", .p = "prog3", .id = 11, },
- { .n = "pck4", .p = "prog4", .id = 12, },
- { .n = "pck5", .p = "prog5", .id = 13, },
- { .n = "pck6", .p = "prog6", .id = 14, },
- { .n = "pck7", .p = "prog7", .id = 15, },
+ { .n = "pck0", .id = 8, },
+ { .n = "pck1", .id = 9, },
+ { .n = "pck2", .id = 10, },
+ { .n = "pck3", .id = 11, },
+ { .n = "pck4", .id = 12, },
+ { .n = "pck5", .id = 13, },
+ { .n = "pck6", .id = 14, },
+ { .n = "pck7", .id = 15, },
};
/* Mux table for programmable clocks. */
static u32 sama7g5_prog_mux_table[] = { 0, 1, 2, 5, 6, 7, 8, 9, 10, };
/*
+ * Peripheral clock parent hw identifier (used to index into sama7g5_mckx[])
+ * @PCK_PARENT_HW_MCK0: pck parent hw identifier is MCK0
+ * @PCK_PARENT_HW_MCK1: pck parent hw identifier is MCK1
+ * @PCK_PARENT_HW_MCK2: pck parent hw identifier is MCK2
+ * @PCK_PARENT_HW_MCK3: pck parent hw identifier is MCK3
+ * @PCK_PARENT_HW_MCK4: pck parent hw identifier is MCK4
+ * @PCK_PARENT_HW_MAX: max identifier
+ */
+enum sama7g5_pck_parent_hw_id {
+ PCK_PARENT_HW_MCK0,
+ PCK_PARENT_HW_MCK1,
+ PCK_PARENT_HW_MCK2,
+ PCK_PARENT_HW_MCK3,
+ PCK_PARENT_HW_MCK4,
+ PCK_PARENT_HW_MAX,
+};
+
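A hypothetical example of the lookup this enum enables (the .hw pointers are filled in during sama7g5_pmc_setup()):

	/* "sfr_clk" below has .p = PCK_PARENT_HW_MCK1, so its parent resolves to: */
	struct clk_hw *parent_hw = sama7g5_mckx[PCK_PARENT_HW_MCK1].hw;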
+/*
* Peripheral clock description
* @n: clock name
- * @p: clock parent name
+ * @p: clock parent hw id
* @r: clock range values
* @id: clock id
* @chgp: index in parent array of the changeable parent
*/
-static const struct {
+static struct {
const char *n;
- const char *p;
+ enum sama7g5_pck_parent_hw_id p;
struct clk_range r;
u8 chgp;
u8 id;
} sama7g5_periphck[] = {
- { .n = "pioA_clk", .p = "mck0", .id = 11, },
- { .n = "securam_clk", .p = "mck0", .id = 18, },
- { .n = "sfr_clk", .p = "mck1", .id = 19, },
- { .n = "hsmc_clk", .p = "mck1", .id = 21, },
- { .n = "xdmac0_clk", .p = "mck1", .id = 22, },
- { .n = "xdmac1_clk", .p = "mck1", .id = 23, },
- { .n = "xdmac2_clk", .p = "mck1", .id = 24, },
- { .n = "acc_clk", .p = "mck1", .id = 25, },
- { .n = "aes_clk", .p = "mck1", .id = 27, },
- { .n = "tzaesbasc_clk", .p = "mck1", .id = 28, },
- { .n = "asrc_clk", .p = "mck1", .id = 30, .r = { .max = 200000000, }, },
- { .n = "cpkcc_clk", .p = "mck0", .id = 32, },
- { .n = "csi_clk", .p = "mck3", .id = 33, .r = { .max = 266000000, }, .chgp = 1, },
- { .n = "csi2dc_clk", .p = "mck3", .id = 34, .r = { .max = 266000000, }, .chgp = 1, },
- { .n = "eic_clk", .p = "mck1", .id = 37, },
- { .n = "flex0_clk", .p = "mck1", .id = 38, },
- { .n = "flex1_clk", .p = "mck1", .id = 39, },
- { .n = "flex2_clk", .p = "mck1", .id = 40, },
- { .n = "flex3_clk", .p = "mck1", .id = 41, },
- { .n = "flex4_clk", .p = "mck1", .id = 42, },
- { .n = "flex5_clk", .p = "mck1", .id = 43, },
- { .n = "flex6_clk", .p = "mck1", .id = 44, },
- { .n = "flex7_clk", .p = "mck1", .id = 45, },
- { .n = "flex8_clk", .p = "mck1", .id = 46, },
- { .n = "flex9_clk", .p = "mck1", .id = 47, },
- { .n = "flex10_clk", .p = "mck1", .id = 48, },
- { .n = "flex11_clk", .p = "mck1", .id = 49, },
- { .n = "gmac0_clk", .p = "mck1", .id = 51, },
- { .n = "gmac1_clk", .p = "mck1", .id = 52, },
- { .n = "icm_clk", .p = "mck1", .id = 55, },
- { .n = "isc_clk", .p = "mck3", .id = 56, .r = { .max = 266000000, }, .chgp = 1, },
- { .n = "i2smcc0_clk", .p = "mck1", .id = 57, .r = { .max = 200000000, }, },
- { .n = "i2smcc1_clk", .p = "mck1", .id = 58, .r = { .max = 200000000, }, },
- { .n = "matrix_clk", .p = "mck1", .id = 60, },
- { .n = "mcan0_clk", .p = "mck1", .id = 61, .r = { .max = 200000000, }, },
- { .n = "mcan1_clk", .p = "mck1", .id = 62, .r = { .max = 200000000, }, },
- { .n = "mcan2_clk", .p = "mck1", .id = 63, .r = { .max = 200000000, }, },
- { .n = "mcan3_clk", .p = "mck1", .id = 64, .r = { .max = 200000000, }, },
- { .n = "mcan4_clk", .p = "mck1", .id = 65, .r = { .max = 200000000, }, },
- { .n = "mcan5_clk", .p = "mck1", .id = 66, .r = { .max = 200000000, }, },
- { .n = "pdmc0_clk", .p = "mck1", .id = 68, .r = { .max = 200000000, }, },
- { .n = "pdmc1_clk", .p = "mck1", .id = 69, .r = { .max = 200000000, }, },
- { .n = "pit64b0_clk", .p = "mck1", .id = 70, },
- { .n = "pit64b1_clk", .p = "mck1", .id = 71, },
- { .n = "pit64b2_clk", .p = "mck1", .id = 72, },
- { .n = "pit64b3_clk", .p = "mck1", .id = 73, },
- { .n = "pit64b4_clk", .p = "mck1", .id = 74, },
- { .n = "pit64b5_clk", .p = "mck1", .id = 75, },
- { .n = "pwm_clk", .p = "mck1", .id = 77, },
- { .n = "qspi0_clk", .p = "mck1", .id = 78, },
- { .n = "qspi1_clk", .p = "mck1", .id = 79, },
- { .n = "sdmmc0_clk", .p = "mck1", .id = 80, },
- { .n = "sdmmc1_clk", .p = "mck1", .id = 81, },
- { .n = "sdmmc2_clk", .p = "mck1", .id = 82, },
- { .n = "sha_clk", .p = "mck1", .id = 83, },
- { .n = "spdifrx_clk", .p = "mck1", .id = 84, .r = { .max = 200000000, }, },
- { .n = "spdiftx_clk", .p = "mck1", .id = 85, .r = { .max = 200000000, }, },
- { .n = "ssc0_clk", .p = "mck1", .id = 86, .r = { .max = 200000000, }, },
- { .n = "ssc1_clk", .p = "mck1", .id = 87, .r = { .max = 200000000, }, },
- { .n = "tcb0_ch0_clk", .p = "mck1", .id = 88, .r = { .max = 200000000, }, },
- { .n = "tcb0_ch1_clk", .p = "mck1", .id = 89, .r = { .max = 200000000, }, },
- { .n = "tcb0_ch2_clk", .p = "mck1", .id = 90, .r = { .max = 200000000, }, },
- { .n = "tcb1_ch0_clk", .p = "mck1", .id = 91, .r = { .max = 200000000, }, },
- { .n = "tcb1_ch1_clk", .p = "mck1", .id = 92, .r = { .max = 200000000, }, },
- { .n = "tcb1_ch2_clk", .p = "mck1", .id = 93, .r = { .max = 200000000, }, },
- { .n = "tcpca_clk", .p = "mck1", .id = 94, },
- { .n = "tcpcb_clk", .p = "mck1", .id = 95, },
- { .n = "tdes_clk", .p = "mck1", .id = 96, },
- { .n = "trng_clk", .p = "mck1", .id = 97, },
- { .n = "udphsa_clk", .p = "mck1", .id = 104, },
- { .n = "udphsb_clk", .p = "mck1", .id = 105, },
- { .n = "uhphs_clk", .p = "mck1", .id = 106, },
+ { .n = "pioA_clk", .p = PCK_PARENT_HW_MCK0, .id = 11, },
+ { .n = "securam_clk", .p = PCK_PARENT_HW_MCK0, .id = 18, },
+ { .n = "sfr_clk", .p = PCK_PARENT_HW_MCK1, .id = 19, },
+ { .n = "hsmc_clk", .p = PCK_PARENT_HW_MCK1, .id = 21, },
+ { .n = "xdmac0_clk", .p = PCK_PARENT_HW_MCK1, .id = 22, },
+ { .n = "xdmac1_clk", .p = PCK_PARENT_HW_MCK1, .id = 23, },
+ { .n = "xdmac2_clk", .p = PCK_PARENT_HW_MCK1, .id = 24, },
+ { .n = "acc_clk", .p = PCK_PARENT_HW_MCK1, .id = 25, },
+ { .n = "aes_clk", .p = PCK_PARENT_HW_MCK1, .id = 27, },
+ { .n = "tzaesbasc_clk", .p = PCK_PARENT_HW_MCK1, .id = 28, },
+ { .n = "asrc_clk", .p = PCK_PARENT_HW_MCK1, .id = 30, .r = { .max = 200000000, }, },
+ { .n = "cpkcc_clk", .p = PCK_PARENT_HW_MCK0, .id = 32, },
+ { .n = "csi_clk", .p = PCK_PARENT_HW_MCK3, .id = 33, .r = { .max = 266000000, }, .chgp = 1, },
+ { .n = "csi2dc_clk", .p = PCK_PARENT_HW_MCK3, .id = 34, .r = { .max = 266000000, }, .chgp = 1, },
+ { .n = "eic_clk", .p = PCK_PARENT_HW_MCK1, .id = 37, },
+ { .n = "flex0_clk", .p = PCK_PARENT_HW_MCK1, .id = 38, },
+ { .n = "flex1_clk", .p = PCK_PARENT_HW_MCK1, .id = 39, },
+ { .n = "flex2_clk", .p = PCK_PARENT_HW_MCK1, .id = 40, },
+ { .n = "flex3_clk", .p = PCK_PARENT_HW_MCK1, .id = 41, },
+ { .n = "flex4_clk", .p = PCK_PARENT_HW_MCK1, .id = 42, },
+ { .n = "flex5_clk", .p = PCK_PARENT_HW_MCK1, .id = 43, },
+ { .n = "flex6_clk", .p = PCK_PARENT_HW_MCK1, .id = 44, },
+ { .n = "flex7_clk", .p = PCK_PARENT_HW_MCK1, .id = 45, },
+ { .n = "flex8_clk", .p = PCK_PARENT_HW_MCK1, .id = 46, },
+ { .n = "flex9_clk", .p = PCK_PARENT_HW_MCK1, .id = 47, },
+ { .n = "flex10_clk", .p = PCK_PARENT_HW_MCK1, .id = 48, },
+ { .n = "flex11_clk", .p = PCK_PARENT_HW_MCK1, .id = 49, },
+ { .n = "gmac0_clk", .p = PCK_PARENT_HW_MCK1, .id = 51, },
+ { .n = "gmac1_clk", .p = PCK_PARENT_HW_MCK1, .id = 52, },
+ { .n = "icm_clk", .p = PCK_PARENT_HW_MCK1, .id = 55, },
+ { .n = "isc_clk", .p = PCK_PARENT_HW_MCK3, .id = 56, .r = { .max = 266000000, }, .chgp = 1, },
+ { .n = "i2smcc0_clk", .p = PCK_PARENT_HW_MCK1, .id = 57, .r = { .max = 200000000, }, },
+ { .n = "i2smcc1_clk", .p = PCK_PARENT_HW_MCK1, .id = 58, .r = { .max = 200000000, }, },
+ { .n = "matrix_clk", .p = PCK_PARENT_HW_MCK1, .id = 60, },
+ { .n = "mcan0_clk", .p = PCK_PARENT_HW_MCK1, .id = 61, .r = { .max = 200000000, }, },
+ { .n = "mcan1_clk", .p = PCK_PARENT_HW_MCK1, .id = 62, .r = { .max = 200000000, }, },
+ { .n = "mcan2_clk", .p = PCK_PARENT_HW_MCK1, .id = 63, .r = { .max = 200000000, }, },
+ { .n = "mcan3_clk", .p = PCK_PARENT_HW_MCK1, .id = 64, .r = { .max = 200000000, }, },
+ { .n = "mcan4_clk", .p = PCK_PARENT_HW_MCK1, .id = 65, .r = { .max = 200000000, }, },
+ { .n = "mcan5_clk", .p = PCK_PARENT_HW_MCK1, .id = 66, .r = { .max = 200000000, }, },
+ { .n = "pdmc0_clk", .p = PCK_PARENT_HW_MCK1, .id = 68, .r = { .max = 200000000, }, },
+ { .n = "pdmc1_clk", .p = PCK_PARENT_HW_MCK1, .id = 69, .r = { .max = 200000000, }, },
+ { .n = "pit64b0_clk", .p = PCK_PARENT_HW_MCK1, .id = 70, },
+ { .n = "pit64b1_clk", .p = PCK_PARENT_HW_MCK1, .id = 71, },
+ { .n = "pit64b2_clk", .p = PCK_PARENT_HW_MCK1, .id = 72, },
+ { .n = "pit64b3_clk", .p = PCK_PARENT_HW_MCK1, .id = 73, },
+ { .n = "pit64b4_clk", .p = PCK_PARENT_HW_MCK1, .id = 74, },
+ { .n = "pit64b5_clk", .p = PCK_PARENT_HW_MCK1, .id = 75, },
+ { .n = "pwm_clk", .p = PCK_PARENT_HW_MCK1, .id = 77, },
+ { .n = "qspi0_clk", .p = PCK_PARENT_HW_MCK1, .id = 78, },
+ { .n = "qspi1_clk", .p = PCK_PARENT_HW_MCK1, .id = 79, },
+ { .n = "sdmmc0_clk", .p = PCK_PARENT_HW_MCK1, .id = 80, },
+ { .n = "sdmmc1_clk", .p = PCK_PARENT_HW_MCK1, .id = 81, },
+ { .n = "sdmmc2_clk", .p = PCK_PARENT_HW_MCK1, .id = 82, },
+ { .n = "sha_clk", .p = PCK_PARENT_HW_MCK1, .id = 83, },
+ { .n = "spdifrx_clk", .p = PCK_PARENT_HW_MCK1, .id = 84, .r = { .max = 200000000, }, },
+ { .n = "spdiftx_clk", .p = PCK_PARENT_HW_MCK1, .id = 85, .r = { .max = 200000000, }, },
+ { .n = "ssc0_clk", .p = PCK_PARENT_HW_MCK1, .id = 86, .r = { .max = 200000000, }, },
+ { .n = "ssc1_clk", .p = PCK_PARENT_HW_MCK1, .id = 87, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch0_clk", .p = PCK_PARENT_HW_MCK1, .id = 88, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch1_clk", .p = PCK_PARENT_HW_MCK1, .id = 89, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch2_clk", .p = PCK_PARENT_HW_MCK1, .id = 90, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch0_clk", .p = PCK_PARENT_HW_MCK1, .id = 91, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch1_clk", .p = PCK_PARENT_HW_MCK1, .id = 92, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch2_clk", .p = PCK_PARENT_HW_MCK1, .id = 93, .r = { .max = 200000000, }, },
+ { .n = "tcpca_clk", .p = PCK_PARENT_HW_MCK1, .id = 94, },
+ { .n = "tcpcb_clk", .p = PCK_PARENT_HW_MCK1, .id = 95, },
+ { .n = "tdes_clk", .p = PCK_PARENT_HW_MCK1, .id = 96, },
+ { .n = "trng_clk", .p = PCK_PARENT_HW_MCK1, .id = 97, },
+ { .n = "udphsa_clk", .p = PCK_PARENT_HW_MCK1, .id = 104, },
+ { .n = "udphsb_clk", .p = PCK_PARENT_HW_MCK1, .id = 105, },
+ { .n = "uhphs_clk", .p = PCK_PARENT_HW_MCK1, .id = 106, },
};
/*
* Generic clock description
* @n: clock name
- * @pp: PLL parents
+ * @pp: PLL parents (each entry is formed from PLL component identifiers,
+ *	see enum pll_component_id)
* @pp_mux_table: PLL parents mux table
* @r: clock output range
* @pp_chg_id: id in parent array of changeable PLL parent
@@ -473,7 +556,10 @@ static const struct {
*/
static const struct {
const char *n;
- const char *pp[8];
+ struct {
+ int pll_id;
+ int pll_compid;
+ } pp[8];
const char pp_mux_table[8];
struct clk_range r;
int pp_chg_id;
@@ -483,7 +569,8 @@ static const struct {
{ .n = "adc_gclk",
.id = 26,
.r = { .max = 100000000, },
- .pp = { "syspll_divpmcck", "imgpll_divpmcck", "audiopll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
.pp_mux_table = { 5, 7, 9, },
.pp_count = 3,
.pp_chg_id = INT_MIN, },
@@ -491,7 +578,7 @@ static const struct {
{ .n = "asrc_gclk",
.id = 30,
.r = { .max = 200000000 },
- .pp = { "audiopll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
.pp_mux_table = { 9, },
.pp_count = 1,
.pp_chg_id = 3, },
@@ -499,7 +586,7 @@ static const struct {
{ .n = "csi_gclk",
.id = 33,
.r = { .max = 27000000 },
- .pp = { "ddrpll_divpmcck", "imgpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0), },
.pp_mux_table = { 6, 7, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -507,7 +594,7 @@ static const struct {
{ .n = "flex0_gclk",
.id = 38,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -515,7 +602,7 @@ static const struct {
{ .n = "flex1_gclk",
.id = 39,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -523,7 +610,7 @@ static const struct {
{ .n = "flex2_gclk",
.id = 40,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -531,7 +618,7 @@ static const struct {
{ .n = "flex3_gclk",
.id = 41,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -539,7 +626,7 @@ static const struct {
{ .n = "flex4_gclk",
.id = 42,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -547,7 +634,7 @@ static const struct {
{ .n = "flex5_gclk",
.id = 43,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -555,7 +642,7 @@ static const struct {
{ .n = "flex6_gclk",
.id = 44,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -563,7 +650,7 @@ static const struct {
{ .n = "flex7_gclk",
.id = 45,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -571,7 +658,7 @@ static const struct {
{ .n = "flex8_gclk",
.id = 46,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -579,7 +666,7 @@ static const struct {
{ .n = "flex9_gclk",
.id = 47,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -587,7 +674,7 @@ static const struct {
{ .n = "flex10_gclk",
.id = 48,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -595,7 +682,7 @@ static const struct {
{ .n = "flex11_gclk",
.id = 49,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -603,7 +690,7 @@ static const struct {
{ .n = "gmac0_gclk",
.id = 51,
.r = { .max = 125000000 },
- .pp = { "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 10, },
.pp_count = 1,
.pp_chg_id = 3, },
@@ -611,7 +698,7 @@ static const struct {
{ .n = "gmac1_gclk",
.id = 52,
.r = { .max = 50000000 },
- .pp = { "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 10, },
.pp_count = 1,
.pp_chg_id = INT_MIN, },
@@ -619,7 +706,7 @@ static const struct {
{ .n = "gmac0_tsu_gclk",
.id = 53,
.r = { .max = 300000000 },
- .pp = { "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 9, 10, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -627,7 +714,7 @@ static const struct {
{ .n = "gmac1_tsu_gclk",
.id = 54,
.r = { .max = 300000000 },
- .pp = { "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 9, 10, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -635,7 +722,7 @@ static const struct {
{ .n = "i2smcc0_gclk",
.id = 57,
.r = { .max = 100000000 },
- .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
.pp_chg_id = 4, },
@@ -643,7 +730,7 @@ static const struct {
{ .n = "i2smcc1_gclk",
.id = 58,
.r = { .max = 100000000 },
- .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
.pp_chg_id = 4, },
@@ -651,7 +738,7 @@ static const struct {
{ .n = "mcan0_gclk",
.id = 61,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -659,7 +746,7 @@ static const struct {
{ .n = "mcan1_gclk",
.id = 62,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -667,7 +754,7 @@ static const struct {
{ .n = "mcan2_gclk",
.id = 63,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -675,7 +762,7 @@ static const struct {
{ .n = "mcan3_gclk",
.id = 64,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -683,7 +770,7 @@ static const struct {
{ .n = "mcan4_gclk",
.id = 65,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -691,7 +778,7 @@ static const struct {
{ .n = "mcan5_gclk",
.id = 66,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -699,7 +786,7 @@ static const struct {
{ .n = "pdmc0_gclk",
.id = 68,
.r = { .max = 50000000 },
- .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -707,7 +794,7 @@ static const struct {
{ .n = "pdmc1_gclk",
.id = 69,
.r = { .max = 50000000, },
- .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -715,8 +802,9 @@ static const struct {
{ .n = "pit64b0_gclk",
.id = 70,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
- "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 5, 7, 8, 9, 10, },
.pp_count = 5,
.pp_chg_id = INT_MIN, },
@@ -724,8 +812,9 @@ static const struct {
{ .n = "pit64b1_gclk",
.id = 71,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
- "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 5, 7, 8, 9, 10, },
.pp_count = 5,
.pp_chg_id = INT_MIN, },
@@ -733,8 +822,9 @@ static const struct {
{ .n = "pit64b2_gclk",
.id = 72,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
- "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 5, 7, 8, 9, 10, },
.pp_count = 5,
.pp_chg_id = INT_MIN, },
@@ -742,8 +832,9 @@ static const struct {
{ .n = "pit64b3_gclk",
.id = 73,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
- "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 5, 7, 8, 9, 10, },
.pp_count = 5,
.pp_chg_id = INT_MIN, },
@@ -751,8 +842,9 @@ static const struct {
{ .n = "pit64b4_gclk",
.id = 74,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
- "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 5, 7, 8, 9, 10, },
.pp_count = 5,
.pp_chg_id = INT_MIN, },
@@ -760,8 +852,9 @@ static const struct {
{ .n = "pit64b5_gclk",
.id = 75,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
- "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 5, 7, 8, 9, 10, },
.pp_count = 5,
.pp_chg_id = INT_MIN, },
@@ -769,7 +862,7 @@ static const struct {
{ .n = "qspi0_gclk",
.id = 78,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -777,7 +870,7 @@ static const struct {
{ .n = "qspi1_gclk",
.id = 79,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
@@ -785,7 +878,7 @@ static const struct {
{ .n = "sdmmc0_gclk",
.id = 80,
.r = { .max = 208000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = 4, },
@@ -793,7 +886,7 @@ static const struct {
{ .n = "sdmmc1_gclk",
.id = 81,
.r = { .max = 208000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = 4, },
@@ -801,7 +894,7 @@ static const struct {
{ .n = "sdmmc2_gclk",
.id = 82,
.r = { .max = 208000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
.pp_chg_id = 4, },
@@ -809,7 +902,7 @@ static const struct {
{ .n = "spdifrx_gclk",
.id = 84,
.r = { .max = 150000000 },
- .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
.pp_chg_id = 4, },
@@ -817,7 +910,7 @@ static const struct {
{ .n = "spdiftx_gclk",
.id = 85,
.r = { .max = 25000000 },
- .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
.pp_chg_id = 4, },
@@ -825,8 +918,9 @@ static const struct {
{ .n = "tcb0_ch0_gclk",
.id = 88,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
- "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 5, 7, 8, 9, 10, },
.pp_count = 5,
.pp_chg_id = INT_MIN, },
@@ -834,8 +928,9 @@ static const struct {
{ .n = "tcb1_ch0_gclk",
.id = 91,
.r = { .max = 200000000 },
- .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
- "audiopll_divpmcck", "ethpll_divpmcck", },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(IMG, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
.pp_mux_table = { 5, 7, 8, 9, 10, },
.pp_count = 5,
.pp_chg_id = INT_MIN, },
@@ -884,34 +979,25 @@ static const struct clk_pcr_layout sama7g5_pcr_layout = {
static void __init sama7g5_pmc_setup(struct device_node *np)
{
- const char *td_slck_name, *md_slck_name, *mainxtal_name;
+ const char *main_xtal_name = "main_xtal";
struct pmc_data *sama7g5_pmc;
- const char *parent_names[10];
void **alloc_mem = NULL;
int alloc_mem_size = 0;
struct regmap *regmap;
- struct clk_hw *hw;
+ struct clk_hw *hw, *main_rc_hw, *main_osc_hw, *main_xtal_hw;
+ struct clk_hw *td_slck_hw, *md_slck_hw;
+ static struct clk_parent_data parent_data;
+ struct clk_hw *parent_hws[10];
bool bypass;
int i, j;
- i = of_property_match_string(np, "clock-names", "td_slck");
- if (i < 0)
- return;
-
- td_slck_name = of_clk_get_parent_name(np, i);
-
- i = of_property_match_string(np, "clock-names", "md_slck");
- if (i < 0)
- return;
-
- md_slck_name = of_clk_get_parent_name(np, i);
+ td_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "td_slck"));
+ md_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "md_slck"));
+ main_xtal_hw = __clk_get_hw(of_clk_get_by_name(np, main_xtal_name));
- i = of_property_match_string(np, "clock-names", "main_xtal");
- if (i < 0)
+ if (!td_slck_hw || !md_slck_hw || !main_xtal_hw)
return;
- mainxtal_name = of_clk_get_parent_name(np, i);
-
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
@@ -929,21 +1015,23 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!alloc_mem)
goto err_free;
- hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
- 50000000);
- if (IS_ERR(hw))
+ main_rc_hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(main_rc_hw))
goto err_free;
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
- bypass);
- if (IS_ERR(hw))
+ parent_data.name = main_xtal_name;
+ parent_data.fw_name = main_xtal_name;
+ main_osc_hw = at91_clk_register_main_osc(regmap, "main_osc", NULL,
+ &parent_data, bypass);
+ if (IS_ERR(main_osc_hw))
goto err_free;
- parent_names[0] = "main_rc_osc";
- parent_names[1] = "main_osc";
- hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ parent_hws[0] = main_rc_hw;
+ parent_hws[1] = main_osc_hw;
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", NULL, parent_hws, 2);
if (IS_ERR(hw))
goto err_free;
@@ -958,15 +1046,22 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
switch (sama7g5_plls[i][j].t) {
case PLL_TYPE_FRAC:
- if (!strcmp(sama7g5_plls[i][j].p, "mainck"))
+ switch (sama7g5_plls[i][j].p) {
+ case SAMA7G5_PLL_PARENT_MAINCK:
parent_hw = sama7g5_pmc->chws[PMC_MAIN];
- else
- parent_hw = __clk_get_hw(of_clk_get_by_name(np,
- sama7g5_plls[i][j].p));
+ break;
+ case SAMA7G5_PLL_PARENT_MAIN_XTAL:
+ parent_hw = main_xtal_hw;
+ break;
+ default:
+ /* Should not happen. */
+ parent_hw = NULL;
+ break;
+ }
hw = sam9x60_clk_register_frac_pll(regmap,
&pmc_pll_lock, sama7g5_plls[i][j].n,
- sama7g5_plls[i][j].p, parent_hw, i,
+ NULL, parent_hw, i,
sama7g5_plls[i][j].c,
sama7g5_plls[i][j].l,
sama7g5_plls[i][j].f);
@@ -975,7 +1070,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
case PLL_TYPE_DIV:
hw = sam9x60_clk_register_div_pll(regmap,
&pmc_pll_lock, sama7g5_plls[i][j].n,
- sama7g5_plls[i][j].p, i,
+ NULL, sama7g5_plls[i][0].hw, i,
sama7g5_plls[i][j].c,
sama7g5_plls[i][j].l,
sama7g5_plls[i][j].f,
@@ -989,25 +1084,27 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sama7g5_plls[i][j].hw = hw;
if (sama7g5_plls[i][j].eid)
sama7g5_pmc->chws[sama7g5_plls[i][j].eid] = hw;
}
}
- parent_names[0] = "cpupll_divpmcck";
- hw = at91_clk_register_master_div(regmap, "mck0", "cpupll_divpmcck",
+ hw = at91_clk_register_master_div(regmap, "mck0", NULL,
+ sama7g5_plls[PLL_ID_CPU][1].hw,
&mck0_layout, &mck0_characteristics,
&pmc_mck0_lock, CLK_GET_RATE_NOCACHE, 5);
if (IS_ERR(hw))
goto err_free;
- sama7g5_pmc->chws[PMC_MCK] = hw;
+ sama7g5_mckx[PCK_PARENT_HW_MCK0].hw = sama7g5_pmc->chws[PMC_MCK] = hw;
- parent_names[0] = md_slck_name;
- parent_names[1] = td_slck_name;
- parent_names[2] = "mainck";
- for (i = 0; i < ARRAY_SIZE(sama7g5_mckx); i++) {
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7g5_pmc->chws[PMC_MAIN];
+ for (i = PCK_PARENT_HW_MCK1; i < ARRAY_SIZE(sama7g5_mckx); i++) {
u8 num_parents = 3 + sama7g5_mckx[i].ep_count;
+ struct clk_hw *tmp_parent_hws[8];
u32 *mux_table;
mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
@@ -1018,11 +1115,17 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
SAMA7G5_INIT_TABLE(mux_table, 3);
SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table,
sama7g5_mckx[i].ep_count);
- SAMA7G5_FILL_TABLE(&parent_names[3], sama7g5_mckx[i].ep,
+ for (j = 0; j < sama7g5_mckx[i].ep_count; j++) {
+ u8 pll_id = sama7g5_mckx[i].ep[j].pll_id;
+ u8 pll_compid = sama7g5_mckx[i].ep[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw;
+ }
+ SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
sama7g5_mckx[i].ep_count);
hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n,
- num_parents, parent_names, mux_table,
+ num_parents, NULL, parent_hws, mux_table,
&pmc_mckX_lock, sama7g5_mckx[i].id,
sama7g5_mckx[i].c,
sama7g5_mckx[i].ep_chg_id);
@@ -1031,31 +1134,32 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
alloc_mem[alloc_mem_size++] = mux_table;
+ sama7g5_mckx[i].hw = hw;
if (sama7g5_mckx[i].eid)
sama7g5_pmc->chws[sama7g5_mckx[i].eid] = hw;
}
- hw = at91_clk_sama7g5_register_utmi(regmap, "utmick", "main_xtal");
+ hw = at91_clk_sama7g5_register_utmi(regmap, "utmick", NULL, main_xtal_hw);
if (IS_ERR(hw))
goto err_free;
sama7g5_pmc->chws[PMC_UTMI] = hw;
- parent_names[0] = md_slck_name;
- parent_names[1] = td_slck_name;
- parent_names[2] = "mainck";
- parent_names[3] = "syspll_divpmcck";
- parent_names[4] = "ddrpll_divpmcck";
- parent_names[5] = "imgpll_divpmcck";
- parent_names[6] = "baudpll_divpmcck";
- parent_names[7] = "audiopll_divpmcck";
- parent_names[8] = "ethpll_divpmcck";
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7g5_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7g5_plls[PLL_ID_SYS][PLL_COMPID_DIV0].hw;
+ parent_hws[4] = sama7g5_plls[PLL_ID_DDR][PLL_COMPID_DIV0].hw;
+ parent_hws[5] = sama7g5_plls[PLL_ID_IMG][PLL_COMPID_DIV0].hw;
+ parent_hws[6] = sama7g5_plls[PLL_ID_BAUD][PLL_COMPID_DIV0].hw;
+ parent_hws[7] = sama7g5_plls[PLL_ID_AUDIO][PLL_COMPID_DIV0].hw;
+ parent_hws[8] = sama7g5_plls[PLL_ID_ETH][PLL_COMPID_DIV0].hw;
for (i = 0; i < 8; i++) {
char name[6];
snprintf(name, sizeof(name), "prog%d", i);
- hw = at91_clk_register_programmable(regmap, name, parent_names,
+ hw = at91_clk_register_programmable(regmap, name, NULL, parent_hws,
9, i,
&programmable_layout,
sama7g5_prog_mux_table);
@@ -1067,7 +1171,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama7g5_systemck); i++) {
hw = at91_clk_register_system(regmap, sama7g5_systemck[i].n,
- sama7g5_systemck[i].p,
+ NULL, sama7g5_pmc->pchws[i],
sama7g5_systemck[i].id, 0);
if (IS_ERR(hw))
goto err_free;
@@ -1079,7 +1183,8 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama7g5_pcr_layout,
sama7g5_periphck[i].n,
- sama7g5_periphck[i].p,
+ NULL,
+ sama7g5_mckx[sama7g5_periphck[i].p].hw,
sama7g5_periphck[i].id,
&sama7g5_periphck[i].r,
sama7g5_periphck[i].chgp ? 0 :
@@ -1090,11 +1195,12 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
sama7g5_pmc->phws[sama7g5_periphck[i].id] = hw;
}
- parent_names[0] = md_slck_name;
- parent_names[1] = td_slck_name;
- parent_names[2] = "mainck";
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7g5_pmc->chws[PMC_MAIN];
for (i = 0; i < ARRAY_SIZE(sama7g5_gck); i++) {
u8 num_parents = 3 + sama7g5_gck[i].pp_count;
+ struct clk_hw *tmp_parent_hws[8];
u32 *mux_table;
mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
@@ -1105,13 +1211,19 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
SAMA7G5_INIT_TABLE(mux_table, 3);
SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table,
sama7g5_gck[i].pp_count);
- SAMA7G5_FILL_TABLE(&parent_names[3], sama7g5_gck[i].pp,
+ for (j = 0; j < sama7g5_gck[i].pp_count; j++) {
+ u8 pll_id = sama7g5_gck[i].pp[j].pll_id;
+ u8 pll_compid = sama7g5_gck[i].pp[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw;
+ }
+ SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
sama7g5_gck[i].pp_count);
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&sama7g5_pcr_layout,
- sama7g5_gck[i].n,
- parent_names, mux_table,
+ sama7g5_gck[i].n, NULL,
+ parent_hws, mux_table,
num_parents,
sama7g5_gck[i].id,
&sama7g5_gck[i].r,
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index a2d86c377827..7741d8f3dbee 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -117,17 +117,17 @@ static const struct clk_ops slow_osc_ops = {
static struct clk_hw * __init
at91_clk_register_slow_osc(void __iomem *sckcr,
const char *name,
- const char *parent_name,
+ const struct clk_parent_data *parent_data,
unsigned long startup,
bool bypass,
const struct clk_slow_bits *bits)
{
struct clk_slow_osc *osc;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
- if (!sckcr || !name || !parent_name)
+ if (!sckcr || !name || !parent_data)
return ERR_PTR(-EINVAL);
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
@@ -136,7 +136,7 @@ at91_clk_register_slow_osc(void __iomem *sckcr,
init.name = name;
init.ops = &slow_osc_ops;
- init.parent_names = &parent_name;
+ init.parent_data = parent_data;
init.num_parents = 1;
init.flags = CLK_IGNORE_UNUSED;
@@ -318,16 +318,16 @@ static const struct clk_ops sam9x5_slow_ops = {
static struct clk_hw * __init
at91_clk_register_sam9x5_slow(void __iomem *sckcr,
const char *name,
- const char **parent_names,
+ const struct clk_hw **parent_hws,
int num_parents,
const struct clk_slow_bits *bits)
{
struct clk_sam9x5_slow *slowck;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
- if (!sckcr || !name || !parent_names || !num_parents)
+ if (!sckcr || !name || !parent_hws || !num_parents)
return ERR_PTR(-EINVAL);
slowck = kzalloc(sizeof(*slowck), GFP_KERNEL);
@@ -336,7 +336,7 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
init.name = name;
init.ops = &sam9x5_slow_ops;
- init.parent_names = parent_names;
+ init.parent_hws = parent_hws;
init.num_parents = num_parents;
init.flags = 0;
@@ -367,18 +367,21 @@ static void __init at91sam9x5_sckc_register(struct device_node *np,
unsigned int rc_osc_startup_us,
const struct clk_slow_bits *bits)
{
- const char *parent_names[2] = { "slow_rc_osc", "slow_osc" };
void __iomem *regbase = of_iomap(np, 0);
struct device_node *child = NULL;
const char *xtal_name;
struct clk_hw *slow_rc, *slow_osc, *slowck;
+ static struct clk_parent_data parent_data = {
+ .name = "slow_xtal",
+ };
+ const struct clk_hw *parent_hws[2];
bool bypass;
int ret;
if (!regbase)
return;
- slow_rc = at91_clk_register_slow_rc_osc(regbase, parent_names[0],
+ slow_rc = at91_clk_register_slow_rc_osc(regbase, "slow_rc_osc",
32768, 50000000,
rc_osc_startup_us, bits);
if (IS_ERR(slow_rc))
@@ -402,12 +405,16 @@ static void __init at91sam9x5_sckc_register(struct device_node *np,
if (!xtal_name)
goto unregister_slow_rc;
- slow_osc = at91_clk_register_slow_osc(regbase, parent_names[1],
- xtal_name, 1200000, bypass, bits);
+ parent_data.fw_name = xtal_name;
+
+ slow_osc = at91_clk_register_slow_osc(regbase, "slow_osc",
+ &parent_data, 1200000, bypass, bits);
if (IS_ERR(slow_osc))
goto unregister_slow_rc;
- slowck = at91_clk_register_sam9x5_slow(regbase, "slowck", parent_names,
+ parent_hws[0] = slow_rc;
+ parent_hws[1] = slow_osc;
+ slowck = at91_clk_register_sam9x5_slow(regbase, "slowck", parent_hws,
2, bits);
if (IS_ERR(slowck))
goto unregister_slow_osc;
@@ -465,14 +472,17 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
struct clk_hw_onecell_data *clk_data;
struct clk_hw *slow_rc, *slow_osc;
const char *xtal_name;
- const char *parent_names[2] = { "slow_rc_osc", "slow_osc" };
+ const struct clk_hw *parent_hws[2];
+ static struct clk_parent_data parent_data = {
+ .name = "slow_xtal",
+ };
bool bypass;
int ret;
if (!regbase)
return;
- slow_rc = clk_hw_register_fixed_rate_with_accuracy(NULL, parent_names[0],
+ slow_rc = clk_hw_register_fixed_rate_with_accuracy(NULL, "slow_rc_osc",
NULL, 0, 32768,
93750000);
if (IS_ERR(slow_rc))
@@ -482,9 +492,10 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
if (!xtal_name)
goto unregister_slow_rc;
+ parent_data.fw_name = xtal_name;
bypass = of_property_read_bool(np, "atmel,osc-bypass");
- slow_osc = at91_clk_register_slow_osc(regbase, parent_names[1],
- xtal_name, 5000000, bypass,
+ slow_osc = at91_clk_register_slow_osc(regbase, "slow_osc",
+ &parent_data, 5000000, bypass,
&at91sam9x60_bits);
if (IS_ERR(slow_osc))
goto unregister_slow_rc;
@@ -495,14 +506,16 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
/* MD_SLCK and TD_SLCK. */
clk_data->num = 2;
- clk_data->hws[0] = clk_hw_register_fixed_rate(NULL, "md_slck",
- parent_names[0],
- 0, 32768);
+ clk_data->hws[0] = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck",
+ slow_rc,
+ 0, 32768);
if (IS_ERR(clk_data->hws[0]))
goto clk_data_free;
+ parent_hws[0] = slow_rc;
+ parent_hws[1] = slow_osc;
clk_data->hws[1] = at91_clk_register_sam9x5_slow(regbase, "td_slck",
- parent_names, 2,
+ parent_hws, 2,
&at91sam9x60_bits);
if (IS_ERR(clk_data->hws[1]))
goto unregister_md_slck;
@@ -573,30 +586,36 @@ static void __init of_sama5d4_sckc_setup(struct device_node *np)
void __iomem *regbase = of_iomap(np, 0);
struct clk_hw *slow_rc, *slowck;
struct clk_sama5d4_slow_osc *osc;
- struct clk_init_data init;
+ struct clk_init_data init = {};
const char *xtal_name;
- const char *parent_names[2] = { "slow_rc_osc", "slow_osc" };
+ const struct clk_hw *parent_hws[2];
+ static struct clk_parent_data parent_data = {
+ .name = "slow_xtal",
+ };
int ret;
if (!regbase)
return;
slow_rc = clk_hw_register_fixed_rate_with_accuracy(NULL,
- parent_names[0],
+ "slow_rc_osc",
NULL, 0, 32768,
250000000);
if (IS_ERR(slow_rc))
return;
xtal_name = of_clk_get_parent_name(np, 0);
+ if (!xtal_name)
+ goto unregister_slow_rc;
+ parent_data.fw_name = xtal_name;
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
if (!osc)
goto unregister_slow_rc;
- init.name = parent_names[1];
+ init.name = "slow_osc";
init.ops = &sama5d4_slow_osc_ops;
- init.parent_names = &xtal_name;
+ init.parent_data = &parent_data;
init.num_parents = 1;
init.flags = CLK_IGNORE_UNUSED;
@@ -609,8 +628,10 @@ static void __init of_sama5d4_sckc_setup(struct device_node *np)
if (ret)
goto free_slow_osc_data;
+ parent_hws[0] = slow_rc;
+ parent_hws[1] = &osc->hw;
slowck = at91_clk_register_sam9x5_slow(regbase, "slowck",
- parent_names, 2,
+ parent_hws, 2,
&at91sama5d4_bits);
if (IS_ERR(slowck))
goto unregister_slow_osc;
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 12be3e2371b3..263e55d75e3f 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -48,6 +48,7 @@ config QCOM_CLK_APCS_MSM8916
config QCOM_CLK_APCC_MSM8996
tristate "MSM8996 CPU Clock Controller"
select QCOM_KRYO_L2_ACCESSORS
+ select INTERCONNECT_CLK if INTERCONNECT
depends on ARM64
help
Support for the CPU clock controller on msm8996 devices.
@@ -57,6 +58,7 @@ config QCOM_CLK_APCC_MSM8996
config QCOM_CLK_APCS_SDX55
tristate "SDX55 and SDX65 APCS Clock Controller"
depends on QCOM_APCS_IPC || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
help
Support for the APCS Clock Controller on SDX55, SDX65 platforms. The
APCS is managing the mux and divider which feeds the CPUs.
@@ -100,6 +102,7 @@ config QCOM_CLK_RPMH
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
+ depends on ARM || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on apq8084 devices.
@@ -108,6 +111,7 @@ config APQ_GCC_8084
config APQ_MMCC_8084
tristate "APQ8084 Multimedia Clock Controller"
+ depends on ARM || COMPILE_TEST
select APQ_GCC_8084
select QCOM_GDSC
help
@@ -159,6 +163,7 @@ config IPQ_GCC_6018
config IPQ_GCC_806X
tristate "IPQ806x Global Clock Controller"
+ depends on ARM || COMPILE_TEST
help
Support for the global clock controller on ipq806x devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -166,6 +171,7 @@ config IPQ_GCC_806X
config IPQ_LCC_806X
tristate "IPQ806x LPASS Clock Controller"
+ depends on ARM || COMPILE_TEST
select IPQ_GCC_806X
help
Support for the LPASS clock controller on ipq806x devices.
@@ -191,6 +197,7 @@ config IPQ_GCC_9574
config MSM_GCC_8660
tristate "MSM8660 Global Clock Controller"
+ depends on ARM || COMPILE_TEST
help
Support for the global clock controller on msm8660 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -198,6 +205,7 @@ config MSM_GCC_8660
config MSM_GCC_8909
tristate "MSM8909 Global Clock Controller"
+ depends on ARM || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on msm8909 devices.
@@ -232,6 +240,7 @@ config MSM_GCC_8939
config MSM_GCC_8960
tristate "APQ8064/MSM8960 Global Clock Controller"
+ depends on ARM || COMPILE_TEST
help
Support for the global clock controller on apq8064/msm8960 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -239,6 +248,7 @@ config MSM_GCC_8960
config MSM_LCC_8960
tristate "APQ8064/MSM8960 LPASS Clock Controller"
+ depends on ARM || COMPILE_TEST
select MSM_GCC_8960
help
Support for the LPASS clock controller on apq8064/msm8960 devices.
@@ -247,6 +257,7 @@ config MSM_LCC_8960
config MDM_GCC_9607
tristate "MDM9607 Global Clock Controller"
+ depends on ARM || COMPILE_TEST
help
Support for the global clock controller on mdm9607 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -254,6 +265,7 @@ config MDM_GCC_9607
config MDM_GCC_9615
tristate "MDM9615 Global Clock Controller"
+ depends on ARM || COMPILE_TEST
help
Support for the global clock controller on mdm9615 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -261,6 +273,7 @@ config MDM_GCC_9615
config MDM_LCC_9615
tristate "MDM9615 LPASS Clock Controller"
+ depends on ARM || COMPILE_TEST
select MDM_GCC_9615
help
Support for the LPASS clock controller on mdm9615 devices.
@@ -269,6 +282,7 @@ config MDM_LCC_9615
config MSM_MMCC_8960
tristate "MSM8960 Multimedia Clock Controller"
+ depends on ARM || COMPILE_TEST
select MSM_GCC_8960
help
Support for the multimedia clock controller on msm8960 devices.
@@ -285,6 +299,7 @@ config MSM_GCC_8953
config MSM_GCC_8974
tristate "MSM8974 Global Clock Controller"
+ depends on ARM || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on msm8974 devices.
@@ -293,6 +308,7 @@ config MSM_GCC_8974
config MSM_MMCC_8974
tristate "MSM8974 Multimedia Clock Controller"
+ depends on ARM || COMPILE_TEST
select MSM_GCC_8974
select QCOM_GDSC
help
@@ -393,6 +409,7 @@ config QCS_GCC_404
config SC_CAMCC_7180
tristate "SC7180 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7180
help
Support for the camera clock controller on Qualcomm Technologies, Inc
@@ -402,6 +419,7 @@ config SC_CAMCC_7180
config SC_CAMCC_7280
tristate "SC7280 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7280
help
Support for the camera clock controller on Qualcomm Technologies, Inc
@@ -411,6 +429,7 @@ config SC_CAMCC_7280
config SC_DISPCC_7180
tristate "SC7180 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7180
help
Support for the display clock controller on Qualcomm Technologies, Inc
@@ -420,6 +439,7 @@ config SC_DISPCC_7180
config SC_DISPCC_7280
tristate "SC7280 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7280
help
Support for the display clock controller on Qualcomm Technologies, Inc.
@@ -429,6 +449,7 @@ config SC_DISPCC_7280
config SC_DISPCC_8280XP
tristate "SC8280XP Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_8280XP
help
Support for the two display clock controllers on Qualcomm
@@ -458,6 +479,7 @@ config SC_GCC_7180
tristate "SC7180 Global Clock Controller"
select QCOM_GDSC
depends on COMMON_CLK_QCOM
+ depends on ARM64 || COMPILE_TEST
help
Support for the global clock controller on SC7180 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -467,6 +489,7 @@ config SC_GCC_7280
tristate "SC7280 Global Clock Controller"
select QCOM_GDSC
depends on COMMON_CLK_QCOM
+ depends on ARM64 || COMPILE_TEST
help
Support for the global clock controller on SC7280 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -476,6 +499,7 @@ config SC_GCC_8180X
tristate "SC8180X Global Clock Controller"
select QCOM_GDSC
depends on COMMON_CLK_QCOM
+ depends on ARM64 || COMPILE_TEST
help
Support for the global clock controller on SC8180X devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -485,6 +509,7 @@ config SC_GCC_8280XP
tristate "SC8280XP Global Clock Controller"
select QCOM_GDSC
depends on COMMON_CLK_QCOM
+ depends on ARM64 || COMPILE_TEST
help
Support for the global clock controller on SC8280XP devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -492,6 +517,7 @@ config SC_GCC_8280XP
config SC_GPUCC_7180
tristate "SC7180 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7180
help
Support for the graphics clock controller on SC7180 devices.
@@ -500,6 +526,7 @@ config SC_GPUCC_7180
config SC_GPUCC_7280
tristate "SC7280 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7280
help
Support for the graphics clock controller on SC7280 devices.
@@ -508,6 +535,7 @@ config SC_GPUCC_7280
config SC_GPUCC_8280XP
tristate "SC8280XP Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_8280XP
help
Support for the graphics clock controller on SC8280XP devices.
@@ -516,14 +544,25 @@ config SC_GPUCC_8280XP
config SC_LPASSCC_7280
tristate "SC7280 Low Power Audio Subsystem (LPASS) Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7280
help
Support for the LPASS clock controller on SC7280 devices.
Say Y if you want to use the LPASS branch clocks of the LPASS clock
controller to reset the LPASS subsystem.
+config SC_LPASSCC_8280XP
+	tristate "SC8280XP Low Power Audio Subsystem (LPASS) Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SC_GCC_8280XP
+ help
+ Support for the LPASS clock controller on SC8280XP devices.
+ Say Y if you want to use the LPASS branch clocks of the LPASS clock
+ controller to reset the LPASS subsystem.
+
config SC_LPASS_CORECC_7180
tristate "SC7180 LPASS Core Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7180
help
Support for the LPASS(Low Power Audio Subsystem) core clock controller
@@ -533,6 +572,7 @@ config SC_LPASS_CORECC_7180
config SC_LPASS_CORECC_7280
tristate "SC7280 LPASS Core & Audio Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7280
select QCOM_GDSC
help
@@ -543,6 +583,7 @@ config SC_LPASS_CORECC_7280
config SC_MSS_7180
tristate "SC7180 Modem Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7180
help
Support for the Modem Subsystem clock controller on Qualcomm
@@ -552,6 +593,7 @@ config SC_MSS_7180
config SC_VIDEOCC_7180
tristate "SC7180 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7180
help
Support for the video clock controller on SC7180 devices.
@@ -560,6 +602,7 @@ config SC_VIDEOCC_7180
config SC_VIDEOCC_7280
tristate "SC7280 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_7280
help
Support for the video clock controller on SC7280 devices.
@@ -568,6 +611,7 @@ config SC_VIDEOCC_7280
config SDM_CAMCC_845
tristate "SDM845 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SDM_GCC_845
help
Support for the camera clock controller on SDM845 devices.
@@ -575,6 +619,7 @@ config SDM_CAMCC_845
config SDM_GCC_660
tristate "SDM660 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SDM660 devices.
@@ -583,6 +628,7 @@ config SDM_GCC_660
config SDM_MMCC_660
tristate "SDM660 Multimedia Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SDM_GCC_660
select QCOM_GDSC
help
@@ -592,6 +638,7 @@ config SDM_MMCC_660
config SDM_GPUCC_660
tristate "SDM660 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SDM_GCC_660
select QCOM_GDSC
help
@@ -623,6 +670,7 @@ config QDU_GCC_1000
config SDM_GCC_845
tristate "SDM845/SDM670 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SDM845 and SDM670 devices.
@@ -631,6 +679,7 @@ config SDM_GCC_845
config SDM_GPUCC_845
tristate "SDM845 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SDM_GCC_845
help
Support for the graphics clock controller on SDM845 devices.
@@ -639,6 +688,7 @@ config SDM_GPUCC_845
config SDM_VIDEOCC_845
tristate "SDM845 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SDM_GCC_845
select QCOM_GDSC
help
@@ -648,6 +698,7 @@ config SDM_VIDEOCC_845
config SDM_DISPCC_845
tristate "SDM845 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SDM_GCC_845
help
Support for the display clock controller on Qualcomm Technologies, Inc
@@ -657,6 +708,7 @@ config SDM_DISPCC_845
config SDM_LPASSCC_845
tristate "SDM845 Low Power Audio Subsystem (LPAAS) Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SDM_GCC_845
help
Support for the LPASS clock controller on SDM845 devices.
@@ -665,6 +717,7 @@ config SDM_LPASSCC_845
config SDX_GCC_55
tristate "SDX55 Global Clock Controller"
+ depends on ARM || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SDX55 devices.
@@ -673,14 +726,24 @@ config SDX_GCC_55
config SDX_GCC_65
tristate "SDX65 Global Clock Controller"
+ depends on ARM || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SDX65 devices.
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SDX_GCC_75
+ tristate "SDX75 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SDX75 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/eMMC, PCIe etc.
+
config SM_CAMCC_6350
tristate "SM6350 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_6350
help
Support for the camera clock controller on SM6350 devices.
@@ -688,6 +751,7 @@ config SM_CAMCC_6350
config SM_CAMCC_8250
tristate "SM8250 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8250
help
Support for the camera clock controller on SM8250 devices.
@@ -695,6 +759,7 @@ config SM_CAMCC_8250
config SM_CAMCC_8450
tristate "SM8450 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8450
help
Support for the camera clock controller on SM8450 devices.
@@ -702,6 +767,7 @@ config SM_CAMCC_8450
config SM_DISPCC_6115
tristate "SM6115 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
depends on SM_GCC_6115
help
Support for the display clock controller on Qualcomm Technologies, Inc
@@ -711,6 +777,7 @@ config SM_DISPCC_6115
config SM_DISPCC_6125
tristate "SM6125 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
depends on SM_GCC_6125
help
Support for the display clock controller on Qualcomm Technologies, Inc
@@ -720,6 +787,7 @@ config SM_DISPCC_6125
config SM_DISPCC_8250
tristate "SM8150/SM8250/SM8350 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
depends on SM_GCC_8150 || SM_GCC_8250 || SM_GCC_8350
help
Support for the display clock controller on Qualcomm Technologies, Inc
@@ -729,6 +797,7 @@ config SM_DISPCC_8250
config SM_DISPCC_6350
tristate "SM6350 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
depends on SM_GCC_6350
help
Support for the display clock controller on Qualcomm Technologies, Inc
@@ -738,6 +807,7 @@ config SM_DISPCC_6350
config SM_DISPCC_6375
tristate "SM6375 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
depends on SM_GCC_6375
help
Support for the display clock controller on Qualcomm Technologies, Inc
@@ -747,6 +817,7 @@ config SM_DISPCC_6375
config SM_DISPCC_8450
tristate "SM8450 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
depends on SM_GCC_8450
help
Support for the display clock controller on Qualcomm Technologies, Inc
@@ -756,6 +827,7 @@ config SM_DISPCC_8450
config SM_DISPCC_8550
tristate "SM8550 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
depends on SM_GCC_8550
help
Support for the display clock controller on Qualcomm Technologies, Inc
@@ -765,6 +837,7 @@ config SM_DISPCC_8550
config SM_GCC_6115
tristate "SM6115 and SM4250 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SM6115 and SM4250 devices.
@@ -773,6 +846,7 @@ config SM_GCC_6115
config SM_GCC_6125
tristate "SM6125 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
help
Support for the global clock controller on SM6125 devices.
Say Y if you want to use peripheral devices such as UART,
@@ -780,6 +854,7 @@ config SM_GCC_6125
config SM_GCC_6350
tristate "SM6350 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SM6350 devices.
@@ -788,6 +863,7 @@ config SM_GCC_6350
config SM_GCC_6375
tristate "SM6375 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SM6375 devices.
@@ -804,6 +880,7 @@ config SM_GCC_7150
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
help
Support for the global clock controller on SM8150 devices.
Say Y if you want to use peripheral devices such as UART,
@@ -811,6 +888,7 @@ config SM_GCC_8150
config SM_GCC_8250
tristate "SM8250 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SM8250 devices.
@@ -819,6 +897,7 @@ config SM_GCC_8250
config SM_GCC_8350
tristate "SM8350 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SM8350 devices.
@@ -827,6 +906,7 @@ config SM_GCC_8350
config SM_GCC_8450
tristate "SM8450 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SM8450 devices.
@@ -835,6 +915,7 @@ config SM_GCC_8450
config SM_GCC_8550
tristate "SM8550 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the global clock controller on SM8550 devices.
@@ -870,6 +951,7 @@ config SM_GPUCC_6375
config SM_GPUCC_6350
tristate "SM6350 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_6350
help
Support for the graphics clock controller on SM6350 devices.
@@ -878,6 +960,7 @@ config SM_GPUCC_6350
config SM_GPUCC_8150
tristate "SM8150 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8150
help
Support for the graphics clock controller on SM8150 devices.
@@ -886,6 +969,7 @@ config SM_GPUCC_8150
config SM_GPUCC_8250
tristate "SM8250 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8250
help
Support for the graphics clock controller on SM8250 devices.
@@ -894,14 +978,32 @@ config SM_GPUCC_8250
config SM_GPUCC_8350
tristate "SM8350 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8350
help
Support for the graphics clock controller on SM8350 devices.
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_GPUCC_8450
+ tristate "SM8450 Graphics Clock Controller"
+ select SM_GCC_8450
+ help
+ Support for the graphics clock controller on SM8450 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
+config SM_GPUCC_8550
+ tristate "SM8550 Graphics Clock Controller"
+ select SM_GCC_8550
+ help
+ Support for the graphics clock controller on SM8550 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_TCSRCC_8550
tristate "SM8550 TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
Support for the TCSR clock controller on SM8550 devices.
@@ -909,6 +1011,7 @@ config SM_TCSRCC_8550
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8150
select QCOM_GDSC
help
@@ -918,6 +1021,7 @@ config SM_VIDEOCC_8150
config SM_VIDEOCC_8250
tristate "SM8250 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8250
select QCOM_GDSC
help
@@ -925,6 +1029,25 @@ config SM_VIDEOCC_8250
Say Y if you want to support video devices and functionality such as
video encode and decode.
+config SM_VIDEOCC_8350
+ tristate "SM8350 Video Clock Controller"
+ select SM_GCC_8350
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SM8350 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
+config SM_VIDEOCC_8550
+ tristate "SM8550 Video Clock Controller"
+ select SM_GCC_8550
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on Qualcomm Technologies, Inc.
+ SM8550 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode/decode.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on SPMI || COMPILE_TEST
@@ -958,8 +1081,18 @@ config KRAITCC
config CLK_GFM_LPASS_SM8250
tristate "SM8250 GFM LPASS Clocks"
+ depends on ARM64 || COMPILE_TEST
help
Support for the Glitch Free Mux (GFM) Low power audio
subsystem (LPASS) clocks found on SM8250 SoCs.
+config SM_VIDEOCC_8450
+ tristate "SM8450 Video Clock Controller"
+ select SM_GCC_8450
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on Qualcomm Technologies, Inc.
+ SM8450 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode/decode.
endif
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 9ff4c373ad95..e6e294274c35 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7280) += gpucc-sc7280.o
obj-$(CONFIG_SC_GPUCC_8280XP) += gpucc-sc8280xp.o
obj-$(CONFIG_SC_LPASSCC_7280) += lpasscc-sc7280.o
+obj-$(CONFIG_SC_LPASSCC_8280XP) += lpasscc-sc8280xp.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_LPASS_CORECC_7280) += lpasscorecc-sc7280.o lpassaudiocc-sc7280.o
obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
@@ -97,6 +98,7 @@ obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
+obj-$(CONFIG_SDX_GCC_75) += gcc-sdx75.o
obj-$(CONFIG_SM_CAMCC_6350) += camcc-sm6350.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
@@ -124,9 +126,14 @@ obj-$(CONFIG_SM_GPUCC_6375) += gpucc-sm6375.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
+obj-$(CONFIG_SM_GPUCC_8450) += gpucc-sm8450.o
+obj-$(CONFIG_SM_GPUCC_8550) += gpucc-sm8550.o
obj-$(CONFIG_SM_TCSRCC_8550) += tcsrcc-sm8550.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
+obj-$(CONFIG_SM_VIDEOCC_8350) += videocc-sm8350.o
+obj-$(CONFIG_SM_VIDEOCC_8450) += videocc-sm8450.o
+obj-$(CONFIG_SM_VIDEOCC_8550) += videocc-sm8550.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index cf4f0d340cbf..ce28d882ee78 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -111,6 +111,18 @@ static const struct alpha_pll_config ipq8074_pll_config = {
.test_ctl_hi_val = 0x4000,
};
+static const struct alpha_pll_config ipq9574_pll_config = {
+ .l = 0x3b,
+ .config_ctl_val = 0x200d4828,
+ .config_ctl_hi_val = 0x6,
+ .early_output_mask = BIT(3),
+ .aux2_output_mask = BIT(2),
+ .aux_output_mask = BIT(1),
+ .main_output_mask = BIT(0),
+ .test_ctl_val = 0x0,
+ .test_ctl_hi_val = 0x4000,
+};
+
struct apss_pll_data {
int pll_type;
struct clk_alpha_pll *pll;
@@ -135,6 +147,12 @@ static struct apss_pll_data ipq6018_pll_data = {
.pll_config = &ipq6018_pll_config,
};
+static struct apss_pll_data ipq9574_pll_data = {
+ .pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA,
+ .pll = &ipq_pll_huayra,
+ .pll_config = &ipq9574_pll_config,
+};
+
static const struct regmap_config ipq_pll_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -180,6 +198,7 @@ static const struct of_device_id apss_ipq_pll_match_table[] = {
{ .compatible = "qcom,ipq5332-a53pll", .data = &ipq5332_pll_data },
{ .compatible = "qcom,ipq6018-a53pll", .data = &ipq6018_pll_data },
{ .compatible = "qcom,ipq8074-a53pll", .data = &ipq8074_pll_data },
+ { .compatible = "qcom,ipq9574-a73pll", .data = &ipq9574_pll_data },
{ }
};
MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
index e2b4804695f3..8a4ba7a19ed1 100644
--- a/drivers/clk/qcom/camcc-sc7180.c
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -1480,12 +1480,21 @@ static struct clk_branch cam_cc_sys_tmr_clk = {
},
};
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xb134,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
.pd = {
.name = "bps_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
.flags = HW_CTRL,
};
@@ -1495,6 +1504,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
};
static struct gdsc ife_1_gdsc = {
@@ -1503,6 +1513,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
};
static struct gdsc ipe_0_gdsc = {
@@ -1512,15 +1523,9 @@ static struct gdsc ipe_0_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.flags = HW_CTRL,
+ .parent = &titan_top_gdsc.pd,
};
-static struct gdsc titan_top_gdsc = {
- .gdscr = 0xb134,
- .pd = {
- .name = "titan_top_gdsc",
- },
- .pwrsts = PWRSTS_OFF_ON,
-};
static struct clk_hw *cam_cc_sc7180_hws[] = {
[CAM_CC_PLL2_OUT_EARLY] = &cam_cc_pll2_out_early.hw,
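
(The reshuffle above is needed because the child GDSCs now point at &titan_top_gdsc.pd as their parent power domain, so the titan_top definition is moved ahead of them. Resulting hierarchy, for orientation only:)

	titan_top_gdsc
	├── bps_gdsc	(HW_CTRL)
	├── ife_0_gdsc
	├── ife_1_gdsc
	└── ipe_0_gdsc	(HW_CTRL)
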
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index b9f6535a7ba7..e4ef645f65d1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -55,6 +55,7 @@
#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
+#define PLL_TEST_CTL_U2(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U2])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
@@ -383,10 +384,21 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
- clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
- config->test_ctl_val);
- clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
- config->test_ctl_hi_val);
+ if (config->test_ctl_mask)
+ regmap_update_bits(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_mask,
+ config->test_ctl_val);
+ else
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+
+ if (config->test_ctl_hi_mask)
+ regmap_update_bits(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_mask,
+ config->test_ctl_hi_val);
+ else
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
if (pll->flags & SUPPORTS_FSM_MODE)
qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
@@ -2096,6 +2108,7 @@ void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U2(pll), config->test_ctl_hi2_val);
/* Disable PLL output */
regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index d07b17186b90..e4bd863027ab 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -123,8 +123,11 @@ struct alpha_pll_config {
u32 user_ctl_hi_val;
u32 user_ctl_hi1_val;
u32 test_ctl_val;
+ u32 test_ctl_mask;
u32 test_ctl_hi_val;
+ u32 test_ctl_hi_mask;
u32 test_ctl_hi1_val;
+ u32 test_ctl_hi2_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index ca896ebf7e1b..fc4735f74f0f 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -43,6 +43,7 @@ static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
{
u32 val;
u32 mask;
+ bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
mask = CBCR_NOC_FSM_STATUS;
mask |= CBCR_CLK_OFF;
@@ -51,11 +52,10 @@ static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
if (enabling) {
val &= mask;
- return (val & CBCR_CLK_OFF) == 0 ||
+ return (val & CBCR_CLK_OFF) == (invert ? CBCR_CLK_OFF : 0) ||
FIELD_GET(CBCR_NOC_FSM_STATUS, val) == FSM_STATUS_ON;
- } else {
- return val & CBCR_CLK_OFF;
}
+ return (val & CBCR_CLK_OFF) == (invert ? 0 : CBCR_CLK_OFF);
}
static int clk_branch_wait(const struct clk_branch *br, bool enabling,
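
(Readiness as decided by the rewritten check above, derived directly from the code; while enabling, a NOC FSM status of ON always counts as ready in addition to the CLK_OFF test:)

	halt_check		enabling	ready when
	default			yes		CBCR_CLK_OFF is clear
	default			no		CBCR_CLK_OFF is set
	BRANCH_HALT_ENABLE	yes		CBCR_CLK_OFF is set
	BRANCH_HALT_ENABLE	no		CBCR_CLK_OFF is clear
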
diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
index cfd567636f4e..1e23b734abb3 100644
--- a/drivers/clk/qcom/clk-cbf-8996.c
+++ b/drivers/clk/qcom/clk-cbf-8996.c
@@ -5,11 +5,15 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/interconnect-clk.h>
+#include <linux/interconnect-provider.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <dt-bindings/interconnect/qcom,msm8996-cbf.h>
+
#include "clk-alpha-pll.h"
#include "clk-regmap.h"
@@ -223,6 +227,49 @@ static const struct regmap_config cbf_msm8996_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+#ifdef CONFIG_INTERCONNECT
+
+/* Random ID that doesn't clash with main qnoc and OSM */
+#define CBF_MASTER_NODE 2000
+
+static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev, struct clk_hw *cbf_hw)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk = devm_clk_hw_get_clk(dev, cbf_hw, "cbf");
+ const struct icc_clk_data data[] = {
+ { .clk = clk, .name = "cbf", },
+ };
+ struct icc_provider *provider;
+
+ provider = icc_clk_register(dev, CBF_MASTER_NODE, ARRAY_SIZE(data), data);
+ if (IS_ERR(provider))
+ return PTR_ERR(provider);
+
+ platform_set_drvdata(pdev, provider);
+
+ return 0;
+}
+
+static int qcom_msm8996_cbf_icc_remove(struct platform_device *pdev)
+{
+ struct icc_provider *provider = platform_get_drvdata(pdev);
+
+ icc_clk_unregister(provider);
+
+ return 0;
+}
+#define qcom_msm8996_cbf_icc_sync_state icc_sync_state
+#else
+static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev, struct clk_hw *cbf_hw)
+{
+ dev_warn(&pdev->dev, "CONFIG_INTERCONNECT is disabled, CBF clock is fixed\n");
+
+ return 0;
+}
+#define qcom_msm8996_cbf_icc_remove(pdev) (0)
+#define qcom_msm8996_cbf_icc_sync_state NULL
+#endif
+
static int qcom_msm8996_cbf_probe(struct platform_device *pdev)
{
void __iomem *base;
@@ -281,7 +328,16 @@ static int qcom_msm8996_cbf_probe(struct platform_device *pdev)
if (ret)
return ret;
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &cbf_mux.clkr.hw);
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &cbf_mux.clkr.hw);
+ if (ret)
+ return ret;
+
+ return qcom_msm8996_cbf_icc_register(pdev, &cbf_mux.clkr.hw);
+}
+
+static int qcom_msm8996_cbf_remove(struct platform_device *pdev)
+{
+ return qcom_msm8996_cbf_icc_remove(pdev);
}
static const struct of_device_id qcom_msm8996_cbf_match_table[] = {
@@ -292,9 +348,11 @@ MODULE_DEVICE_TABLE(of, qcom_msm8996_cbf_match_table);
static struct platform_driver qcom_msm8996_cbf_driver = {
.probe = qcom_msm8996_cbf_probe,
+ .remove = qcom_msm8996_cbf_remove,
.driver = {
.name = "qcom-msm8996-cbf",
.of_match_table = qcom_msm8996_cbf_match_table,
+ .sync_state = qcom_msm8996_cbf_icc_sync_state,
},
};
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 01581f4d2c39..e6d84c8c7989 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -141,6 +141,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @clkr: regmap clock handle
* @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG
* @parked_cfg: cached value of the CFG register for parked RCGs
+ * @hw_clk_ctrl: whether to enable hardware clock control
*/
struct clk_rcg2 {
u32 cmd_rcgr;
@@ -152,6 +153,7 @@ struct clk_rcg2 {
struct clk_regmap clkr;
u8 cfg_off;
u32 parked_cfg;
+ bool hw_clk_ctrl;
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 76551534f10d..e22baf3a7112 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -325,6 +325,8 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
+ if (rcg->hw_clk_ctrl)
+ cfg |= CFG_HW_CLK_CTRL_MASK;
*_cfg &= ~mask;
*_cfg |= cfg;
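
(Any RCG that sets the new hw_clk_ctrl flag gets CFG_HW_CLK_CTRL_MASK ORed into its CFG word on every reconfiguration. A minimal declaration sketch; the offset, parent data, frequency table and ops below are illustrative, not from a real controller:)

	static struct clk_rcg2 example_clk_src = {
		.cmd_rcgr = 0x1000,		/* illustrative register offset */
		.mnd_width = 0,
		.hid_width = 5,
		.parent_map = example_parent_map,
		.freq_tbl = example_freq_tbl,
		.hw_clk_ctrl = true,		/* request hardware clock control in CFG */
		.clkr.hw.init = &(const struct clk_init_data) {
			.name = "example_clk_src",
			.parent_data = example_parent_data,
			.num_parents = ARRAY_SIZE(example_parent_data),
			.ops = &clk_rcg2_shared_ops,
		},
	};
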
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 45ee370f3307..86572570bc54 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -700,6 +700,24 @@ static const struct clk_rpmh_desc clk_rpmh_qdu1000 = {
.num_clks = ARRAY_SIZE(qdu1000_rpmh_clocks),
};
+static struct clk_hw *sdx75_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a_ao.hw,
+ [RPMH_QPIC_CLK] = &clk_rpmh_qpic_clk.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdx75 = {
+ .clks = sdx75_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdx75_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -792,6 +810,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sdm670-rpmh-clk", .data = &clk_rpmh_sdm670},
{ .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
{ .compatible = "qcom,sdx65-rpmh-clk", .data = &clk_rpmh_sdx65},
+ { .compatible = "qcom,sdx75-rpmh-clk", .data = &clk_rpmh_sdx75},
{ .compatible = "qcom,sm6350-rpmh-clk", .data = &clk_rpmh_sm6350},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 887b945a6fb7..e4de74b68797 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -67,7 +67,7 @@
type, r_id, key)
#define __DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(_prefix, _name, _active,\
- type, r_id, r, key) \
+ type, r_id, r, key, ao_flags) \
static struct clk_smd_rpm clk_smd_rpm_##_prefix##_active; \
static struct clk_smd_rpm clk_smd_rpm_##_prefix##_name = { \
.rpm_res_type = (type), \
@@ -102,12 +102,13 @@
.name = "xo_board", \
}, \
.num_parents = 1, \
+ .flags = (ao_flags), \
}, \
}
#define __DEFINE_CLK_SMD_RPM_BRANCH(_name, _active, type, r_id, r, key) \
__DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(/* empty */, \
- _name, _active, type, r_id, r, key)
+ _name, _active, type, r_id, r, key, 0)
#define DEFINE_CLK_SMD_RPM(_name, type, r_id) \
__DEFINE_CLK_SMD_RPM(_name##_clk, _name##_a_clk, \
@@ -126,12 +127,12 @@
#define DEFINE_CLK_SMD_RPM_BRANCH(_name, type, r_id, r) \
__DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(branch_, \
_name##_clk, _name##_a_clk, \
- type, r_id, r, QCOM_RPM_SMD_KEY_ENABLE)
+ type, r_id, r, QCOM_RPM_SMD_KEY_ENABLE, 0)
-#define DEFINE_CLK_SMD_RPM_BRANCH_A(_name, type, r_id, r) \
+#define DEFINE_CLK_SMD_RPM_BRANCH_A(_name, type, r_id, r, ao_flags) \
__DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(branch_, \
_name, _name##_a, type, \
- r_id, r, QCOM_RPM_SMD_KEY_ENABLE)
+ r_id, r, QCOM_RPM_SMD_KEY_ENABLE, ao_flags)
#define DEFINE_CLK_SMD_RPM_QDSS(_name, type, r_id) \
__DEFINE_CLK_SMD_RPM(_name##_clk, _name##_a_clk, \
@@ -146,7 +147,7 @@
__DEFINE_CLK_SMD_RPM_BRANCH_PREFIX(_prefix, \
_name, _name##_a, \
QCOM_SMD_RPM_CLK_BUF_A, r_id, r, \
- QCOM_RPM_KEY_SOFTWARE_ENABLE)
+ QCOM_RPM_KEY_SOFTWARE_ENABLE, 0)
#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_name, r_id, r) \
DEFINE_CLK_SMD_RPM_XO_BUFFER(_name, r_id, r); \
@@ -156,6 +157,8 @@
#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
+static struct qcom_smd_rpm *rpmcc_smd_rpm;
+
struct clk_smd_rpm {
const int rpm_res_type;
const int rpm_key;
@@ -166,7 +169,6 @@ struct clk_smd_rpm {
struct clk_smd_rpm *peer;
struct clk_hw hw;
unsigned long rate;
- struct qcom_smd_rpm *rpm;
};
struct clk_smd_rpm_req {
@@ -178,6 +180,7 @@ struct clk_smd_rpm_req {
struct rpm_smd_clk_desc {
struct clk_smd_rpm **clks;
size_t num_clks;
+ bool scaling_before_handover;
};
static DEFINE_MUTEX(rpm_smd_clk_lock);
@@ -191,12 +194,12 @@ static int clk_smd_rpm_handoff(struct clk_smd_rpm *r)
.value = cpu_to_le32(r->branch ? 1 : INT_MAX),
};
- ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ ret = qcom_rpm_smd_write(rpmcc_smd_rpm, QCOM_SMD_RPM_ACTIVE_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
if (ret)
return ret;
- ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ ret = qcom_rpm_smd_write(rpmcc_smd_rpm, QCOM_SMD_RPM_SLEEP_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
if (ret)
@@ -214,7 +217,7 @@ static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
- return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ return qcom_rpm_smd_write(rpmcc_smd_rpm, QCOM_SMD_RPM_ACTIVE_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
}
@@ -228,7 +231,7 @@ static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
- return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ return qcom_rpm_smd_write(rpmcc_smd_rpm, QCOM_SMD_RPM_SLEEP_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
}
@@ -395,7 +398,7 @@ static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
return r->rate;
}
-static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
+static int clk_smd_rpm_enable_scaling(void)
{
int ret;
struct clk_smd_rpm_req req = {
@@ -404,7 +407,7 @@ static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
.value = cpu_to_le32(1),
};
- ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ ret = qcom_rpm_smd_write(rpmcc_smd_rpm, QCOM_SMD_RPM_SLEEP_STATE,
QCOM_SMD_RPM_MISC_CLK,
QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
if (ret) {
@@ -412,7 +415,7 @@ static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
return ret;
}
- ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ ret = qcom_rpm_smd_write(rpmcc_smd_rpm, QCOM_SMD_RPM_ACTIVE_STATE,
QCOM_SMD_RPM_MISC_CLK,
QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
if (ret) {
@@ -438,10 +441,11 @@ static const struct clk_ops clk_smd_rpm_branch_ops = {
.recalc_rate = clk_smd_rpm_recalc_rate,
};
-DEFINE_CLK_SMD_RPM_BRANCH_A(bi_tcxo, QCOM_SMD_RPM_MISC_CLK, 0, 19200000);
+/* Disabling BI_TCXO_AO could gate the root clock source of the entire system. */
+DEFINE_CLK_SMD_RPM_BRANCH_A(bi_tcxo, QCOM_SMD_RPM_MISC_CLK, 0, 19200000, CLK_IS_CRITICAL);
DEFINE_CLK_SMD_RPM_BRANCH(qdss, QCOM_SMD_RPM_MISC_CLK, 1, 19200000);
DEFINE_CLK_SMD_RPM_QDSS(qdss, QCOM_SMD_RPM_MISC_CLK, 1);
-DEFINE_CLK_SMD_RPM_BRANCH_A(bimc_freq_log, QCOM_SMD_RPM_MISC_CLK, 4, 1);
+DEFINE_CLK_SMD_RPM_BRANCH_A(bimc_freq_log, QCOM_SMD_RPM_MISC_CLK, 4, 1, 0);
DEFINE_CLK_SMD_RPM_BRANCH(mss_cfg_ahb, QCOM_SMD_RPM_MCFG_CLK, 0, 19200000);
@@ -693,6 +697,7 @@ static struct clk_smd_rpm *msm8974_clks[] = {
static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
.clks = msm8974_clks,
.num_clks = ARRAY_SIZE(msm8974_clks),
+ .scaling_before_handover = true,
};
static struct clk_smd_rpm *msm8976_clks[] = {
@@ -1301,12 +1306,11 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
{
int ret;
size_t num_clks, i;
- struct qcom_smd_rpm *rpm;
struct clk_smd_rpm **rpm_smd_clks;
const struct rpm_smd_clk_desc *desc;
- rpm = dev_get_drvdata(pdev->dev.parent);
- if (!rpm) {
+ rpmcc_smd_rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpmcc_smd_rpm) {
dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
return -ENODEV;
}
@@ -1318,20 +1322,26 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
rpm_smd_clks = desc->clks;
num_clks = desc->num_clks;
+ if (desc->scaling_before_handover) {
+ ret = clk_smd_rpm_enable_scaling();
+ if (ret)
+ goto err;
+ }
+
for (i = 0; i < num_clks; i++) {
if (!rpm_smd_clks[i])
continue;
- rpm_smd_clks[i]->rpm = rpm;
-
ret = clk_smd_rpm_handoff(rpm_smd_clks[i]);
if (ret)
goto err;
}
- ret = clk_smd_rpm_enable_scaling(rpm);
- if (ret)
- goto err;
+ if (!desc->scaling_before_handover) {
+ ret = clk_smd_rpm_enable_scaling();
+ if (ret)
+ goto err;
+ }
for (i = 0; i < num_clks; i++) {
if (!rpm_smd_clks[i])
diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
index e9cfe41c0442..44dd5cfcc150 100644
--- a/drivers/clk/qcom/dispcc-qcm2290.c
+++ b/drivers/clk/qcom/dispcc-qcm2290.c
@@ -24,9 +24,11 @@
enum {
P_BI_TCXO,
+ P_BI_TCXO_AO,
P_DISP_CC_PLL0_OUT_MAIN,
P_DSI0_PHY_PLL_OUT_BYTECLK,
P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_DIV,
P_GPLL0_OUT_MAIN,
P_SLEEP_CLK,
};
@@ -82,8 +84,8 @@ static const struct clk_parent_data disp_cc_parent_data_1[] = {
};
static const struct parent_map disp_cc_parent_map_2[] = {
- { P_BI_TCXO, 0 },
- { P_GPLL0_OUT_MAIN, 4 },
+ { P_BI_TCXO_AO, 0 },
+ { P_GPLL0_OUT_DIV, 4 },
};
static const struct clk_parent_data disp_cc_parent_data_2[] = {
@@ -151,9 +153,9 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
};
static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
- F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(19200000, P_BI_TCXO_AO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_DIV, 4, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
index bdb4a0a11d07..a75ab88ed14c 100644
--- a/drivers/clk/qcom/gcc-ipq5332.c
+++ b/drivers/clk/qcom/gcc-ipq5332.c
@@ -20,8 +20,8 @@
#include "reset.h"
enum {
- DT_SLEEP_CLK,
DT_XO,
+ DT_SLEEP_CLK,
DT_PCIE_2LANE_PHY_PIPE_CLK,
DT_PCIE_2LANE_PHY_PIPE_CLK_X1,
DT_USB_PCIE_WRAPPER_PIPE_CLK,
@@ -366,7 +366,7 @@ static struct clk_rcg2 gcc_adss_pwm_clk_src = {
};
static const struct freq_tbl ftbl_gcc_apss_axi_clk_src[] = {
- F(480000000, P_GPLL4_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_GPLL4_OUT_AUX, 2.5, 0, 0),
F(533333333, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
{ }
};
@@ -963,7 +963,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
.name = "gcc_sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_9,
.num_parents = ARRAY_SIZE(gcc_parent_data_9),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index 3f9c2f61a5d9..86b43175b042 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -26,8 +26,6 @@
#include "clk-regmap-mux.h"
#include "reset.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_BIAS_PLL,
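
(The file-local F() definition removed above duplicated the helper that clk-rcg.h, already included here, provides, so the frequency tables in this file are unaffected. The shared macro encodes the half-integer pre-divider as 2*h - 1:)

	#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
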
@@ -1654,7 +1652,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -4151,15 +4149,18 @@ static struct clk_branch gcc_dcc_clk = {
static const struct alpha_pll_config ubi32_pll_config = {
.l = 0x3e,
- .alpha = 0x57,
- .config_ctl_val = 0x240d6aa8,
- .config_ctl_hi_val = 0x3c2,
+ .alpha = 0x6667,
+ .config_ctl_val = 0x240d4828,
+ .config_ctl_hi_val = 0x6,
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.pre_div_val = 0x0,
.pre_div_mask = BIT(12),
.post_div_val = 0x0,
.post_div_mask = GENMASK(9, 8),
+ .alpha_en_mask = BIT(24),
+ .test_ctl_val = 0x1C0000C0,
+ .test_ctl_hi_val = 0x4000,
};
static const struct alpha_pll_config nss_crypto_pll_config = {
@@ -4517,24 +4518,24 @@ static const struct qcom_reset_map gcc_ipq6018_resets[] = {
[GCC_PCIE0_AHB_ARES] = { 0x75040, 5 },
[GCC_PCIE0_AXI_MASTER_STICKY_ARES] = { 0x75040, 6 },
[GCC_PCIE0_AXI_SLAVE_STICKY_ARES] = { 0x75040, 7 },
- [GCC_PPE_FULL_RESET] = { 0x68014, 0 },
- [GCC_UNIPHY0_SOFT_RESET] = { 0x56004, 0 },
+ [GCC_PPE_FULL_RESET] = { .reg = 0x68014, .bitmask = 0xf0000 },
+ [GCC_UNIPHY0_SOFT_RESET] = { .reg = 0x56004, .bitmask = 0x3ff2 },
[GCC_UNIPHY0_XPCS_RESET] = { 0x56004, 2 },
- [GCC_UNIPHY1_SOFT_RESET] = { 0x56104, 0 },
+ [GCC_UNIPHY1_SOFT_RESET] = { .reg = 0x56104, .bitmask = 0x32 },
[GCC_UNIPHY1_XPCS_RESET] = { 0x56104, 2 },
- [GCC_EDMA_HW_RESET] = { 0x68014, 0 },
- [GCC_NSSPORT1_RESET] = { 0x68014, 0 },
- [GCC_NSSPORT2_RESET] = { 0x68014, 0 },
- [GCC_NSSPORT3_RESET] = { 0x68014, 0 },
- [GCC_NSSPORT4_RESET] = { 0x68014, 0 },
- [GCC_NSSPORT5_RESET] = { 0x68014, 0 },
- [GCC_UNIPHY0_PORT1_ARES] = { 0x56004, 0 },
- [GCC_UNIPHY0_PORT2_ARES] = { 0x56004, 0 },
- [GCC_UNIPHY0_PORT3_ARES] = { 0x56004, 0 },
- [GCC_UNIPHY0_PORT4_ARES] = { 0x56004, 0 },
- [GCC_UNIPHY0_PORT5_ARES] = { 0x56004, 0 },
- [GCC_UNIPHY0_PORT_4_5_RESET] = { 0x56004, 0 },
- [GCC_UNIPHY0_PORT_4_RESET] = { 0x56004, 0 },
+ [GCC_EDMA_HW_RESET] = { .reg = 0x68014, .bitmask = 0x300000 },
+ [GCC_NSSPORT1_RESET] = { .reg = 0x68014, .bitmask = 0x1000003 },
+ [GCC_NSSPORT2_RESET] = { .reg = 0x68014, .bitmask = 0x200000c },
+ [GCC_NSSPORT3_RESET] = { .reg = 0x68014, .bitmask = 0x4000030 },
+ [GCC_NSSPORT4_RESET] = { .reg = 0x68014, .bitmask = 0x8000300 },
+ [GCC_NSSPORT5_RESET] = { .reg = 0x68014, .bitmask = 0x10000c00 },
+ [GCC_UNIPHY0_PORT1_ARES] = { .reg = 0x56004, .bitmask = 0x30 },
+ [GCC_UNIPHY0_PORT2_ARES] = { .reg = 0x56004, .bitmask = 0xc0 },
+ [GCC_UNIPHY0_PORT3_ARES] = { .reg = 0x56004, .bitmask = 0x300 },
+ [GCC_UNIPHY0_PORT4_ARES] = { .reg = 0x56004, .bitmask = 0xc00 },
+ [GCC_UNIPHY0_PORT5_ARES] = { .reg = 0x56004, .bitmask = 0x3000 },
+ [GCC_UNIPHY0_PORT_4_5_RESET] = { .reg = 0x56004, .bitmask = 0x3c02 },
+ [GCC_UNIPHY0_PORT_4_RESET] = { .reg = 0x56004, .bitmask = 0xc02 },
[GCC_LPASS_BCR] = {0x1F000, 0},
[GCC_UBI32_TBU_BCR] = {0x65000, 0},
[GCC_LPASS_TBU_BCR] = {0x6C000, 0},
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index b2a2d618a5ec..6914f962c893 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -3,24 +3,24 @@
* Copyright (c) 2023 The Linux Foundation. All rights reserved.
*/
+#include <linux/clk-provider.h>
#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,ipq9574-gcc.h>
#include <dt-bindings/reset/qcom,ipq9574-gcc.h>
-#include "clk-rcg.h"
-#include "clk-branch.h"
#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
#include "clk-regmap-phy-mux.h"
+#include "common.h"
#include "reset.h"
/* Need to match the order of clocks in DT binding */
@@ -69,7 +69,7 @@ static struct clk_alpha_pll gpll0_main = {
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gpll0_main",
.parent_data = gcc_xo_data,
.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -81,7 +81,7 @@ static struct clk_alpha_pll gpll0_main = {
static struct clk_fixed_factor gpll0_out_main_div2 = {
.mult = 1,
.div = 2,
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gpll0_out_main_div2",
.parent_hws = (const struct clk_hw *[]) {
&gpll0_main.clkr.hw
@@ -96,7 +96,7 @@ static struct clk_alpha_pll_postdiv gpll0 = {
.offset = 0x20000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll0",
.parent_hws = (const struct clk_hw *[]) {
&gpll0_main.clkr.hw
@@ -113,7 +113,7 @@ static struct clk_alpha_pll gpll4_main = {
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(2),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gpll4_main",
.parent_data = gcc_xo_data,
.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -126,7 +126,7 @@ static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x22000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll4",
.parent_hws = (const struct clk_hw *[]) {
&gpll4_main.clkr.hw
@@ -143,7 +143,7 @@ static struct clk_alpha_pll gpll2_main = {
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gpll2_main",
.parent_data = gcc_xo_data,
.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -156,7 +156,7 @@ static struct clk_alpha_pll_postdiv gpll2 = {
.offset = 0x21000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll2",
.parent_hws = (const struct clk_hw *[]) {
&gpll2_main.clkr.hw
@@ -172,7 +172,7 @@ static struct clk_branch gcc_sleep_clk_src = {
.clkr = {
.enable_reg = 0x3400c,
.enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_sleep_clk_src",
.parent_data = gcc_sleep_clk_data,
.num_parents = ARRAY_SIZE(gcc_sleep_clk_data),
@@ -420,7 +420,7 @@ static struct clk_rcg2 apss_ahb_clk_src = {
.freq_tbl = ftbl_apss_ahb_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "apss_ahb_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -438,7 +438,7 @@ static struct clk_rcg2 apss_axi_clk_src = {
.freq_tbl = ftbl_apss_axi_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_div2_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "apss_axi_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_div2_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_div2_gpll0),
@@ -458,7 +458,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup1_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -483,7 +483,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup1_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -496,7 +496,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup2_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -510,7 +510,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup2_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -523,7 +523,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup3_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -537,7 +537,7 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup3_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -550,7 +550,7 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup4_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -564,7 +564,7 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup4_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -577,7 +577,7 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup5_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -591,7 +591,7 @@ static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup5_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -604,7 +604,7 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup6_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -618,7 +618,7 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_qup6_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -650,7 +650,7 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_uart1_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -664,7 +664,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_uart2_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -678,7 +678,7 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_uart3_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -692,7 +692,7 @@ static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_uart4_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -706,7 +706,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_uart5_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -720,7 +720,7 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "blsp1_uart6_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -728,13 +728,48 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
},
};

+static const struct freq_tbl ftbl_gcc_crypto_clk_src[] = {
+ F(160000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .freq_tbl = ftbl_gcc_crypto_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_crypto_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_crypto_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_crypto_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_apss_ahb_clk = {
.halt_reg = 0x24018,
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x0b004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_apss_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&apss_ahb_clk_src.clkr.hw
@@ -752,7 +787,7 @@ static struct clk_branch gcc_apss_axi_clk = {
.clkr = {
.enable_reg = 0x0b004,
.enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_apss_axi_clk",
.parent_hws = (const struct clk_hw *[]) {
&apss_axi_clk_src.clkr.hw
@@ -769,7 +804,7 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x2024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup1_i2c_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup1_i2c_apps_clk_src.clkr.hw
@@ -786,7 +821,7 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.clkr = {
.enable_reg = 0x02020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup1_spi_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup1_spi_apps_clk_src.clkr.hw
@@ -803,7 +838,7 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x03024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup2_i2c_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup2_i2c_apps_clk_src.clkr.hw
@@ -820,7 +855,7 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.clkr = {
.enable_reg = 0x03020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup2_spi_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup2_spi_apps_clk_src.clkr.hw
@@ -837,7 +872,7 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x04024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup3_i2c_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup3_i2c_apps_clk_src.clkr.hw
@@ -854,7 +889,7 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.clkr = {
.enable_reg = 0x04020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup3_spi_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup3_spi_apps_clk_src.clkr.hw
@@ -871,7 +906,7 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x05024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup4_i2c_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup4_i2c_apps_clk_src.clkr.hw
@@ -888,7 +923,7 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.clkr = {
.enable_reg = 0x05020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup4_spi_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup4_spi_apps_clk_src.clkr.hw
@@ -905,7 +940,7 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x06024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup5_i2c_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup5_i2c_apps_clk_src.clkr.hw
@@ -922,7 +957,7 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.clkr = {
.enable_reg = 0x06020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup5_spi_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup5_spi_apps_clk_src.clkr.hw
@@ -939,7 +974,7 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x07024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup6_i2c_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup6_i2c_apps_clk_src.clkr.hw
@@ -956,7 +991,7 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.clkr = {
.enable_reg = 0x07020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_qup6_spi_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_qup6_spi_apps_clk_src.clkr.hw
@@ -973,7 +1008,7 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.clkr = {
.enable_reg = 0x02040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_uart1_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_uart1_apps_clk_src.clkr.hw
@@ -990,7 +1025,7 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.clkr = {
.enable_reg = 0x03040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_uart2_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_uart2_apps_clk_src.clkr.hw
@@ -1007,7 +1042,7 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
.clkr = {
.enable_reg = 0x04054,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_uart3_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_uart3_apps_clk_src.clkr.hw
@@ -1024,7 +1059,7 @@ static struct clk_branch gcc_blsp1_uart4_apps_clk = {
.clkr = {
.enable_reg = 0x05040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_uart4_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_uart4_apps_clk_src.clkr.hw
@@ -1041,7 +1076,7 @@ static struct clk_branch gcc_blsp1_uart5_apps_clk = {
.clkr = {
.enable_reg = 0x06040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_uart5_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_uart5_apps_clk_src.clkr.hw
@@ -1058,7 +1093,7 @@ static struct clk_branch gcc_blsp1_uart6_apps_clk = {
.clkr = {
.enable_reg = 0x07040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_uart6_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&blsp1_uart6_apps_clk_src.clkr.hw
@@ -1080,7 +1115,7 @@ static struct clk_rcg2 pcie0_axi_m_clk_src = {
.freq_tbl = ftbl_pcie0_axi_m_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie0_axi_m_clk_src",
.parent_data = gcc_xo_gpll0_gpll4,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1093,7 +1128,7 @@ static struct clk_branch gcc_pcie0_axi_m_clk = {
.clkr = {
.enable_reg = 0x28038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie0_axi_m_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie0_axi_m_clk_src.clkr.hw
@@ -1110,7 +1145,7 @@ static struct clk_branch gcc_anoc_pcie0_1lane_m_clk = {
.clkr = {
.enable_reg = 0x2e07c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_anoc_pcie0_1lane_m_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie0_axi_m_clk_src.clkr.hw
@@ -1127,7 +1162,7 @@ static struct clk_rcg2 pcie1_axi_m_clk_src = {
.freq_tbl = ftbl_pcie0_axi_m_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie1_axi_m_clk_src",
.parent_data = gcc_xo_gpll0_gpll4,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1140,7 +1175,7 @@ static struct clk_branch gcc_pcie1_axi_m_clk = {
.clkr = {
.enable_reg = 0x29038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie1_axi_m_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie1_axi_m_clk_src.clkr.hw
@@ -1157,7 +1192,7 @@ static struct clk_branch gcc_anoc_pcie1_1lane_m_clk = {
.clkr = {
.enable_reg = 0x2e08c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_anoc_pcie1_1lane_m_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie1_axi_m_clk_src.clkr.hw
@@ -1179,7 +1214,7 @@ static struct clk_rcg2 pcie2_axi_m_clk_src = {
.freq_tbl = ftbl_pcie2_axi_m_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie2_axi_m_clk_src",
.parent_data = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk),
@@ -1192,7 +1227,7 @@ static struct clk_branch gcc_pcie2_axi_m_clk = {
.clkr = {
.enable_reg = 0x2a038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie2_axi_m_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie2_axi_m_clk_src.clkr.hw
@@ -1209,7 +1244,7 @@ static struct clk_branch gcc_anoc_pcie2_2lane_m_clk = {
.clkr = {
.enable_reg = 0x2e080,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_anoc_pcie2_2lane_m_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie2_axi_m_clk_src.clkr.hw
@@ -1226,7 +1261,7 @@ static struct clk_rcg2 pcie3_axi_m_clk_src = {
.freq_tbl = ftbl_pcie2_axi_m_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie3_axi_m_clk_src",
.parent_data = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk),
@@ -1239,7 +1274,7 @@ static struct clk_branch gcc_pcie3_axi_m_clk = {
.clkr = {
.enable_reg = 0x2b038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie3_axi_m_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie3_axi_m_clk_src.clkr.hw
@@ -1256,7 +1291,7 @@ static struct clk_branch gcc_anoc_pcie3_2lane_m_clk = {
.clkr = {
.enable_reg = 0x2e090,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_anoc_pcie3_2lane_m_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie3_axi_m_clk_src.clkr.hw
@@ -1273,7 +1308,7 @@ static struct clk_rcg2 pcie0_axi_s_clk_src = {
.freq_tbl = ftbl_pcie0_axi_m_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie0_axi_s_clk_src",
.parent_data = gcc_xo_gpll0_gpll4,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1286,7 +1321,7 @@ static struct clk_branch gcc_pcie0_axi_s_clk = {
.clkr = {
.enable_reg = 0x2803c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie0_axi_s_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie0_axi_s_clk_src.clkr.hw
@@ -1303,7 +1338,7 @@ static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
.clkr = {
.enable_reg = 0x28040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie0_axi_s_bridge_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie0_axi_s_clk_src.clkr.hw
@@ -1320,7 +1355,7 @@ static struct clk_branch gcc_snoc_pcie0_1lane_s_clk = {
.clkr = {
.enable_reg = 0x2e048,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_snoc_pcie0_1lane_s_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie0_axi_s_clk_src.clkr.hw
@@ -1337,7 +1372,7 @@ static struct clk_rcg2 pcie1_axi_s_clk_src = {
.freq_tbl = ftbl_pcie0_axi_m_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie1_axi_s_clk_src",
.parent_data = gcc_xo_gpll0_gpll4,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1350,7 +1385,7 @@ static struct clk_branch gcc_pcie1_axi_s_clk = {
.clkr = {
.enable_reg = 0x2903c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie1_axi_s_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie1_axi_s_clk_src.clkr.hw
@@ -1367,7 +1402,7 @@ static struct clk_branch gcc_pcie1_axi_s_bridge_clk = {
.clkr = {
.enable_reg = 0x29040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie1_axi_s_bridge_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie1_axi_s_clk_src.clkr.hw
@@ -1384,7 +1419,7 @@ static struct clk_branch gcc_snoc_pcie1_1lane_s_clk = {
.clkr = {
.enable_reg = 0x2e04c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_snoc_pcie1_1lane_s_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie1_axi_s_clk_src.clkr.hw
@@ -1401,7 +1436,7 @@ static struct clk_rcg2 pcie2_axi_s_clk_src = {
.freq_tbl = ftbl_pcie0_axi_m_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie2_axi_s_clk_src",
.parent_data = gcc_xo_gpll0_gpll4,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1414,7 +1449,7 @@ static struct clk_branch gcc_pcie2_axi_s_clk = {
.clkr = {
.enable_reg = 0x2a03c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie2_axi_s_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie2_axi_s_clk_src.clkr.hw
@@ -1431,7 +1466,7 @@ static struct clk_branch gcc_pcie2_axi_s_bridge_clk = {
.clkr = {
.enable_reg = 0x2a040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie2_axi_s_bridge_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie2_axi_s_clk_src.clkr.hw
@@ -1448,7 +1483,7 @@ static struct clk_branch gcc_snoc_pcie2_2lane_s_clk = {
.clkr = {
.enable_reg = 0x2e050,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_snoc_pcie2_2lane_s_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie2_axi_s_clk_src.clkr.hw
@@ -1465,7 +1500,7 @@ static struct clk_rcg2 pcie3_axi_s_clk_src = {
.freq_tbl = ftbl_pcie0_axi_m_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie3_axi_s_clk_src",
.parent_data = gcc_xo_gpll0_gpll4,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1478,7 +1513,7 @@ static struct clk_branch gcc_pcie3_axi_s_clk = {
.clkr = {
.enable_reg = 0x2b03c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie3_axi_s_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie3_axi_s_clk_src.clkr.hw
@@ -1495,7 +1530,7 @@ static struct clk_branch gcc_pcie3_axi_s_bridge_clk = {
.clkr = {
.enable_reg = 0x2b040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie3_axi_s_bridge_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie3_axi_s_clk_src.clkr.hw
@@ -1512,7 +1547,7 @@ static struct clk_branch gcc_snoc_pcie3_2lane_s_clk = {
.clkr = {
.enable_reg = 0x2e054,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_snoc_pcie3_2lane_s_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie3_axi_s_clk_src.clkr.hw
@@ -1527,7 +1562,7 @@ static struct clk_branch gcc_snoc_pcie3_2lane_s_clk = {
static struct clk_regmap_phy_mux pcie0_pipe_clk_src = {
.reg = 0x28064,
.clkr = {
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "pcie0_pipe_clk_src",
.parent_data = &(const struct clk_parent_data) {
.index = DT_PCIE30_PHY0_PIPE_CLK,
@@ -1541,7 +1576,7 @@ static struct clk_regmap_phy_mux pcie0_pipe_clk_src = {
static struct clk_regmap_phy_mux pcie1_pipe_clk_src = {
.reg = 0x29064,
.clkr = {
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "pcie1_pipe_clk_src",
.parent_data = &(const struct clk_parent_data) {
.index = DT_PCIE30_PHY1_PIPE_CLK,
@@ -1555,7 +1590,7 @@ static struct clk_regmap_phy_mux pcie1_pipe_clk_src = {
static struct clk_regmap_phy_mux pcie2_pipe_clk_src = {
.reg = 0x2a064,
.clkr = {
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "pcie2_pipe_clk_src",
.parent_data = &(const struct clk_parent_data) {
.index = DT_PCIE30_PHY2_PIPE_CLK,
@@ -1569,7 +1604,7 @@ static struct clk_regmap_phy_mux pcie2_pipe_clk_src = {
static struct clk_regmap_phy_mux pcie3_pipe_clk_src = {
.reg = 0x2b064,
.clkr = {
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "pcie3_pipe_clk_src",
.parent_data = &(const struct clk_parent_data) {
.index = DT_PCIE30_PHY3_PIPE_CLK,
@@ -1591,7 +1626,7 @@ static struct clk_rcg2 pcie0_rchng_clk_src = {
.freq_tbl = ftbl_pcie_rchng_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie0_rchng_clk_src",
.parent_data = gcc_xo_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -1604,7 +1639,7 @@ static struct clk_branch gcc_pcie0_rchng_clk = {
.clkr = {
.enable_reg = 0x28028,
.enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie0_rchng_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie0_rchng_clk_src.clkr.hw
@@ -1622,7 +1657,7 @@ static struct clk_rcg2 pcie1_rchng_clk_src = {
.freq_tbl = ftbl_pcie_rchng_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie1_rchng_clk_src",
.parent_data = gcc_xo_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -1635,7 +1670,7 @@ static struct clk_branch gcc_pcie1_rchng_clk = {
.clkr = {
.enable_reg = 0x29028,
.enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie1_rchng_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie1_rchng_clk_src.clkr.hw
@@ -1652,7 +1687,7 @@ static struct clk_rcg2 pcie2_rchng_clk_src = {
.freq_tbl = ftbl_pcie_rchng_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie2_rchng_clk_src",
.parent_data = gcc_xo_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -1665,7 +1700,7 @@ static struct clk_branch gcc_pcie2_rchng_clk = {
.clkr = {
.enable_reg = 0x2a028,
.enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie2_rchng_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie2_rchng_clk_src.clkr.hw
@@ -1682,7 +1717,7 @@ static struct clk_rcg2 pcie3_rchng_clk_src = {
.freq_tbl = ftbl_pcie_rchng_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie3_rchng_clk_src",
.parent_data = gcc_xo_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -1695,7 +1730,7 @@ static struct clk_branch gcc_pcie3_rchng_clk = {
.clkr = {
.enable_reg = 0x2b028,
.enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie3_rchng_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie3_rchng_clk_src.clkr.hw
@@ -1718,7 +1753,7 @@ static struct clk_rcg2 pcie_aux_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcie_aux_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk),
@@ -1731,7 +1766,7 @@ static struct clk_branch gcc_pcie0_aux_clk = {
.clkr = {
.enable_reg = 0x28034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie0_aux_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie_aux_clk_src.clkr.hw
@@ -1748,7 +1783,7 @@ static struct clk_branch gcc_pcie1_aux_clk = {
.clkr = {
.enable_reg = 0x29034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie1_aux_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie_aux_clk_src.clkr.hw
@@ -1765,7 +1800,7 @@ static struct clk_branch gcc_pcie2_aux_clk = {
.clkr = {
.enable_reg = 0x2a034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie2_aux_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie_aux_clk_src.clkr.hw
@@ -1782,7 +1817,7 @@ static struct clk_branch gcc_pcie3_aux_clk = {
.clkr = {
.enable_reg = 0x2b034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie3_aux_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcie_aux_clk_src.clkr.hw
@@ -1805,7 +1840,7 @@ static struct clk_rcg2 usb0_aux_clk_src = {
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_core_pi_sleep_clk_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "usb0_aux_clk_src",
.parent_data = gcc_xo_gpll0_core_pi_sleep_clk,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_core_pi_sleep_clk),
@@ -1818,7 +1853,7 @@ static struct clk_branch gcc_usb0_aux_clk = {
.clkr = {
.enable_reg = 0x2c048,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_usb0_aux_clk",
.parent_hws = (const struct clk_hw *[]) {
&usb0_aux_clk_src.clkr.hw
@@ -1842,7 +1877,7 @@ static struct clk_rcg2 usb0_master_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_out_main_div2_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "usb0_master_clk_src",
.parent_data = gcc_xo_gpll0_out_main_div2_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_out_main_div2_gpll0),
@@ -1855,7 +1890,7 @@ static struct clk_branch gcc_usb0_master_clk = {
.clkr = {
.enable_reg = 0x2c044,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_usb0_master_clk",
.parent_hws = (const struct clk_hw *[]) {
&usb0_master_clk_src.clkr.hw
@@ -1872,7 +1907,7 @@ static struct clk_branch gcc_snoc_usb_clk = {
.clkr = {
.enable_reg = 0x2e058,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_snoc_usb_clk",
.parent_hws = (const struct clk_hw *[]) {
&usb0_master_clk_src.clkr.hw
@@ -1889,7 +1924,7 @@ static struct clk_branch gcc_anoc_usb_axi_clk = {
.clkr = {
.enable_reg = 0x2e084,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_anoc_usb_axi_clk",
.parent_hws = (const struct clk_hw *[]) {
&usb0_master_clk_src.clkr.hw
@@ -1913,7 +1948,7 @@ static struct clk_rcg2 usb0_mock_utmi_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll4_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "usb0_mock_utmi_clk_src",
.parent_data = gcc_xo_gpll4_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_out_main_div2),
@@ -1925,7 +1960,7 @@ static struct clk_regmap_div usb0_mock_utmi_div_clk_src = {
.reg = 0x2c040,
.shift = 0,
.width = 2,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "usb0_mock_utmi_div_clk_src",
.parent_data = &(const struct clk_parent_data) {
.hw = &usb0_mock_utmi_clk_src.clkr.hw,
@@ -1941,7 +1976,7 @@ static struct clk_branch gcc_usb0_mock_utmi_clk = {
.clkr = {
.enable_reg = 0x2c04c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_usb0_mock_utmi_clk",
.parent_hws = (const struct clk_hw *[]) {
&usb0_mock_utmi_div_clk_src.clkr.hw
@@ -1959,7 +1994,7 @@ static struct clk_regmap_mux usb0_pipe_clk_src = {
.width = 2,
.parent_map = gcc_usb3phy_0_cc_pipe_clk_xo_map,
.clkr = {
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "usb0_pipe_clk_src",
.parent_data = gcc_usb3phy_0_cc_pipe_clk_xo,
.num_parents = ARRAY_SIZE(gcc_usb3phy_0_cc_pipe_clk_xo),
@@ -1988,7 +2023,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_gpll0_out_main_div2),
@@ -2001,7 +2036,7 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.clkr = {
.enable_reg = 0x3302c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_sdcc1_apps_clk",
.parent_hws = (const struct clk_hw *[]) {
&sdcc1_apps_clk_src.clkr.hw
@@ -2024,7 +2059,7 @@ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_gpll0_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "sdcc1_ice_core_clk_src",
.parent_data = gcc_xo_gpll0_gpll4_gpll0_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_gpll0_div2),
@@ -2037,7 +2072,7 @@ static struct clk_branch gcc_sdcc1_ice_core_clk = {
.clkr = {
.enable_reg = 0x33030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_sdcc1_ice_core_clk",
.parent_hws = (const struct clk_hw *[]) {
&sdcc1_ice_core_clk_src.clkr.hw
@@ -2062,7 +2097,7 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
.freq_tbl = ftbl_pcnoc_bfdcd_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "pcnoc_bfdcd_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -2071,12 +2106,44 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
},
};

+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16010,
+ .clkr = {
+ .enable_reg = 0x16010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_crypto_axi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16014,
+ .clkr = {
+ .enable_reg = 0x16014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_crypto_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_nsscfg_clk = {
.halt_reg = 0x1702c,
.clkr = {
.enable_reg = 0x1702c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nsscfg_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2093,7 +2160,7 @@ static struct clk_branch gcc_nssnoc_nsscc_clk = {
.clkr = {
.enable_reg = 0x17030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_nsscc_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2110,7 +2177,7 @@ static struct clk_branch gcc_nsscc_clk = {
.clkr = {
.enable_reg = 0x17034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nsscc_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2127,7 +2194,7 @@ static struct clk_branch gcc_nssnoc_pcnoc_1_clk = {
.clkr = {
.enable_reg = 0x17080,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_pcnoc_1_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2144,7 +2211,7 @@ static struct clk_branch gcc_qdss_dap_ahb_clk = {
.clkr = {
.enable_reg = 0x2d064,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_dap_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2161,7 +2228,7 @@ static struct clk_branch gcc_qdss_cfg_ahb_clk = {
.clkr = {
.enable_reg = 0x2d068,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_cfg_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2178,7 +2245,7 @@ static struct clk_branch gcc_qpic_ahb_clk = {
.clkr = {
.enable_reg = 0x32010,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qpic_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2195,7 +2262,7 @@ static struct clk_branch gcc_qpic_clk = {
.clkr = {
.enable_reg = 0x32014,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qpic_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2213,7 +2280,7 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.clkr = {
.enable_reg = 0x0b004,
.enable_mask = BIT(4),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_blsp1_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2230,7 +2297,7 @@ static struct clk_branch gcc_mdio_ahb_clk = {
.clkr = {
.enable_reg = 0x17040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_mdio_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2248,7 +2315,7 @@ static struct clk_branch gcc_prng_ahb_clk = {
.clkr = {
.enable_reg = 0x0b004,
.enable_mask = BIT(10),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_prng_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2265,7 +2332,7 @@ static struct clk_branch gcc_uniphy0_ahb_clk = {
.clkr = {
.enable_reg = 0x1704c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_uniphy0_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2282,7 +2349,7 @@ static struct clk_branch gcc_uniphy1_ahb_clk = {
.clkr = {
.enable_reg = 0x1705c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_uniphy1_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2299,7 +2366,7 @@ static struct clk_branch gcc_uniphy2_ahb_clk = {
.clkr = {
.enable_reg = 0x1706c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_uniphy2_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2316,7 +2383,7 @@ static struct clk_branch gcc_cmn_12gpll_ahb_clk = {
.clkr = {
.enable_reg = 0x3a004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_cmn_12gpll_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2333,7 +2400,7 @@ static struct clk_branch gcc_cmn_12gpll_apu_clk = {
.clkr = {
.enable_reg = 0x3a00c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_cmn_12gpll_apu_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2350,7 +2417,7 @@ static struct clk_branch gcc_pcie0_ahb_clk = {
.clkr = {
.enable_reg = 0x28030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie0_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2367,7 +2434,7 @@ static struct clk_branch gcc_pcie1_ahb_clk = {
.clkr = {
.enable_reg = 0x29030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie1_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2384,7 +2451,7 @@ static struct clk_branch gcc_pcie2_ahb_clk = {
.clkr = {
.enable_reg = 0x2a030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie2_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2401,7 +2468,7 @@ static struct clk_branch gcc_pcie3_ahb_clk = {
.clkr = {
.enable_reg = 0x2b030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie3_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2418,7 +2485,7 @@ static struct clk_branch gcc_usb0_phy_cfg_ahb_clk = {
.clkr = {
.enable_reg = 0x2c05c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_usb0_phy_cfg_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2435,7 +2502,7 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.clkr = {
.enable_reg = 0x33034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_sdcc1_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2460,7 +2527,7 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
.freq_tbl = ftbl_system_noc_bfdcd_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "system_noc_bfdcd_clk_src",
.parent_data = gcc_xo_gpll0_gpll4,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -2475,7 +2542,7 @@ static struct clk_branch gcc_q6ss_boot_clk = {
.clkr = {
.enable_reg = 0x25080,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_q6ss_boot_clk",
.parent_hws = (const struct clk_hw *[]) {
&system_noc_bfdcd_clk_src.clkr.hw
@@ -2492,7 +2559,7 @@ static struct clk_branch gcc_nssnoc_snoc_clk = {
.clkr = {
.enable_reg = 0x17028,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_snoc_clk",
.parent_hws = (const struct clk_hw *[]) {
&system_noc_bfdcd_clk_src.clkr.hw
@@ -2509,7 +2576,7 @@ static struct clk_branch gcc_nssnoc_snoc_1_clk = {
.clkr = {
.enable_reg = 0x1707c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_snoc_1_clk",
.parent_hws = (const struct clk_hw *[]) {
&system_noc_bfdcd_clk_src.clkr.hw
@@ -2526,7 +2593,7 @@ static struct clk_branch gcc_qdss_etr_usb_clk = {
.clkr = {
.enable_reg = 0x2d060,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_etr_usb_clk",
.parent_hws = (const struct clk_hw *[]) {
&system_noc_bfdcd_clk_src.clkr.hw
@@ -2549,7 +2616,7 @@ static struct clk_rcg2 wcss_ahb_clk_src = {
.freq_tbl = ftbl_wcss_ahb_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "wcss_ahb_clk_src",
.parent_data = gcc_xo_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -2562,7 +2629,7 @@ static struct clk_branch gcc_q6_ahb_clk = {
.clkr = {
.enable_reg = 0x25014,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_q6_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&wcss_ahb_clk_src.clkr.hw
@@ -2579,7 +2646,7 @@ static struct clk_branch gcc_q6_ahb_s_clk = {
.clkr = {
.enable_reg = 0x25018,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_q6_ahb_s_clk",
.parent_hws = (const struct clk_hw *[]) {
&wcss_ahb_clk_src.clkr.hw
@@ -2596,7 +2663,7 @@ static struct clk_branch gcc_wcss_ecahb_clk = {
.clkr = {
.enable_reg = 0x25058,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_wcss_ecahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&wcss_ahb_clk_src.clkr.hw
@@ -2613,7 +2680,7 @@ static struct clk_branch gcc_wcss_acmt_clk = {
.clkr = {
.enable_reg = 0x2505c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_wcss_acmt_clk",
.parent_hws = (const struct clk_hw *[]) {
&wcss_ahb_clk_src.clkr.hw
@@ -2630,7 +2697,7 @@ static struct clk_branch gcc_sys_noc_wcss_ahb_clk = {
.clkr = {
.enable_reg = 0x2e030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_sys_noc_wcss_ahb_clk",
.parent_hws = (const struct clk_hw *[]) {
&wcss_ahb_clk_src.clkr.hw
@@ -2654,7 +2721,7 @@ static struct clk_rcg2 wcss_axi_m_clk_src = {
.freq_tbl = ftbl_wcss_axi_m_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "wcss_axi_m_clk_src",
.parent_data = gcc_xo_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -2667,7 +2734,7 @@ static struct clk_branch gcc_anoc_wcss_axi_m_clk = {
.clkr = {
.enable_reg = 0x2e0a8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_anoc_wcss_axi_m_clk",
.parent_hws = (const struct clk_hw *[]) {
&wcss_axi_m_clk_src.clkr.hw
@@ -2689,7 +2756,7 @@ static struct clk_rcg2 qdss_at_clk_src = {
.freq_tbl = ftbl_qdss_at_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "qdss_at_clk_src",
.parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_div2),
@@ -2702,7 +2769,7 @@ static struct clk_branch gcc_q6ss_atbm_clk = {
.clkr = {
.enable_reg = 0x2501c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_q6ss_atbm_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_at_clk_src.clkr.hw
@@ -2719,7 +2786,7 @@ static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = {
.clkr = {
.enable_reg = 0x2503c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_wcss_dbg_ifc_atb_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_at_clk_src.clkr.hw
@@ -2736,7 +2803,7 @@ static struct clk_branch gcc_nssnoc_atb_clk = {
.clkr = {
.enable_reg = 0x17014,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_atb_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_at_clk_src.clkr.hw
@@ -2753,7 +2820,7 @@ static struct clk_branch gcc_qdss_at_clk = {
.clkr = {
.enable_reg = 0x2d038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_at_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_at_clk_src.clkr.hw
@@ -2770,7 +2837,7 @@ static struct clk_branch gcc_sys_noc_at_clk = {
.clkr = {
.enable_reg = 0x2e038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_sys_noc_at_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_at_clk_src.clkr.hw
@@ -2787,7 +2854,7 @@ static struct clk_branch gcc_pcnoc_at_clk = {
.clkr = {
.enable_reg = 0x31024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_pcnoc_at_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_at_clk_src.clkr.hw
@@ -2802,7 +2869,7 @@ static struct clk_branch gcc_pcnoc_at_clk = {
static struct clk_fixed_factor gcc_eud_at_div_clk_src = {
.mult = 1,
.div = 6,
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_eud_at_div_clk_src",
.parent_hws = (const struct clk_hw *[]) {
&qdss_at_clk_src.clkr.hw
@@ -2818,7 +2885,7 @@ static struct clk_branch gcc_usb0_eud_at_clk = {
.clkr = {
.enable_reg = 0x30004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_usb0_eud_at_clk",
.parent_hws = (const struct clk_hw *[]) {
&gcc_eud_at_div_clk_src.hw
@@ -2835,7 +2902,7 @@ static struct clk_branch gcc_qdss_eud_at_clk = {
.clkr = {
.enable_reg = 0x2d06c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_eud_at_clk",
.parent_hws = (const struct clk_hw *[]) {
&gcc_eud_at_div_clk_src.hw
@@ -2858,7 +2925,7 @@ static struct clk_rcg2 qdss_stm_clk_src = {
.freq_tbl = ftbl_qdss_stm_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "qdss_stm_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -2871,7 +2938,7 @@ static struct clk_branch gcc_qdss_stm_clk = {
.clkr = {
.enable_reg = 0x2d03c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_stm_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_stm_clk_src.clkr.hw
@@ -2888,7 +2955,7 @@ static struct clk_branch gcc_sys_noc_qdss_stm_axi_clk = {
.clkr = {
.enable_reg = 0x2e034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_sys_noc_qdss_stm_axi_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_stm_clk_src.clkr.hw
@@ -2910,7 +2977,7 @@ static struct clk_rcg2 qdss_traceclkin_clk_src = {
.freq_tbl = ftbl_qdss_traceclkin_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "qdss_traceclkin_clk_src",
.parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_div2),
@@ -2923,7 +2990,7 @@ static struct clk_branch gcc_qdss_traceclkin_clk = {
.clkr = {
.enable_reg = 0x2d040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_traceclkin_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_traceclkin_clk_src.clkr.hw
@@ -2945,7 +3012,7 @@ static struct clk_rcg2 qdss_tsctr_clk_src = {
.freq_tbl = ftbl_qdss_tsctr_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "qdss_tsctr_clk_src",
.parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_div2),
@@ -2956,7 +3023,7 @@ static struct clk_rcg2 qdss_tsctr_clk_src = {
static struct clk_fixed_factor qdss_tsctr_div2_clk_src = {
.mult = 1,
.div = 2,
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "qdss_tsctr_div2_clk_src",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_clk_src.clkr.hw
@@ -2972,7 +3039,7 @@ static struct clk_branch gcc_q6_tsctr_1to2_clk = {
.clkr = {
.enable_reg = 0x25020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_q6_tsctr_1to2_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_div2_clk_src.hw
@@ -2989,7 +3056,7 @@ static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = {
.clkr = {
.enable_reg = 0x25040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_wcss_dbg_ifc_nts_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_div2_clk_src.hw
@@ -3006,7 +3073,7 @@ static struct clk_branch gcc_qdss_tsctr_div2_clk = {
.clkr = {
.enable_reg = 0x2d044,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_tsctr_div2_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_div2_clk_src.hw
@@ -3029,7 +3096,7 @@ static struct clk_rcg2 uniphy_sys_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "uniphy_sys_clk_src",
.parent_data = gcc_xo_data,
.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -3043,7 +3110,7 @@ static struct clk_rcg2 nss_ts_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "nss_ts_clk_src",
.parent_data = gcc_xo_data,
.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -3056,7 +3123,7 @@ static struct clk_branch gcc_qdss_ts_clk = {
.clkr = {
.enable_reg = 0x2d078,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_ts_clk",
.parent_hws = (const struct clk_hw *[]) {
&nss_ts_clk_src.clkr.hw
@@ -3071,7 +3138,7 @@ static struct clk_branch gcc_qdss_ts_clk = {
static struct clk_fixed_factor qdss_dap_sync_clk_src = {
.mult = 1,
.div = 4,
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "qdss_dap_sync_clk_src",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_clk_src.clkr.hw
@@ -3086,7 +3153,7 @@ static struct clk_branch gcc_qdss_tsctr_div4_clk = {
.clkr = {
.enable_reg = 0x2d04c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_tsctr_div4_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_dap_sync_clk_src.hw
@@ -3101,7 +3168,7 @@ static struct clk_branch gcc_qdss_tsctr_div4_clk = {
static struct clk_fixed_factor qdss_tsctr_div8_clk_src = {
.mult = 1,
.div = 8,
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "qdss_tsctr_div8_clk_src",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_clk_src.clkr.hw
@@ -3116,7 +3183,7 @@ static struct clk_branch gcc_nss_ts_clk = {
.clkr = {
.enable_reg = 0x17018,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nss_ts_clk",
.parent_hws = (const struct clk_hw *[]) {
&nss_ts_clk_src.clkr.hw
@@ -3133,7 +3200,7 @@ static struct clk_branch gcc_qdss_tsctr_div8_clk = {
.clkr = {
.enable_reg = 0x2d050,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_tsctr_div8_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_div8_clk_src.hw
@@ -3148,7 +3215,7 @@ static struct clk_branch gcc_qdss_tsctr_div8_clk = {
static struct clk_fixed_factor qdss_tsctr_div16_clk_src = {
.mult = 1,
.div = 16,
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "qdss_tsctr_div16_clk_src",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_clk_src.clkr.hw
@@ -3163,7 +3230,7 @@ static struct clk_branch gcc_qdss_tsctr_div16_clk = {
.clkr = {
.enable_reg = 0x2d054,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_tsctr_div16_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_div16_clk_src.hw
@@ -3180,7 +3247,7 @@ static struct clk_branch gcc_q6ss_pclkdbg_clk = {
.clkr = {
.enable_reg = 0x25024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_q6ss_pclkdbg_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_dap_sync_clk_src.hw
@@ -3197,7 +3264,7 @@ static struct clk_branch gcc_q6ss_trig_clk = {
.clkr = {
.enable_reg = 0x25068,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_q6ss_trig_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_dap_sync_clk_src.hw
@@ -3214,7 +3281,7 @@ static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = {
.clkr = {
.enable_reg = 0x25038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_wcss_dbg_ifc_apb_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_dap_sync_clk_src.hw
@@ -3231,7 +3298,7 @@ static struct clk_branch gcc_wcss_dbg_ifc_dapbus_clk = {
.clkr = {
.enable_reg = 0x25044,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_wcss_dbg_ifc_dapbus_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_dap_sync_clk_src.hw
@@ -3248,7 +3315,7 @@ static struct clk_branch gcc_qdss_dap_clk = {
.clkr = {
.enable_reg = 0x2d058,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_dap_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_dap_sync_clk_src.hw
@@ -3265,7 +3332,7 @@ static struct clk_branch gcc_qdss_apb2jtag_clk = {
.clkr = {
.enable_reg = 0x2d05c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_apb2jtag_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_dap_sync_clk_src.hw
@@ -3280,7 +3347,7 @@ static struct clk_branch gcc_qdss_apb2jtag_clk = {
static struct clk_fixed_factor qdss_tsctr_div3_clk_src = {
.mult = 1,
.div = 3,
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "qdss_tsctr_div3_clk_src",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_clk_src.clkr.hw
@@ -3295,7 +3362,7 @@ static struct clk_branch gcc_qdss_tsctr_div3_clk = {
.clkr = {
.enable_reg = 0x2d048,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_qdss_tsctr_div3_clk",
.parent_hws = (const struct clk_hw *[]) {
&qdss_tsctr_div3_clk_src.hw
@@ -3321,7 +3388,7 @@ static struct clk_rcg2 qpic_io_macro_clk_src = {
.freq_tbl = ftbl_qpic_io_macro_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "qpic_io_macro_clk_src",
.parent_data = gcc_xo_gpll0_gpll2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2),
@@ -3334,7 +3401,7 @@ static struct clk_branch gcc_qpic_io_macro_clk = {
.clkr = {
.enable_reg = 0x3200c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data){
.name = "gcc_qpic_io_macro_clk",
.parent_hws = (const struct clk_hw *[]){
&qpic_io_macro_clk_src.clkr.hw
@@ -3356,7 +3423,7 @@ static struct clk_rcg2 q6_axi_clk_src = {
.freq_tbl = ftbl_q6_axi_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll2_gpll4_pi_sleep_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "q6_axi_clk_src",
.parent_data = gcc_xo_gpll0_gpll2_gpll4_pi_sleep,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_gpll4_pi_sleep),
@@ -3369,7 +3436,7 @@ static struct clk_branch gcc_q6_axim_clk = {
.clkr = {
.enable_reg = 0x2500c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_q6_axim_clk",
.parent_hws = (const struct clk_hw *[]) {
&q6_axi_clk_src.clkr.hw
@@ -3387,7 +3454,7 @@ static struct clk_branch gcc_wcss_q6_tbu_clk = {
.clkr = {
.enable_reg = 0xb00c,
.enable_mask = BIT(6),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_wcss_q6_tbu_clk",
.parent_hws = (const struct clk_hw *[]) {
&q6_axi_clk_src.clkr.hw
@@ -3404,7 +3471,7 @@ static struct clk_branch gcc_mem_noc_q6_axi_clk = {
.clkr = {
.enable_reg = 0x19010,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_mem_noc_q6_axi_clk",
.parent_hws = (const struct clk_hw *[]) {
&q6_axi_clk_src.clkr.hw
@@ -3433,7 +3500,7 @@ static struct clk_rcg2 q6_axim2_clk_src = {
.freq_tbl = ftbl_q6_axim2_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_bias_pll_ubinc_clk_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "q6_axim2_clk_src",
.parent_data = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk),
@@ -3451,7 +3518,7 @@ static struct clk_rcg2 nssnoc_memnoc_bfdcd_clk_src = {
.freq_tbl = ftbl_nssnoc_memnoc_bfdcd_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_aux_gpll2_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "nssnoc_memnoc_bfdcd_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_aux_gpll2,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_aux_gpll2),
@@ -3464,7 +3531,7 @@ static struct clk_branch gcc_nssnoc_memnoc_clk = {
.clkr = {
.enable_reg = 0x17024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_memnoc_clk",
.parent_hws = (const struct clk_hw *[]) {
&nssnoc_memnoc_bfdcd_clk_src.clkr.hw
@@ -3481,7 +3548,7 @@ static struct clk_branch gcc_nssnoc_mem_noc_1_clk = {
.clkr = {
.enable_reg = 0x17084,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_mem_noc_1_clk",
.parent_hws = (const struct clk_hw *[]) {
&nssnoc_memnoc_bfdcd_clk_src.clkr.hw
@@ -3498,7 +3565,7 @@ static struct clk_branch gcc_nss_tbu_clk = {
.clkr = {
.enable_reg = 0xb00c,
.enable_mask = BIT(4),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nss_tbu_clk",
.parent_hws = (const struct clk_hw *[]) {
&nssnoc_memnoc_bfdcd_clk_src.clkr.hw
@@ -3515,7 +3582,7 @@ static struct clk_branch gcc_mem_noc_nssnoc_clk = {
.clkr = {
.enable_reg = 0x19014,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_mem_noc_nssnoc_clk",
.parent_hws = (const struct clk_hw *[]) {
&nssnoc_memnoc_bfdcd_clk_src.clkr.hw
@@ -3537,7 +3604,7 @@ static struct clk_rcg2 lpass_axim_clk_src = {
.freq_tbl = ftbl_lpass_axim_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "lpass_axim_clk_src",
.parent_data = gcc_xo_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -3550,7 +3617,7 @@ static struct clk_rcg2 lpass_sway_clk_src = {
.freq_tbl = ftbl_lpass_axim_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "lpass_sway_clk_src",
.parent_data = gcc_xo_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -3569,7 +3636,7 @@ static struct clk_rcg2 adss_pwm_clk_src = {
.freq_tbl = ftbl_adss_pwm_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "adss_pwm_clk_src",
.parent_data = gcc_xo_gpll0,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -3582,7 +3649,7 @@ static struct clk_branch gcc_adss_pwm_clk = {
.clkr = {
.enable_reg = 0x1c00c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_adss_pwm_clk",
.parent_hws = (const struct clk_hw *[]) {
&adss_pwm_clk_src.clkr.hw
@@ -3605,7 +3672,7 @@ static struct clk_rcg2 gp1_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_sleep_clk_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "gp1_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_sleep_clk,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_sleep_clk),
@@ -3618,7 +3685,7 @@ static struct clk_rcg2 gp2_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_sleep_clk_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "gp2_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_sleep_clk,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_sleep_clk),
@@ -3631,7 +3698,7 @@ static struct clk_rcg2 gp3_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll0_sleep_clk_map,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "gp3_clk_src",
.parent_data = gcc_xo_gpll0_gpll0_sleep_clk,
.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_sleep_clk),
@@ -3644,7 +3711,7 @@ static struct clk_branch gcc_xo_clk_src = {
.clkr = {
.enable_reg = 0x34004,
.enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_xo_clk_src",
.parent_data = gcc_xo_data,
.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -3659,7 +3726,7 @@ static struct clk_branch gcc_nssnoc_xo_dcd_clk = {
.clkr = {
.enable_reg = 0x17074,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_xo_dcd_clk",
.parent_hws = (const struct clk_hw *[]) {
&gcc_xo_clk_src.clkr.hw
@@ -3676,7 +3743,7 @@ static struct clk_branch gcc_xo_clk = {
.clkr = {
.enable_reg = 0x34018,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_xo_clk",
.parent_hws = (const struct clk_hw *[]) {
&gcc_xo_clk_src.clkr.hw
@@ -3693,7 +3760,7 @@ static struct clk_branch gcc_uniphy0_sys_clk = {
.clkr = {
.enable_reg = 0x17048,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_uniphy0_sys_clk",
.parent_hws = (const struct clk_hw *[]) {
&uniphy_sys_clk_src.clkr.hw
@@ -3710,7 +3777,7 @@ static struct clk_branch gcc_uniphy1_sys_clk = {
.clkr = {
.enable_reg = 0x17058,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_uniphy1_sys_clk",
.parent_hws = (const struct clk_hw *[]) {
&uniphy_sys_clk_src.clkr.hw
@@ -3727,7 +3794,7 @@ static struct clk_branch gcc_uniphy2_sys_clk = {
.clkr = {
.enable_reg = 0x17068,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_uniphy2_sys_clk",
.parent_hws = (const struct clk_hw *[]) {
&uniphy_sys_clk_src.clkr.hw
@@ -3744,7 +3811,7 @@ static struct clk_branch gcc_cmn_12gpll_sys_clk = {
.clkr = {
.enable_reg = 0x3a008,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_cmn_12gpll_sys_clk",
.parent_hws = (const struct clk_hw *[]) {
&uniphy_sys_clk_src.clkr.hw
@@ -3759,7 +3826,7 @@ static struct clk_branch gcc_cmn_12gpll_sys_clk = {
static struct clk_fixed_factor gcc_xo_div4_clk_src = {
.mult = 1,
.div = 4,
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_xo_div4_clk_src",
.parent_hws = (const struct clk_hw *[]) {
&gcc_xo_clk_src.clkr.hw
@@ -3775,7 +3842,7 @@ static struct clk_branch gcc_nssnoc_qosgen_ref_clk = {
.clkr = {
.enable_reg = 0x1701c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_qosgen_ref_clk",
.parent_hws = (const struct clk_hw *[]) {
&gcc_xo_div4_clk_src.hw
@@ -3792,7 +3859,7 @@ static struct clk_branch gcc_nssnoc_timeout_ref_clk = {
.clkr = {
.enable_reg = 0x17020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_nssnoc_timeout_ref_clk",
.parent_hws = (const struct clk_hw *[]) {
&gcc_xo_div4_clk_src.hw
@@ -3809,7 +3876,7 @@ static struct clk_branch gcc_xo_div4_clk = {
.clkr = {
.enable_reg = 0x3401c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "gcc_xo_div4_clk",
.parent_hws = (const struct clk_hw *[]) {
&gcc_xo_div4_clk_src.hw
@@ -3880,6 +3947,10 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
[GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
[GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
[GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_CRYPTO_CLK_SRC] = &gcc_crypto_clk_src.clkr,
[PCIE0_AXI_M_CLK_SRC] = &pcie0_axi_m_clk_src.clkr,
[GCC_PCIE0_AXI_M_CLK] = &gcc_pcie0_axi_m_clk.clkr,
[PCIE1_AXI_M_CLK_SRC] = &pcie1_axi_m_clk_src.clkr,
@@ -4063,6 +4134,7 @@ static const struct qcom_reset_map gcc_ipq9574_resets[] = {
[GCC_CMN_BLK_AHB_ARES] = { 0x3a010, 0 },
[GCC_CMN_BLK_SYS_ARES] = { 0x3a010, 1 },
[GCC_CMN_BLK_APU_ARES] = { 0x3a010, 2 },
+ [GCC_CRYPTO_BCR] = { 0x16000, 0 },
[GCC_DCC_BCR] = { 0x35000, 0 },
[GCC_DDRSS_BCR] = { 0x11000, 0 },
[GCC_IMEM_BCR] = { 0x0e000, 0 },
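
A minimal stand-alone sketch (invented structs, not the kernel ones) of the pattern every gcc-ipq9574.c hunk above converges on: const-qualifying the clk_init_data compound literal. A const-qualified compound literal at file scope is a distinct read-only object, so the initialiser data can be placed in .rodata instead of writable .data.

struct init_data {
	const char *name;
	unsigned int num_parents;
};

struct clk_desc {
	const struct init_data *init;	/* pointer may legally target const data */
};

/* Non-const literal: the anonymous object stays writable (.data). */
static struct clk_desc writable_init_clk = {
	.init = &(struct init_data){ .name = "demo_clk", .num_parents = 1 },
};

/* Const literal: the anonymous object can be placed in .rodata. */
static struct clk_desc readonly_init_clk = {
	.init = &(const struct init_data){ .name = "demo_clk", .num_parents = 1 },
};
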
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
index 096deff2ba25..48995e50c6bd 100644
--- a/drivers/clk/qcom/gcc-qcm2290.c
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -650,7 +650,7 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
.name = "gcc_usb30_prim_mock_utmi_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -686,7 +686,7 @@ static struct clk_rcg2 gcc_camss_axi_clk_src = {
.name = "gcc_camss_axi_clk_src",
.parent_data = gcc_parents_4,
.num_parents = ARRAY_SIZE(gcc_parents_4),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -706,7 +706,7 @@ static struct clk_rcg2 gcc_camss_cci_clk_src = {
.name = "gcc_camss_cci_clk_src",
.parent_data = gcc_parents_9,
.num_parents = ARRAY_SIZE(gcc_parents_9),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -728,7 +728,7 @@ static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
.name = "gcc_camss_csi0phytimer_clk_src",
.parent_data = gcc_parents_5,
.num_parents = ARRAY_SIZE(gcc_parents_5),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -742,7 +742,7 @@ static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
.name = "gcc_camss_csi1phytimer_clk_src",
.parent_data = gcc_parents_5,
.num_parents = ARRAY_SIZE(gcc_parents_5),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -764,7 +764,7 @@ static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -779,7 +779,7 @@ static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -794,7 +794,7 @@ static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -809,7 +809,7 @@ static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -830,7 +830,7 @@ static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
.name = "gcc_camss_ope_ahb_clk_src",
.parent_data = gcc_parents_6,
.num_parents = ARRAY_SIZE(gcc_parents_6),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -854,7 +854,7 @@ static struct clk_rcg2 gcc_camss_ope_clk_src = {
.parent_data = gcc_parents_6,
.num_parents = ARRAY_SIZE(gcc_parents_6),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -888,7 +888,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
.name = "gcc_camss_tfe_0_clk_src",
.parent_data = gcc_parents_7,
.num_parents = ARRAY_SIZE(gcc_parents_7),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -912,7 +912,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
.name = "gcc_camss_tfe_0_csid_clk_src",
.parent_data = gcc_parents_8,
.num_parents = ARRAY_SIZE(gcc_parents_8),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -926,7 +926,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
.name = "gcc_camss_tfe_1_clk_src",
.parent_data = gcc_parents_7,
.num_parents = ARRAY_SIZE(gcc_parents_7),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -940,7 +940,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
.name = "gcc_camss_tfe_1_csid_clk_src",
.parent_data = gcc_parents_8,
.num_parents = ARRAY_SIZE(gcc_parents_8),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -963,7 +963,7 @@ static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
.parent_data = gcc_parents_10,
.num_parents = ARRAY_SIZE(gcc_parents_10),
.flags = CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -984,7 +984,7 @@ static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
.name = "gcc_camss_top_ahb_clk_src",
.parent_data = gcc_parents_4,
.num_parents = ARRAY_SIZE(gcc_parents_4),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1006,7 +1006,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
.name = "gcc_gp1_clk_src",
.parent_data = gcc_parents_2,
.num_parents = ARRAY_SIZE(gcc_parents_2),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1020,7 +1020,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
.name = "gcc_gp2_clk_src",
.parent_data = gcc_parents_2,
.num_parents = ARRAY_SIZE(gcc_parents_2),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1034,7 +1034,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
.name = "gcc_gp3_clk_src",
.parent_data = gcc_parents_2,
.num_parents = ARRAY_SIZE(gcc_parents_2),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1054,7 +1054,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
.name = "gcc_pdm2_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1082,7 +1082,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.name = "gcc_qupv3_wrap0_s0_clk_src",
.parent_data = gcc_parents_1,
.num_parents = ARRAY_SIZE(gcc_parents_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
@@ -1098,7 +1098,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.name = "gcc_qupv3_wrap0_s1_clk_src",
.parent_data = gcc_parents_1,
.num_parents = ARRAY_SIZE(gcc_parents_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -1114,7 +1114,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.name = "gcc_qupv3_wrap0_s2_clk_src",
.parent_data = gcc_parents_1,
.num_parents = ARRAY_SIZE(gcc_parents_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -1130,7 +1130,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.name = "gcc_qupv3_wrap0_s3_clk_src",
.parent_data = gcc_parents_1,
.num_parents = ARRAY_SIZE(gcc_parents_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -1146,7 +1146,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.name = "gcc_qupv3_wrap0_s4_clk_src",
.parent_data = gcc_parents_1,
.num_parents = ARRAY_SIZE(gcc_parents_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -1162,7 +1162,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.name = "gcc_qupv3_wrap0_s5_clk_src",
.parent_data = gcc_parents_1,
.num_parents = ARRAY_SIZE(gcc_parents_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -1219,7 +1219,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
.name = "gcc_sdcc1_ice_core_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1266,7 +1266,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
.name = "gcc_usb30_prim_master_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1280,7 +1280,7 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
.name = "gcc_usb3_prim_phy_aux_clk_src",
.parent_data = gcc_parents_13,
.num_parents = ARRAY_SIZE(gcc_parents_13),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -1303,7 +1303,7 @@ static struct clk_rcg2 gcc_video_venus_clk_src = {
.parent_data = gcc_parents_14,
.num_parents = ARRAY_SIZE(gcc_parents_14),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
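
The qcm2290 hunks above switch a set of RCGs from clk_rcg2_ops to clk_rcg2_shared_ops, which cache the programmed configuration and park the RCG on its safe source while the clock is disabled, so the parent PLL can be shut off without leaving the mux on a dead input. A simplified, hypothetical RCG definition showing where the ops are selected (the register offset, gcc_parent_map_0/gcc_parents_0 and ftbl_demo are placeholders):

static struct clk_rcg2 demo_clk_src = {
	.cmd_rcgr = 0x1000,			/* hypothetical register offset */
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = gcc_parent_map_0,		/* assumed parent map */
	.freq_tbl = ftbl_demo,			/* assumed frequency table */
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "demo_clk_src",
		.parent_data = gcc_parents_0,
		.num_parents = ARRAY_SIZE(gcc_parents_0),
		.ops = &clk_rcg2_shared_ops,	/* was &clk_rcg2_ops before the change */
	},
};
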
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index 04a99dbaa57e..b90c71637814 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/regmap.h>
@@ -7421,9 +7422,19 @@ static int gcc_sc8280xp_probe(struct platform_device *pdev)
struct regmap *regmap;
int ret;
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
regmap = qcom_cc_map(pdev, &gcc_sc8280xp_desc);
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
return PTR_ERR(regmap);
+ }
/*
* Keep the clocks always-ON
@@ -7445,7 +7456,10 @@ static int gcc_sc8280xp_probe(struct platform_device *pdev)
if (ret)
return ret;
- return qcom_cc_really_probe(pdev, &gcc_sc8280xp_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &gcc_sc8280xp_desc, regmap);
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
}
static const struct of_device_id gcc_sc8280xp_match_table[] = {
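
The sc8280xp probe hunks above add the usual runtime-PM shape for a clock-controller probe: enable runtime PM, take a reference before touching registers, and drop it on every return path. A condensed sketch of that shape (my_probe/my_desc are placeholder names; the pm_runtime_* calls come from the newly included <linux/pm_runtime.h>):

static int my_probe(struct platform_device *pdev)
{
	struct regmap *regmap;
	int ret;

	ret = devm_pm_runtime_enable(&pdev->dev);	/* disabled again automatically on unbind */
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);	/* power the block up before register access */
	if (ret)
		return ret;

	regmap = qcom_cc_map(pdev, &my_desc);
	if (IS_ERR(regmap)) {
		pm_runtime_put(&pdev->dev);		/* balance the get on the error path */
		return PTR_ERR(regmap);
	}

	ret = qcom_cc_really_probe(pdev, &my_desc, regmap);
	pm_runtime_put(&pdev->dev);			/* registration done, drop the reference */

	return ret;
}
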
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index db918c92a522..6afce8e42ede 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -25,8 +25,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_SLEEP_CLK,
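
The sdm660 hunk above drops a local F() macro that duplicated the shared definition in clk-rcg.h; its expansion shows how a frequency-table row encodes its fields, with the pre-divider stored as 2 * divider - 1. A stand-alone sketch of that encoding (struct and macro names invented; the field layout mirrors struct freq_tbl):

struct freq_tbl_demo {
	unsigned long freq;
	unsigned char src;
	unsigned char pre_div;	/* stored as 2 * divider - 1 by the macro below */
	unsigned short m;
	unsigned short n;
};

#define F_DEMO(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

/* 19.2 MHz from source 0 with divider 1 and no M/N: the pre_div field becomes 1. */
static const struct freq_tbl_demo demo_tbl[] = {
	F_DEMO(19200000, 0, 1, 0, 0),
	{ }
};
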
diff --git a/drivers/clk/qcom/gcc-sdx75.c b/drivers/clk/qcom/gcc-sdx75.c
new file mode 100644
index 000000000000..b6772abdcec5
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdx75.c
@@ -0,0 +1,2970 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sdx75-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_EMAC0_SGMIIPHY_MAC_RCLK,
+ DT_EMAC0_SGMIIPHY_MAC_TCLK,
+ DT_EMAC0_SGMIIPHY_RCLK,
+ DT_EMAC0_SGMIIPHY_TCLK,
+ DT_EMAC1_SGMIIPHY_MAC_RCLK,
+ DT_EMAC1_SGMIIPHY_MAC_TCLK,
+ DT_EMAC1_SGMIIPHY_RCLK,
+ DT_EMAC1_SGMIIPHY_TCLK,
+ DT_PCIE20_PHY_AUX_CLK,
+ DT_PCIE_1_PIPE_CLK,
+ DT_PCIE_2_PIPE_CLK,
+ DT_PCIE_PIPE_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_EMAC0_SGMIIPHY_MAC_RCLK,
+ P_EMAC0_SGMIIPHY_MAC_TCLK,
+ P_EMAC0_SGMIIPHY_RCLK,
+ P_EMAC0_SGMIIPHY_TCLK,
+ P_EMAC1_SGMIIPHY_MAC_RCLK,
+ P_EMAC1_SGMIIPHY_MAC_TCLK,
+ P_EMAC1_SGMIIPHY_RCLK,
+ P_EMAC1_SGMIIPHY_TCLK,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL5_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL8_OUT_MAIN,
+ P_PCIE20_PHY_AUX_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_PCIE_2_PIPE_CLK,
+ P_PCIE_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x7d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x7d000,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x7d000,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x7d000,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x8000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x7d000,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 2 },
+ { P_GPLL5_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll5.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_EMAC0_SGMIIPHY_RCLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_EMAC0_SGMIIPHY_RCLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_EMAC0_SGMIIPHY_TCLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_EMAC0_SGMIIPHY_TCLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_EMAC0_SGMIIPHY_MAC_RCLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_EMAC0_SGMIIPHY_MAC_RCLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_EMAC0_SGMIIPHY_MAC_TCLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_EMAC0_SGMIIPHY_MAC_TCLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_EMAC1_SGMIIPHY_RCLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_EMAC1_SGMIIPHY_RCLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_EMAC1_SGMIIPHY_TCLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_EMAC1_SGMIIPHY_TCLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_EMAC1_SGMIIPHY_MAC_RCLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_EMAC1_SGMIIPHY_MAC_RCLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_EMAC1_SGMIIPHY_MAC_TCLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_EMAC1_SGMIIPHY_MAC_TCLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_PCIE20_PHY_AUX_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_PCIE20_PHY_AUX_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_17[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_18[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_19[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_19[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static struct clk_regmap_mux gcc_emac0_cc_sgmiiphy_rx_clk_src = {
+ .reg = 0x71060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_5,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_cc_sgmiiphy_rx_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_emac0_cc_sgmiiphy_tx_clk_src = {
+ .reg = 0x71058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_cc_sgmiiphy_tx_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_emac0_sgmiiphy_mac_rclk_src = {
+ .reg = 0x71098,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_7,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_sgmiiphy_mac_rclk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_emac0_sgmiiphy_mac_tclk_src = {
+ .reg = 0x71094,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_sgmiiphy_mac_tclk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_emac1_cc_sgmiiphy_rx_clk_src = {
+ .reg = 0x72060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_cc_sgmiiphy_rx_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_emac1_cc_sgmiiphy_tx_clk_src = {
+ .reg = 0x72058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_cc_sgmiiphy_tx_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_emac1_sgmiiphy_mac_rclk_src = {
+ .reg = 0x72098,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_sgmiiphy_mac_rclk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_emac1_sgmiiphy_mac_tclk_src = {
+ .reg = 0x72094,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_sgmiiphy_mac_tclk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x67084,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_1_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_2_pipe_clk_src = {
+ .reg = 0x68050,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_2_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_aux_clk_src = {
+ .reg = 0x53074,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_15,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_aux_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_pipe_clk_src = {
+ .reg = 0x53058,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_phy_pipe_clk_src = {
+ .reg = 0x27070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_19,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_19,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_19),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_eee_emac0_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_eee_emac0_clk_src = {
+ .cmd_rcgr = 0x710b0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_eee_emac0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eee_emac0_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_eee_emac1_clk_src = {
+ .cmd_rcgr = 0x720b0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_eee_emac0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eee_emac1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_phy_aux_clk_src = {
+ .cmd_rcgr = 0x7102c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_ptp_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(125000000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(230400000, P_GPLL5_OUT_MAIN, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_ptp_clk_src = {
+ .cmd_rcgr = 0x7107c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_emac0_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_ptp_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_rgmii_clk_src[] = {
+ F(5000000, P_GPLL0_OUT_EVEN, 10, 1, 6),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_rgmii_clk_src = {
+ .cmd_rcgr = 0x71064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rgmii_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_emac1_phy_aux_clk_src = {
+ .cmd_rcgr = 0x7202c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_emac1_ptp_clk_src = {
+ .cmd_rcgr = 0x7207c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_emac0_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_ptp_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_emac1_rgmii_clk_src = {
+ .cmd_rcgr = 0x72064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_rgmii_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x47004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x48004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x49004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_phy_clk_src = {
+ .cmd_rcgr = 0x67044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_phy_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_1_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6706c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_1_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_phy_clk_src = {
+ .cmd_rcgr = 0x68064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_phy_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x68038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_1_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_aux_phy_clk_src = {
+ .cmd_rcgr = 0x5305c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_aux_phy_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_rchng_phy_clk_src = {
+ .cmd_rcgr = 0x53078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_1_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rchng_phy_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x34010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x6c010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x6c148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x6c280,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x6c3b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x6c4f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x6c628,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x6c760,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x6c898,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s8_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s8_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s8_clk_src = {
+ .cmd_rcgr = 0x6c9d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s8_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x6b014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_17,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GPLL8_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x6a018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_18,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_master_clk_src = {
+ .cmd_rcgr = 0x27034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x2704c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb3_phy_aux_clk_src[] = {
+ F(1000000, P_BI_TCXO, 1, 5, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x27074,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_1_pipe_div2_clk_src = {
+ .reg = 0x67088,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_2_pipe_div2_clk_src = {
+ .reg = 0x68088,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_pipe_div2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_mock_utmi_postdiv_clk_src = {
+ .reg = 0x27064,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x37004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x37004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eee_emac0_clk = {
+ .halt_reg = 0x710ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x710ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eee_emac0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_eee_emac0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eee_emac1_clk = {
+ .halt_reg = 0x720ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x720ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eee_emac1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_eee_emac1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_axi_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_cc_sgmiiphy_rx_clk = {
+ .halt_reg = 0x7105c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7105c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_cc_sgmiiphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_cc_sgmiiphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_cc_sgmiiphy_tx_clk = {
+ .halt_reg = 0x71054,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x71054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_cc_sgmiiphy_tx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_cc_sgmiiphy_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_phy_aux_clk = {
+ .halt_reg = 0x71028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_ptp_clk = {
+ .halt_reg = 0x71044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_rgmii_clk = {
+ .halt_reg = 0x71050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_rpcs_rx_clk = {
+ .halt_reg = 0x710a0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x710a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rpcs_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_sgmiiphy_mac_rclk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_rpcs_tx_clk = {
+ .halt_reg = 0x7109c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rpcs_tx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_sgmiiphy_mac_tclk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_slv_ahb_clk = {
+ .halt_reg = 0x71024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_xgxs_rx_clk = {
+ .halt_reg = 0x710a8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x710a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_xgxs_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_sgmiiphy_mac_rclk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_xgxs_tx_clk = {
+ .halt_reg = 0x710a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x710a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_xgxs_tx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_sgmiiphy_mac_tclk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_axi_clk = {
+ .halt_reg = 0x72018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x72018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x72018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_cc_sgmiiphy_rx_clk = {
+ .halt_reg = 0x7205c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7205c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_cc_sgmiiphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac1_cc_sgmiiphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_cc_sgmiiphy_tx_clk = {
+ .halt_reg = 0x72054,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x72054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_cc_sgmiiphy_tx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac1_cc_sgmiiphy_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_phy_aux_clk = {
+ .halt_reg = 0x72028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x72028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac1_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_ptp_clk = {
+ .halt_reg = 0x72044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x72044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac1_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_rgmii_clk = {
+ .halt_reg = 0x72050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x72050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac1_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_rpcs_rx_clk = {
+ .halt_reg = 0x720a0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x720a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_rpcs_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac1_sgmiiphy_mac_rclk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_rpcs_tx_clk = {
+ .halt_reg = 0x7209c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7209c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_rpcs_tx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac1_sgmiiphy_mac_tclk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_slv_ahb_clk = {
+ .halt_reg = 0x72024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x72024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x72024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_xgxs_rx_clk = {
+ .halt_reg = 0x720a8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x720a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_xgxs_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac1_sgmiiphy_mac_rclk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac1_xgxs_tx_clk = {
+ .halt_reg = 0x720a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x720a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac1_xgxs_tx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac1_sgmiiphy_mac_tclk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_0_clkref_en = {
+ .halt_reg = 0x98108,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x98108,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_1_clkref_en = {
+ .halt_reg = 0x9810c,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x9810c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x47000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x49000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_en = {
+ .halt_reg = 0x98004,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x98004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x67038,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_phy_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x67034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x67034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_en = {
+ .halt_reg = 0x98114,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x98114,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x67028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x67068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x6705c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_div2_clk = {
+ .halt_reg = 0x6708c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7d020,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x6701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x67018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0x68058,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_aux_phy_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0x68034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x68034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_clkref_en = {
+ .halt_reg = 0x98110,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x98110,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0x68028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_phy_rchng_clk = {
+ .halt_reg = 0x68098,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0x6807c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_div2_clk = {
+ .halt_reg = 0x6808c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7d020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_pipe_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0x6801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0x68018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_aux_clk = {
+ .halt_reg = 0x5303c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x5303c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_cfg_ahb_clk = {
+ .halt_reg = 0x53034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x53034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_mstr_axi_clk = {
+ .halt_reg = 0x53028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x53028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_pipe_clk = {
+ .halt_reg = 0x5304c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x5304c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rchng_phy_clk = {
+ .halt_reg = 0x53038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x53038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rchng_phy_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_rchng_phy_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_sleep_clk = {
+ .halt_reg = 0x53048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x53048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_phy_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_axi_clk = {
+ .halt_reg = 0x5301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_q2a_axi_clk = {
+ .halt_reg = 0x53018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x53018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x34004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x34008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x34008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x2d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x2d008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x6c004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x6c13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x6c274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x6c3ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x6c4e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x6c61c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x6c754,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x6c88c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s8_clk = {
+ .halt_reg = 0x6c9c4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s8_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s8_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x2d000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x2d004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x6b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x6b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x6a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_clkref_en = {
+ .halt_reg = 0x98008,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x98008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x27018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x27030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mstr_axi_clk = {
+ .halt_reg = 0x27024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0x2702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_slv_ahb_clk = {
+ .halt_reg = 0x27028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x27068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x2706c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x2706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_en = {
+ .halt_reg = 0x98000,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x98000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x29004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x29004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x29004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct gdsc gcc_emac0_gdsc = {
+ .gdscr = 0x71004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_emac0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_emac1_gdsc = {
+ .gdscr = 0x72004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_emac1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_pcie_1_gdsc = {
+ .gdscr = 0x67004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_pcie_1_phy_gdsc = {
+ .gdscr = 0x56004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_pcie_2_gdsc = {
+ .gdscr = 0x68004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_pcie_2_phy_gdsc = {
+ .gdscr = 0x6e004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_2_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_pcie_gdsc = {
+ .gdscr = 0x53004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_pcie_phy_gdsc = {
+ .gdscr = 0x54004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_gdsc = {
+ .gdscr = 0x27004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_phy_gdsc = {
+ .gdscr = 0x28008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_sdx75_clocks[] = {
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_EEE_EMAC0_CLK] = &gcc_eee_emac0_clk.clkr,
+ [GCC_EEE_EMAC0_CLK_SRC] = &gcc_eee_emac0_clk_src.clkr,
+ [GCC_EEE_EMAC1_CLK] = &gcc_eee_emac1_clk.clkr,
+ [GCC_EEE_EMAC1_CLK_SRC] = &gcc_eee_emac1_clk_src.clkr,
+ [GCC_EMAC0_AXI_CLK] = &gcc_emac0_axi_clk.clkr,
+ [GCC_EMAC0_CC_SGMIIPHY_RX_CLK] = &gcc_emac0_cc_sgmiiphy_rx_clk.clkr,
+ [GCC_EMAC0_CC_SGMIIPHY_RX_CLK_SRC] = &gcc_emac0_cc_sgmiiphy_rx_clk_src.clkr,
+ [GCC_EMAC0_CC_SGMIIPHY_TX_CLK] = &gcc_emac0_cc_sgmiiphy_tx_clk.clkr,
+ [GCC_EMAC0_CC_SGMIIPHY_TX_CLK_SRC] = &gcc_emac0_cc_sgmiiphy_tx_clk_src.clkr,
+ [GCC_EMAC0_PHY_AUX_CLK] = &gcc_emac0_phy_aux_clk.clkr,
+ [GCC_EMAC0_PHY_AUX_CLK_SRC] = &gcc_emac0_phy_aux_clk_src.clkr,
+ [GCC_EMAC0_PTP_CLK] = &gcc_emac0_ptp_clk.clkr,
+ [GCC_EMAC0_PTP_CLK_SRC] = &gcc_emac0_ptp_clk_src.clkr,
+ [GCC_EMAC0_RGMII_CLK] = &gcc_emac0_rgmii_clk.clkr,
+ [GCC_EMAC0_RGMII_CLK_SRC] = &gcc_emac0_rgmii_clk_src.clkr,
+ [GCC_EMAC0_RPCS_RX_CLK] = &gcc_emac0_rpcs_rx_clk.clkr,
+ [GCC_EMAC0_RPCS_TX_CLK] = &gcc_emac0_rpcs_tx_clk.clkr,
+ [GCC_EMAC0_SGMIIPHY_MAC_RCLK_SRC] = &gcc_emac0_sgmiiphy_mac_rclk_src.clkr,
+ [GCC_EMAC0_SGMIIPHY_MAC_TCLK_SRC] = &gcc_emac0_sgmiiphy_mac_tclk_src.clkr,
+ [GCC_EMAC0_SLV_AHB_CLK] = &gcc_emac0_slv_ahb_clk.clkr,
+ [GCC_EMAC0_XGXS_RX_CLK] = &gcc_emac0_xgxs_rx_clk.clkr,
+ [GCC_EMAC0_XGXS_TX_CLK] = &gcc_emac0_xgxs_tx_clk.clkr,
+ [GCC_EMAC1_AXI_CLK] = &gcc_emac1_axi_clk.clkr,
+ [GCC_EMAC1_CC_SGMIIPHY_RX_CLK] = &gcc_emac1_cc_sgmiiphy_rx_clk.clkr,
+ [GCC_EMAC1_CC_SGMIIPHY_RX_CLK_SRC] = &gcc_emac1_cc_sgmiiphy_rx_clk_src.clkr,
+ [GCC_EMAC1_CC_SGMIIPHY_TX_CLK] = &gcc_emac1_cc_sgmiiphy_tx_clk.clkr,
+ [GCC_EMAC1_CC_SGMIIPHY_TX_CLK_SRC] = &gcc_emac1_cc_sgmiiphy_tx_clk_src.clkr,
+ [GCC_EMAC1_PHY_AUX_CLK] = &gcc_emac1_phy_aux_clk.clkr,
+ [GCC_EMAC1_PHY_AUX_CLK_SRC] = &gcc_emac1_phy_aux_clk_src.clkr,
+ [GCC_EMAC1_PTP_CLK] = &gcc_emac1_ptp_clk.clkr,
+ [GCC_EMAC1_PTP_CLK_SRC] = &gcc_emac1_ptp_clk_src.clkr,
+ [GCC_EMAC1_RGMII_CLK] = &gcc_emac1_rgmii_clk.clkr,
+ [GCC_EMAC1_RGMII_CLK_SRC] = &gcc_emac1_rgmii_clk_src.clkr,
+ [GCC_EMAC1_RPCS_RX_CLK] = &gcc_emac1_rpcs_rx_clk.clkr,
+ [GCC_EMAC1_RPCS_TX_CLK] = &gcc_emac1_rpcs_tx_clk.clkr,
+ [GCC_EMAC1_SGMIIPHY_MAC_RCLK_SRC] = &gcc_emac1_sgmiiphy_mac_rclk_src.clkr,
+ [GCC_EMAC1_SGMIIPHY_MAC_TCLK_SRC] = &gcc_emac1_sgmiiphy_mac_tclk_src.clkr,
+ [GCC_EMAC1_SLV_AHB_CLK] = &gcc_emac1_slv_ahb_clk.clkr,
+ [GCC_EMAC1_XGXS_RX_CLK] = &gcc_emac1_xgxs_rx_clk.clkr,
+ [GCC_EMAC1_XGXS_TX_CLK] = &gcc_emac1_xgxs_tx_clk.clkr,
+ [GCC_EMAC_0_CLKREF_EN] = &gcc_emac_0_clkref_en.clkr,
+ [GCC_EMAC_1_CLKREF_EN] = &gcc_emac_1_clkref_en.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_PCIE_0_CLKREF_EN] = &gcc_pcie_0_clkref_en.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_PHY_CLK_SRC] = &gcc_pcie_1_aux_phy_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_EN] = &gcc_pcie_1_clkref_en.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_DIV2_CLK] = &gcc_pcie_1_pipe_div2_clk.clkr,
+ [GCC_PCIE_1_PIPE_DIV2_CLK_SRC] = &gcc_pcie_1_pipe_div2_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_PHY_CLK_SRC] = &gcc_pcie_2_aux_phy_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_CLKREF_EN] = &gcc_pcie_2_clkref_en.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK] = &gcc_pcie_2_phy_rchng_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_PIPE_CLK_SRC] = &gcc_pcie_2_pipe_clk_src.clkr,
+ [GCC_PCIE_2_PIPE_DIV2_CLK] = &gcc_pcie_2_pipe_div2_clk.clkr,
+ [GCC_PCIE_2_PIPE_DIV2_CLK_SRC] = &gcc_pcie_2_pipe_div2_clk_src.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
+ [GCC_PCIE_AUX_CLK_SRC] = &gcc_pcie_aux_clk_src.clkr,
+ [GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
+ [GCC_PCIE_CFG_AHB_CLK] = &gcc_pcie_cfg_ahb_clk.clkr,
+ [GCC_PCIE_MSTR_AXI_CLK] = &gcc_pcie_mstr_axi_clk.clkr,
+ [GCC_PCIE_PIPE_CLK] = &gcc_pcie_pipe_clk.clkr,
+ [GCC_PCIE_PIPE_CLK_SRC] = &gcc_pcie_pipe_clk_src.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK] = &gcc_pcie_rchng_phy_clk.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK_SRC] = &gcc_pcie_rchng_phy_clk_src.clkr,
+ [GCC_PCIE_SLEEP_CLK] = &gcc_pcie_sleep_clk.clkr,
+ [GCC_PCIE_SLV_AXI_CLK] = &gcc_pcie_slv_axi_clk.clkr,
+ [GCC_PCIE_SLV_Q2A_AXI_CLK] = &gcc_pcie_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S8_CLK] = &gcc_qupv3_wrap0_s8_clk.clkr,
+ [GCC_QUPV3_WRAP0_S8_CLK_SRC] = &gcc_qupv3_wrap0_s8_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_USB2_CLKREF_EN] = &gcc_usb2_clkref_en.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MASTER_CLK_SRC] = &gcc_usb30_master_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_MSTR_AXI_CLK] = &gcc_usb30_mstr_axi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB30_SLV_AHB_CLK] = &gcc_usb30_slv_ahb_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK_SRC] = &gcc_usb3_phy_aux_clk_src.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK_SRC] = &gcc_usb3_phy_pipe_clk_src.clkr,
+ [GCC_USB3_PRIM_CLKREF_EN] = &gcc_usb3_prim_clkref_en.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL5] = &gpll5.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL8] = &gpll8.clkr,
+};
+
+static struct gdsc *gcc_sdx75_gdscs[] = {
+ [GCC_EMAC0_GDSC] = &gcc_emac0_gdsc,
+ [GCC_EMAC1_GDSC] = &gcc_emac1_gdsc,
+ [GCC_PCIE_1_GDSC] = &gcc_pcie_1_gdsc,
+ [GCC_PCIE_1_PHY_GDSC] = &gcc_pcie_1_phy_gdsc,
+ [GCC_PCIE_2_GDSC] = &gcc_pcie_2_gdsc,
+ [GCC_PCIE_2_PHY_GDSC] = &gcc_pcie_2_phy_gdsc,
+ [GCC_PCIE_GDSC] = &gcc_pcie_gdsc,
+ [GCC_PCIE_PHY_GDSC] = &gcc_pcie_phy_gdsc,
+ [GCC_USB30_GDSC] = &gcc_usb30_gdsc,
+ [GCC_USB3_PHY_GDSC] = &gcc_usb3_phy_gdsc,
+};
+
+static const struct qcom_reset_map gcc_sdx75_resets[] = {
+ [GCC_EMAC0_BCR] = { 0x71000 },
+ [GCC_EMAC0_RGMII_CLK_ARES] = { 0x71050, 2 },
+ [GCC_EMAC1_BCR] = { 0x72000 },
+ [GCC_EMMC_BCR] = { 0x6b000 },
+ [GCC_PCIE_1_BCR] = { 0x67000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x9e700 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x56120 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x56000 },
+ [GCC_PCIE_2_BCR] = { 0x68000 },
+ [GCC_PCIE_2_LINK_DOWN_BCR] = { 0x9f700 },
+ [GCC_PCIE_2_NOCSR_COM_PHY_BCR] = { 0x6e130 },
+ [GCC_PCIE_2_PHY_BCR] = { 0x6e000 },
+ [GCC_PCIE_BCR] = { 0x53000 },
+ [GCC_PCIE_LINK_DOWN_BCR] = { 0x87000 },
+ [GCC_PCIE_NOCSR_COM_PHY_BCR] = { 0x88008 },
+ [GCC_PCIE_PHY_BCR] = { 0x54000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x88000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x88004 },
+ [GCC_PCIE_PHY_NOCSR_COM_PHY_BCR] = { 0x8800c },
+ [GCC_QUSB2PHY_BCR] = { 0x2a000 },
+ [GCC_TCSR_PCIE_BCR] = { 0x84000 },
+ [GCC_USB30_BCR] = { 0x27000 },
+ [GCC_USB3_PHY_BCR] = { 0x28000 },
+ [GCC_USB3PHY_PHY_BCR] = { 0x28004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x29000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s8_clk_src),
+};
+
+static const struct regmap_config gcc_sdx75_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sdx75_desc = {
+ .config = &gcc_sdx75_regmap_config,
+ .clks = gcc_sdx75_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdx75_clocks),
+ .resets = gcc_sdx75_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdx75_resets),
+ .gdscs = gcc_sdx75_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdx75_gdscs),
+};
+
+static const struct of_device_id gcc_sdx75_match_table[] = {
+ { .compatible = "qcom,sdx75-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdx75_match_table);
+
+static int gcc_sdx75_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sdx75_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /*
+ * Keep clocks always enabled:
+ * gcc_ahb_pcie_link_clk
+ * gcc_xo_pcie_link_clk
+ */
+ regmap_update_bits(regmap, 0x3e004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x3e008, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &gcc_sdx75_desc, regmap);
+}
+
+static struct platform_driver gcc_sdx75_driver = {
+ .probe = gcc_sdx75_probe,
+ .driver = {
+ .name = "gcc-sdx75",
+ .of_match_table = gcc_sdx75_match_table,
+ },
+};
+
+static int __init gcc_sdx75_init(void)
+{
+ return platform_driver_register(&gcc_sdx75_driver);
+}
+subsys_initcall(gcc_sdx75_init);
+
+static void __exit gcc_sdx75_exit(void)
+{
+ platform_driver_unregister(&gcc_sdx75_driver);
+}
+module_exit(gcc_sdx75_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDX75 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
index 5f09aefa7fb9..033e308ff865 100644
--- a/drivers/clk/qcom/gcc-sm6115.c
+++ b/drivers/clk/qcom/gcc-sm6115.c
@@ -119,6 +119,8 @@ static const struct alpha_pll_config gpll10_config = {
.vco_mask = GENMASK(21, 20),
.main_output_mask = BIT(0),
.config_ctl_val = 0x4001055b,
+ .test_ctl_hi1_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
};

static struct clk_alpha_pll gpll10 = {
@@ -170,6 +172,8 @@ static const struct alpha_pll_config gpll11_config = {
.vco_val = 0x2 << 20,
.vco_mask = GENMASK(21, 20),
.config_ctl_val = 0x4001055b,
+ .test_ctl_hi1_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
};

static struct clk_alpha_pll gpll11 = {
@@ -362,6 +366,8 @@ static const struct alpha_pll_config gpll8_config = {
.post_div_val = 0x1 << 8,
.post_div_mask = GENMASK(11, 8),
.config_ctl_val = 0x4001055b,
+ .test_ctl_hi1_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
};

static struct clk_alpha_pll gpll8 = {
@@ -413,6 +419,8 @@ static const struct alpha_pll_config gpll9_config = {
.post_div_mask = GENMASK(9, 8),
.main_output_mask = BIT(0),
.config_ctl_val = 0x00004289,
+ .test_ctl_mask = GENMASK(31, 0),
+ .test_ctl_val = 0x08000000,
};

static struct clk_alpha_pll gpll9 = {
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index 84764cc3db4f..75635d40a12d 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -334,6 +334,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_gp1_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk_src",
.parent_data = gcc_parent_data_1,
@@ -349,6 +350,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_gp1_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk_src",
.parent_data = gcc_parent_data_1,
@@ -364,6 +366,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_gp1_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk_src",
.parent_data = gcc_parent_data_1,
@@ -384,6 +387,7 @@ static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_2,
.freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk_src",
.parent_data = gcc_parent_data_2,
@@ -405,6 +409,7 @@ static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_phy_rchng_clk_src",
.parent_data = gcc_parent_data_0,
@@ -420,6 +425,7 @@ static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_2,
.freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_aux_clk_src",
.parent_data = gcc_parent_data_2,
@@ -435,6 +441,7 @@ static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_phy_rchng_clk_src",
.parent_data = gcc_parent_data_0,
@@ -455,6 +462,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk_src",
.parent_data = gcc_parent_data_0,
@@ -493,6 +501,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
};
@@ -510,6 +519,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
};
@@ -527,6 +537,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
};
@@ -544,6 +555,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
};
@@ -561,6 +573,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
};
@@ -590,6 +603,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s5_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
};
@@ -607,6 +621,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
};
@@ -624,6 +639,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
};
@@ -660,6 +676,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
};
@@ -677,6 +694,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
};
@@ -694,6 +712,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
};
@@ -711,6 +730,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
};
@@ -728,6 +748,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
};
@@ -745,6 +766,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
};
@@ -762,6 +784,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
};
@@ -779,6 +802,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
};
@@ -796,6 +820,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
};
@@ -813,6 +838,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
};
@@ -830,6 +856,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
};
@@ -847,6 +874,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
};
@@ -864,6 +892,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
};
@@ -881,6 +910,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
};
@@ -899,6 +929,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_7,
.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_7,
@@ -921,6 +952,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk_src",
.parent_data = gcc_parent_data_0,
@@ -944,6 +976,7 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_axi_clk_src",
.parent_data = gcc_parent_data_0,
@@ -966,6 +999,7 @@ static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_ice_core_clk_src",
.parent_data = gcc_parent_data_0,
@@ -987,6 +1021,7 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_clk_src",
.parent_data = gcc_parent_data_3,
@@ -1002,6 +1037,7 @@ static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_unipro_core_clk_src",
.parent_data = gcc_parent_data_0,
@@ -1025,6 +1061,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_master_clk_src",
.parent_data = gcc_parent_data_0,
@@ -1040,6 +1077,7 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk_src",
.parent_data = gcc_parent_data_0,
@@ -1055,6 +1093,7 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_2,
.freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .hw_clk_ctrl = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_aux_clk_src",
.parent_data = gcc_parent_data_2,
diff --git a/drivers/clk/qcom/gpucc-sc8280xp.c b/drivers/clk/qcom/gpucc-sc8280xp.c
index ea1e9505c335..8e147ee294ee 100644
--- a/drivers/clk/qcom/gpucc-sc8280xp.c
+++ b/drivers/clk/qcom/gpucc-sc8280xp.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>

#include <dt-bindings/clock/qcom,gpucc-sc8280xp.h>
@@ -424,10 +425,21 @@ static struct qcom_cc_desc gpu_cc_sc8280xp_desc = {
static int gpu_cc_sc8280xp_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;

regmap = qcom_cc_map(pdev, &gpu_cc_sc8280xp_desc);
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
return PTR_ERR(regmap);
+ }

clk_lucid_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
@@ -439,7 +451,10 @@ static int gpu_cc_sc8280xp_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x109c, BIT(0), BIT(0));

- return qcom_cc_really_probe(pdev, &gpu_cc_sc8280xp_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &gpu_cc_sc8280xp_desc, regmap);
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
}

static const struct of_device_id gpu_cc_sc8280xp_match_table[] = {
diff --git a/drivers/clk/qcom/gpucc-sm6375.c b/drivers/clk/qcom/gpucc-sm6375.c
index d3620344a009..2d863dc3d83b 100644
--- a/drivers/clk/qcom/gpucc-sm6375.c
+++ b/drivers/clk/qcom/gpucc-sm6375.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>

#include <dt-bindings/clock/qcom,sm6375-gpucc.h>
@@ -434,15 +435,29 @@ MODULE_DEVICE_TABLE(of, gpucc_sm6375_match_table);

static int gpucc_sm6375_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
regmap = qcom_cc_map(pdev, &gpucc_sm6375_desc);
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
return PTR_ERR(regmap);
+ }
clk_lucid_pll_configure(&gpucc_pll0, regmap, &gpucc_pll0_config);
clk_lucid_pll_configure(&gpucc_pll1, regmap, &gpucc_pll1_config);
- return qcom_cc_really_probe(pdev, &gpucc_sm6375_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &gpucc_sm6375_desc, regmap);
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
}
static struct platform_driver gpucc_sm6375_driver = {
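Both probe changes above wrap register access in the same runtime-PM pattern; a minimal sketch of that shape is shown below, with every example_cc_* identifier being a placeholder rather than code from this patch.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "common.h"	/* qcom_cc_map(), qcom_cc_really_probe() */

/* Sketch only: example_cc_desc is a placeholder descriptor. */
static int example_cc_probe(struct platform_device *pdev)
{
	struct regmap *regmap;
	int ret;

	/* Arm runtime PM and power the block before touching registers. */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		return ret;

	regmap = qcom_cc_map(pdev, &example_cc_desc);
	if (IS_ERR(regmap)) {
		pm_runtime_put(&pdev->dev);
		return PTR_ERR(regmap);
	}

	ret = qcom_cc_really_probe(pdev, &example_cc_desc, regmap);

	/* Balance the resume_and_get above on both success and failure. */
	pm_runtime_put(&pdev->dev);

	return ret;
}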
diff --git a/drivers/clk/qcom/gpucc-sm8450.c b/drivers/clk/qcom/gpucc-sm8450.c
new file mode 100644
index 000000000000..16c0381b3087
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm8450.c
@@ -0,0 +1,766 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8450-gpucc.h>
+#include <dt-bindings/reset/qcom,sm8450-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x1d,
+ .alpha = 0xb000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x34,
+ .alpha = 0x1555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(150000000, P_GPLL0_OUT_MAIN_DIV, 2, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_xo_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_3,
+ .freq_tbl = ftbl_gpu_cc_xo_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_xo_clk_src",
+ .parent_data = gpu_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_demet_div_clk_src = {
+ .reg = 0x9054,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_demet_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x9430,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x942c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_xo_div_clk_src = {
+ .reg = 0x9050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_xo_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+ .halt_reg = 0x912c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x912c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x9130,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9130,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_demet_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_demet_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_demet_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_freq_measure_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_ff_clk = {
+ .halt_reg = 0x90c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x90a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_rdvm_clk = {
+ .halt_reg = 0x90c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_rdvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x90b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x9288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9288,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x928c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x928c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .clk_dis_wait_val = 8,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x905c,
+ .clamp_io_ctrl = 0x9504,
+ .resets = (unsigned int []){ GPUCC_GPU_CC_GX_BCR,
+ GPUCC_GPU_CC_ACD_BCR,
+ GPUCC_GPU_CC_GX_ACD_IROOT_BCR },
+ .reset_count = 3,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | SW_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm8450_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DEMET_CLK] = &gpu_cc_demet_clk.clkr,
+ [GPU_CC_DEMET_DIV_CLK_SRC] = &gpu_cc_demet_div_clk_src.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_FF_CLK] = &gpu_cc_gx_ff_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_RDVM_CLK] = &gpu_cc_gx_gfx3d_rdvm_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+ [GPU_CC_XO_CLK_SRC] = &gpu_cc_xo_clk_src.clkr,
+ [GPU_CC_XO_DIV_CLK_SRC] = &gpu_cc_xo_div_clk_src.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8450_resets[] = {
+ [GPUCC_GPU_CC_XO_BCR] = { 0x9000 },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x9058 },
+ [GPUCC_GPU_CC_CX_BCR] = { 0x9104 },
+ [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPUCC_GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPUCC_GPU_CC_FF_BCR] = { 0x9470 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPUCC_GPU_CC_GX_ACD_IROOT_BCR] = { 0x958c },
+};
+
+static struct gdsc *gpu_cc_sm8450_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm8450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8450_desc = {
+ .config = &gpu_cc_sm8450_regmap_config,
+ .clks = gpu_cc_sm8450_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm8450_clocks),
+ .resets = gpu_cc_sm8450_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm8450_resets),
+ .gdscs = gpu_cc_sm8450_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm8450_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm8450_match_table[] = {
+ { .compatible = "qcom,sm8450-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8450_match_table);
+
+static int gpu_cc_sm8450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm8450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm8450_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm8450_driver = {
+ .probe = gpu_cc_sm8450_probe,
+ .driver = {
+ .name = "sm8450-gpucc",
+ .of_match_table = gpu_cc_sm8450_match_table,
+ },
+};
+module_platform_driver(gpu_cc_sm8450_driver);
+
+MODULE_DESCRIPTION("QTI GPU_CC SM8450 Driver");
+MODULE_LICENSE("GPL");
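For context, a consumer driver would pick up the clocks registered above through the regular clk API; the following is only a sketch, where the "gmu" clock name and the example_* identifiers are assumptions and not part of this patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *gmu_clk;
	int ret;

	/* Hypothetical name; resolved through the consumer's clocks/clock-names
	 * properties, which would reference e.g. <&gpucc GPU_CC_CX_GMU_CLK>. */
	gmu_clk = devm_clk_get(&pdev->dev, "gmu");
	if (IS_ERR(gmu_clk))
		return PTR_ERR(gmu_clk);

	ret = clk_prepare_enable(gmu_clk);
	if (ret)
		return ret;

	/* ... program the hardware ... */

	clk_disable_unprepare(gmu_clk);
	return 0;
}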
diff --git a/drivers/clk/qcom/gpucc-sm8550.c b/drivers/clk/qcom/gpucc-sm8550.c
new file mode 100644
index 000000000000..8a2e3522af51
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm8550.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8550-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ /* .l includes RINGOSC_CAL_L_VAL, CAL_L_VAL, L_VAL fields */
+ .l = 0x4444000d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ /* .l includes RINGOSC_CAL_L_VAL, CAL_L_VAL, L_VAL fields */
+ .l = 0x44440016,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_xo_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_3,
+ .freq_tbl = ftbl_gpu_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_xo_clk_src",
+ .parent_data = gpu_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_demet_div_clk_src = {
+ .reg = 0x9054,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_demet_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_xo_div_clk_src = {
+ .reg = 0x9050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_xo_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x9288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x928c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x928c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gpu_cc_gx_gdsc = {
+ .gdscr = 0x905c,
+ .clamp_io_ctrl = 0x9504,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gpu_cc_sm8550_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DEMET_DIV_CLK_SRC] = &gpu_cc_demet_div_clk_src.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+ [GPU_CC_XO_CLK_SRC] = &gpu_cc_xo_clk_src.clkr,
+ [GPU_CC_XO_DIV_CLK_SRC] = &gpu_cc_xo_div_clk_src.clkr,
+};
+
+static struct gdsc *gpu_cc_sm8550_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &gpu_cc_cx_gdsc,
+ [GPU_CC_GX_GDSC] = &gpu_cc_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8550_resets[] = {
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPUCC_GPU_CC_CX_BCR] = { 0x9104 },
+ [GPUCC_GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPUCC_GPU_CC_FF_BCR] = { 0x9470 },
+ [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x9058 },
+ [GPUCC_GPU_CC_XO_BCR] = { 0x9000 },
+};
+
+static const struct regmap_config gpu_cc_sm8550_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9988,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8550_desc = {
+ .config = &gpu_cc_sm8550_regmap_config,
+ .clks = gpu_cc_sm8550_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm8550_clocks),
+ .resets = gpu_cc_sm8550_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm8550_resets),
+ .gdscs = gpu_cc_sm8550_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm8550_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm8550_match_table[] = {
+ { .compatible = "qcom,sm8550-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8550_match_table);
+
+static int gpu_cc_sm8550_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm8550_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /*
+ * Keep clocks always enabled:
+ * gpu_cc_cxo_aon_clk
+ * gpu_cc_demet_clk
+ */
+ regmap_update_bits(regmap, 0x9004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x900c, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm8550_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm8550_driver = {
+ .probe = gpu_cc_sm8550_probe,
+ .driver = {
+ .name = "gpu_cc-sm8550",
+ .of_match_table = gpu_cc_sm8550_match_table,
+ },
+};
+
+static int __init gpu_cc_sm8550_init(void)
+{
+ return platform_driver_register(&gpu_cc_sm8550_driver);
+}
+subsys_initcall(gpu_cc_sm8550_init);
+
+static void __exit gpu_cc_sm8550_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sm8550_driver);
+}
+module_exit(gpu_cc_sm8550_exit);
+
+MODULE_DESCRIPTION("QTI GPUCC SM8550 Driver");
+MODULE_LICENSE("GPL");
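The two GDSCs registered above are exposed as generic power domains; a consumer can attach to one and power it via runtime PM, as in the sketch below, where the "gx" domain name and example_attach_gx() are assumptions rather than part of this patch.

#include <linux/err.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_attach_gx(struct device *dev)
{
	struct device *gxpd;

	/* "gx" is a hypothetical entry in the consumer's power-domain-names. */
	gxpd = dev_pm_domain_attach_by_name(dev, "gx");
	if (IS_ERR_OR_NULL(gxpd))
		return gxpd ? PTR_ERR(gxpd) : -ENODEV;

	/* A runtime-PM reference on the virtual device turns the GDSC on. */
	return pm_runtime_resume_and_get(gxpd);
}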
diff --git a/drivers/clk/qcom/lpasscc-sc8280xp.c b/drivers/clk/qcom/lpasscc-sc8280xp.c
new file mode 100644
index 000000000000..43b37ce397cf
--- /dev/null
+++ b/drivers/clk/qcom/lpasscc-sc8280xp.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sc8280xp-lpasscc.h>
+
+#include "common.h"
+#include "reset.h"
+
+static const struct qcom_reset_map lpass_audiocc_sc8280xp_resets[] = {
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
+ [LPASS_AUDIO_SWR_WSA2_CGCR] = { 0xd8, 1 },
+};
+
+static struct regmap_config lpass_audiocc_sc8280xp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-audio-csr",
+ .max_register = 0x1000,
+};
+
+static const struct qcom_cc_desc lpass_audiocc_sc8280xp_reset_desc = {
+ .config = &lpass_audiocc_sc8280xp_regmap_config,
+ .resets = lpass_audiocc_sc8280xp_resets,
+ .num_resets = ARRAY_SIZE(lpass_audiocc_sc8280xp_resets),
+};
+
+static const struct qcom_reset_map lpasscc_sc8280xp_resets[] = {
+ [LPASS_AUDIO_SWR_TX_CGCR] = { 0xc010, 1 },
+};
+
+static struct regmap_config lpasscc_sc8280xp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-tcsr",
+ .max_register = 0x12000,
+};
+
+static const struct qcom_cc_desc lpasscc_sc8280xp_reset_desc = {
+ .config = &lpasscc_sc8280xp_regmap_config,
+ .resets = lpasscc_sc8280xp_resets,
+ .num_resets = ARRAY_SIZE(lpasscc_sc8280xp_resets),
+};
+
+static const struct of_device_id lpasscc_sc8280xp_match_table[] = {
+ {
+ .compatible = "qcom,sc8280xp-lpassaudiocc",
+ .data = &lpass_audiocc_sc8280xp_reset_desc,
+ }, {
+ .compatible = "qcom,sc8280xp-lpasscc",
+ .data = &lpasscc_sc8280xp_reset_desc,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpasscc_sc8280xp_match_table);
+
+static int lpasscc_sc8280xp_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc = of_device_get_match_data(&pdev->dev);
+
+ return qcom_cc_probe_by_index(pdev, 0, desc);
+}
+
+static struct platform_driver lpasscc_sc8280xp_driver = {
+ .probe = lpasscc_sc8280xp_probe,
+ .driver = {
+ .name = "lpasscc-sc8280xp",
+ .of_match_table = lpasscc_sc8280xp_match_table,
+ },
+};
+
+module_platform_driver(lpasscc_sc8280xp_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("QTI LPASSCC SC8280XP Driver");
+MODULE_LICENSE("GPL");
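The new driver above only exports reset lines; a SoundWire block sitting behind one of them would take and release its reset roughly as sketched here, where the "swr_rx" reset id and example_swr_reset() are assumptions, not part of this patch.

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_swr_reset(struct device *dev)
{
	struct reset_control *rst;

	/* Hypothetical id, looked up via the consumer's resets/reset-names. */
	rst = devm_reset_control_get_exclusive(dev, "swr_rx");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	usleep_range(100, 200);
	reset_control_deassert(rst);

	return 0;
}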
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 4273fce9a4a4..82f6bad144a9 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -485,7 +485,7 @@ static struct clk_rcg2 mdp_clk_src = {
.name = "mdp_clk_src",
.parent_data = mmcc_xo_mmpll0_dsi_hdmi_gpll0,
.num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_dsi_hdmi_gpll0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -2204,23 +2204,6 @@ static struct clk_branch ocmemcx_ocmemnoc_clk = {
},
};
-static struct clk_branch oxili_ocmemgx_clk = {
- .halt_reg = 0x402c,
- .clkr = {
- .enable_reg = 0x402c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "oxili_ocmemgx_clk",
- .parent_data = (const struct clk_parent_data[]){
- { .fw_name = "gfx3d_clk_src", .name = "gfx3d_clk_src" },
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch ocmemnoc_clk = {
.halt_reg = 0x50b4,
.clkr = {
@@ -2401,7 +2384,7 @@ static struct gdsc mdss_gdsc = {
.pd = {
.name = "mdss",
},
- .pwrsts = PWRSTS_RET_ON,
+ .pwrsts = PWRSTS_OFF_ON,
};
static struct gdsc camss_jpeg_gdsc = {
@@ -2512,7 +2495,6 @@ static struct clk_regmap *mmcc_msm8226_clocks[] = {
[MMSS_MMSSNOC_AXI_CLK] = &mmss_mmssnoc_axi_clk.clkr,
[MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
[OCMEMCX_AHB_CLK] = &ocmemcx_ahb_clk.clkr,
- [OXILI_OCMEMGX_CLK] = &oxili_ocmemgx_clk.clkr,
[OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
[OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
[OXILICX_AXI_CLK] = &oxilicx_axi_clk.clkr,
@@ -2670,7 +2652,6 @@ static struct clk_regmap *mmcc_msm8974_clocks[] = {
[MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
[OCMEMCX_AHB_CLK] = &ocmemcx_ahb_clk.clkr,
[OCMEMCX_OCMEMNOC_CLK] = &ocmemcx_ocmemnoc_clk.clkr,
- [OXILI_OCMEMGX_CLK] = &oxili_ocmemgx_clk.clkr,
[OCMEMNOC_CLK] = &ocmemnoc_clk.clkr,
[OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
[OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
diff --git a/drivers/clk/qcom/videocc-sm8350.c b/drivers/clk/qcom/videocc-sm8350.c
new file mode 100644
index 000000000000..b148877fc73d
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm8350.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8350-videocc.h>
+#include <dt-bindings/reset/qcom,sm8350-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_BI_TCXO_AO,
+ P_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_MAIN,
+ P_VIDEO_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_5lpe_vco[] = {
+ { 249600000, 1750000000, 0 },
+};
+
+static const struct alpha_pll_config video_pll0_config = {
+ .l = 0x25,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x42c,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config video_pll1_config = {
+ .l = 0x2b,
+ .alpha = 0xc000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll video_pll1 = {
+ .offset = 0x7d0,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO_AO, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL1_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll1.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_video_cc_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_ahb_clk_src = {
+ .cmd_rcgr = 0xbd4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_ahb_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(720000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0xb94,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
+ F(840000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs1_clk_src = {
+ .cmd_rcgr = 0xbb4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_mvs1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk_src",
+ .parent_data = video_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xef0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_SLEEP_CLK,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 video_cc_xo_clk_src = {
+ .cmd_rcgr = 0xecc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_xo_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0xd54,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0xc54,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1_div_clk_src = {
+ .reg = 0xdd4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = {
+ .reg = 0xcf4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0xd34,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xd34,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xd34,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0xc34,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc34,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1_clk = {
+ .halt_reg = 0xdb4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdb4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xdb4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_mvs1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1_div2_clk = {
+ .halt_reg = 0xdf4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdf4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xdf4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_div2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_mvs1c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1c_clk = {
+ .halt_reg = 0xcd4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xcd4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_mvs1c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_sleep_clk = {
+ .halt_reg = 0xf10,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf10,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &video_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mvs0c_gdsc = {
+ .gdscr = 0xbf8,
+ .pd = {
+ .name = "mvs0c_gdsc",
+ },
+ .flags = RETAIN_FF_ENABLE,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mvs1c_gdsc = {
+ .gdscr = 0xc98,
+ .pd = {
+ .name = "mvs1c_gdsc",
+ },
+ .flags = RETAIN_FF_ENABLE,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mvs0_gdsc = {
+ .gdscr = 0xd18,
+ .pd = {
+ .name = "mvs0_gdsc",
+ },
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mvs1_gdsc = {
+ .gdscr = 0xd98,
+ .pd = {
+ .name = "mvs1_gdsc",
+ },
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *video_cc_sm8350_clocks[] = {
+ [VIDEO_CC_AHB_CLK_SRC] = &video_cc_ahb_clk_src.clkr,
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS1_CLK] = &video_cc_mvs1_clk.clkr,
+ [VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr,
+ [VIDEO_CC_MVS1_DIV2_CLK] = &video_cc_mvs1_div2_clk.clkr,
+ [VIDEO_CC_MVS1_DIV_CLK_SRC] = &video_cc_mvs1_div_clk_src.clkr,
+ [VIDEO_CC_MVS1C_CLK] = &video_cc_mvs1c_clk.clkr,
+ [VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr,
+ [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+ [VIDEO_PLL1] = &video_pll1.clkr,
+};
+
+static const struct qcom_reset_map video_cc_sm8350_resets[] = {
+ [VIDEO_CC_CVP_INTERFACE_BCR] = { 0xe54 },
+ [VIDEO_CC_CVP_MVS0_BCR] = { 0xd14 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0xc34, 2 },
+ [VIDEO_CC_CVP_MVS0C_BCR] = { 0xbf4 },
+ [VIDEO_CC_CVP_MVS1_BCR] = { 0xd94 },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { 0xcd4, 2 },
+ [VIDEO_CC_CVP_MVS1C_BCR] = { 0xc94 },
+};
+
+static struct gdsc *video_cc_sm8350_gdscs[] = {
+ [MVS0C_GDSC] = &mvs0c_gdsc,
+ [MVS1C_GDSC] = &mvs1c_gdsc,
+ [MVS0_GDSC] = &mvs0_gdsc,
+ [MVS1_GDSC] = &mvs1_gdsc,
+};
+
+static const struct regmap_config video_cc_sm8350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc video_cc_sm8350_desc = {
+ .config = &video_cc_sm8350_regmap_config,
+ .clks = video_cc_sm8350_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm8350_clocks),
+ .resets = video_cc_sm8350_resets,
+ .num_resets = ARRAY_SIZE(video_cc_sm8350_resets),
+ .gdscs = video_cc_sm8350_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm8350_gdscs),
+};
+
+static int video_cc_sm8350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sm8350_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
+ clk_lucid_pll_configure(&video_pll1, regmap, &video_pll1_config);
+
+ /*
+ * Keep clocks always enabled:
+ * video_cc_ahb_clk
+ * video_cc_xo_clk
+ */
+ regmap_update_bits(regmap, 0xe58, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xeec, BIT(0), BIT(0));
+
+ ret = qcom_cc_really_probe(pdev, &video_cc_sm8350_desc, regmap);
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static const struct of_device_id video_cc_sm8350_match_table[] = {
+ { .compatible = "qcom,sm8350-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm8350_match_table);
+
+static struct platform_driver video_cc_sm8350_driver = {
+ .probe = video_cc_sm8350_probe,
+ .driver = {
+ .name = "sm8350-videocc",
+ .of_match_table = video_cc_sm8350_match_table,
+ },
+};
+module_platform_driver(video_cc_sm8350_driver);
+
+MODULE_DESCRIPTION("QTI SM8350 VIDEOCC Driver");
+MODULE_LICENSE("GPL");
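Rates requested on the video core clocks above propagate (via CLK_SET_RATE_PARENT) to the MVS RCGs, which match them against their frequency tables; a minimal consumer-side sketch follows, where the "vcodec0_core" clock name and example_set_video_rate() are assumptions, not part of this patch.

#include <linux/clk.h>
#include <linux/err.h>

static int example_set_video_rate(struct device *dev)
{
	struct clk *core;
	long rate;

	/* Hypothetical name for the clock fed by video_cc_mvs0_clk. */
	core = devm_clk_get(dev, "vcodec0_core");
	if (IS_ERR(core))
		return PTR_ERR(core);

	/* Let the clock framework pick the nearest supported table rate. */
	rate = clk_round_rate(core, 1014000000);
	if (rate < 0)
		return rate;

	return clk_set_rate(core, rate);
}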
diff --git a/drivers/clk/qcom/videocc-sm8450.c b/drivers/clk/qcom/videocc-sm8450.c
new file mode 100644
index 000000000000..7d0029b8b799
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm8450.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8450-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+};
+
+enum {
+ P_BI_TCXO,
+ P_VIDEO_CC_PLL0_OUT_MAIN,
+ P_VIDEO_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+static const struct alpha_pll_config video_cc_pll0_config = {
+ /* .l includes CAL_L_VAL, L_VAL fields */
+ .l = 0x0044001e,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll video_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config video_cc_pll1_config = {
+ /* .l includes CAL_L_VAL, L_VAL fields */
+ .l = 0x0044002b,
+ .alpha = 0xc000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll video_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_CC_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_CC_PLL1_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_cc_pll1.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(576000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0x8000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
+ F(840000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1050000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1350000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1500000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1650000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs1_clk_src = {
+ .cmd_rcgr = 0x8018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0x80b8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0x806c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1_div_clk_src = {
+ .reg = 0x80dc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = {
+ .reg = 0x8094,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x80b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1_clk = {
+ .halt_reg = 0x80d4,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x80d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1c_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc video_cc_mvs0c_gdsc = {
+ .gdscr = 0x804c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc video_cc_mvs0_gdsc = {
+ .gdscr = 0x809c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs0c_gdsc.pd,
+ .flags = RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct gdsc video_cc_mvs1c_gdsc = {
+ .gdscr = 0x8074,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs1c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc video_cc_mvs1_gdsc = {
+ .gdscr = 0x80c0,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs1c_gdsc.pd,
+ .flags = RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct clk_regmap *video_cc_sm8450_clocks[] = {
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS1_CLK] = &video_cc_mvs1_clk.clkr,
+ [VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr,
+ [VIDEO_CC_MVS1_DIV_CLK_SRC] = &video_cc_mvs1_div_clk_src.clkr,
+ [VIDEO_CC_MVS1C_CLK] = &video_cc_mvs1c_clk.clkr,
+ [VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr,
+ [VIDEO_CC_PLL0] = &video_cc_pll0.clkr,
+ [VIDEO_CC_PLL1] = &video_cc_pll1.clkr,
+};
+
+static struct gdsc *video_cc_sm8450_gdscs[] = {
+ [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc,
+ [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc,
+ [VIDEO_CC_MVS1C_GDSC] = &video_cc_mvs1c_gdsc,
+ [VIDEO_CC_MVS1_GDSC] = &video_cc_mvs1_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_sm8450_resets[] = {
+ [CVP_VIDEO_CC_INTERFACE_BCR] = { 0x80e0 },
+ [CVP_VIDEO_CC_MVS0_BCR] = { 0x8098 },
+ [CVP_VIDEO_CC_MVS0C_BCR] = { 0x8048 },
+ [CVP_VIDEO_CC_MVS1_BCR] = { 0x80bc },
+ [CVP_VIDEO_CC_MVS1C_BCR] = { 0x8070 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { 0x808c, 2 },
+};
+
+static const struct regmap_config video_cc_sm8450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9f4c,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc video_cc_sm8450_desc = {
+ .config = &video_cc_sm8450_regmap_config,
+ .clks = video_cc_sm8450_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm8450_clocks),
+ .resets = video_cc_sm8450_resets,
+ .num_resets = ARRAY_SIZE(video_cc_sm8450_resets),
+ .gdscs = video_cc_sm8450_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm8450_gdscs),
+};
+
+static const struct of_device_id video_cc_sm8450_match_table[] = {
+ { .compatible = "qcom,sm8450-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm8450_match_table);
+
+static int video_cc_sm8450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sm8450_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
+
+ /*
+ * Keep clocks always enabled:
+ * video_cc_ahb_clk
+ * video_cc_sleep_clk
+ * video_cc_xo_clk
+ */
+ regmap_update_bits(regmap, 0x80e4, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x8130, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x8114, BIT(0), BIT(0));
+
+ ret = qcom_cc_really_probe(pdev, &video_cc_sm8450_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver video_cc_sm8450_driver = {
+ .probe = video_cc_sm8450_probe,
+ .driver = {
+ .name = "video_cc-sm8450",
+ .of_match_table = video_cc_sm8450_match_table,
+ },
+};
+
+static int __init video_cc_sm8450_init(void)
+{
+ return platform_driver_register(&video_cc_sm8450_driver);
+}
+subsys_initcall(video_cc_sm8450_init);
+
+static void __exit video_cc_sm8450_exit(void)
+{
+ platform_driver_unregister(&video_cc_sm8450_driver);
+}
+module_exit(video_cc_sm8450_exit);
+
+MODULE_DESCRIPTION("QTI VIDEOCC SM8450 Driver");
+MODULE_LICENSE("GPL");
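
The "/* .l includes CAL_L_VAL, L_VAL fields */" comments above refer to the packed L word of the Lucid Evo PLLs. A small sketch of how 0x0044001e decomposes, assuming L_VAL occupies bits [15:0] and CAL_L_VAL bits [31:16] — an assumption consistent with the value itself and, at the usual 19.2 MHz TCXO, with the 576 MHz floor of ftbl_video_cc_mvs0_clk_src:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define PLL_L_VAL	GENMASK(15, 0)	/* assumed field positions */
#define PLL_CAL_L_VAL	GENMASK(31, 16)

static u32 videocc_pll_l_word(u32 cal_l, u32 l)
{
	return FIELD_PREP(PLL_CAL_L_VAL, cal_l) | FIELD_PREP(PLL_L_VAL, l);
}

/* videocc_pll_l_word(0x44, 0x1e) == 0x0044001e; L = 30, 30 * 19.2 MHz = 576 MHz. */
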
diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
new file mode 100644
index 000000000000..e2400fe23e60
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm8550.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8450-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+};
+
+enum {
+ P_BI_TCXO,
+ P_VIDEO_CC_PLL0_OUT_MAIN,
+ P_VIDEO_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+static const struct alpha_pll_config video_cc_pll0_config = {
+ /* .l includes RINGOSC_CAL_L_VAL, CAL_L_VAL, L_VAL fields */
+ .l = 0x44440025,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll video_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config video_cc_pll1_config = {
+ /* .l includes RINGOSC_CAL_L_VAL, CAL_L_VAL, L_VAL fields */
+ .l = 0x44440036,
+ .alpha = 0xb000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll video_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_CC_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_CC_PLL1_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_cc_pll1.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1600000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0x8000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
+ F(1050000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1350000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1500000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1650000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs1_clk_src = {
+ .cmd_rcgr = 0x8018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0x80c4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0x8070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1_div_clk_src = {
+ .reg = 0x80ec,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = {
+ .reg = 0x809c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x80b8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1_clk = {
+ .halt_reg = 0x80e0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x80e0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1c_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc video_cc_mvs0c_gdsc = {
+ .gdscr = 0x804c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc video_cc_mvs0_gdsc = {
+ .gdscr = 0x80a4,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs0c_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct gdsc video_cc_mvs1c_gdsc = {
+ .gdscr = 0x8078,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs1c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc video_cc_mvs1_gdsc = {
+ .gdscr = 0x80cc,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs1c_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct clk_regmap *video_cc_sm8550_clocks[] = {
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS1_CLK] = &video_cc_mvs1_clk.clkr,
+ [VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr,
+ [VIDEO_CC_MVS1_DIV_CLK_SRC] = &video_cc_mvs1_div_clk_src.clkr,
+ [VIDEO_CC_MVS1C_CLK] = &video_cc_mvs1c_clk.clkr,
+ [VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr,
+ [VIDEO_CC_PLL0] = &video_cc_pll0.clkr,
+ [VIDEO_CC_PLL1] = &video_cc_pll1.clkr,
+};
+
+static struct gdsc *video_cc_sm8550_gdscs[] = {
+ [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc,
+ [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc,
+ [VIDEO_CC_MVS1C_GDSC] = &video_cc_mvs1c_gdsc,
+ [VIDEO_CC_MVS1_GDSC] = &video_cc_mvs1_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_sm8550_resets[] = {
+ [CVP_VIDEO_CC_INTERFACE_BCR] = { 0x80f0 },
+ [CVP_VIDEO_CC_MVS0_BCR] = { 0x80a0 },
+ [CVP_VIDEO_CC_MVS0C_BCR] = { 0x8048 },
+ [CVP_VIDEO_CC_MVS1_BCR] = { 0x80c8 },
+ [CVP_VIDEO_CC_MVS1C_BCR] = { 0x8074 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { 0x8090, 2 },
+};
+
+static const struct regmap_config video_cc_sm8550_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9f4c,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc video_cc_sm8550_desc = {
+ .config = &video_cc_sm8550_regmap_config,
+ .clks = video_cc_sm8550_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm8550_clocks),
+ .resets = video_cc_sm8550_resets,
+ .num_resets = ARRAY_SIZE(video_cc_sm8550_resets),
+ .gdscs = video_cc_sm8550_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm8550_gdscs),
+};
+
+static const struct of_device_id video_cc_sm8550_match_table[] = {
+ { .compatible = "qcom,sm8550-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm8550_match_table);
+
+static int video_cc_sm8550_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sm8550_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
+
+ /*
+ * Keep clocks always enabled:
+ * video_cc_ahb_clk
+ * video_cc_sleep_clk
+ * video_cc_xo_clk
+ */
+ regmap_update_bits(regmap, 0x80f4, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x8140, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x8124, BIT(0), BIT(0));
+
+ ret = qcom_cc_really_probe(pdev, &video_cc_sm8550_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver video_cc_sm8550_driver = {
+ .probe = video_cc_sm8550_probe,
+ .driver = {
+ .name = "video_cc-sm8550",
+ .of_match_table = video_cc_sm8550_match_table,
+ },
+};
+
+static int __init video_cc_sm8550_init(void)
+{
+ return platform_driver_register(&video_cc_sm8550_driver);
+}
+subsys_initcall(video_cc_sm8550_init);
+
+static void __exit video_cc_sm8550_exit(void)
+{
+ platform_driver_unregister(&video_cc_sm8550_driver);
+}
+module_exit(video_cc_sm8550_exit);
+
+MODULE_DESCRIPTION("QTI VIDEOCC SM8550 Driver");
+MODULE_LICENSE("GPL");
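
All three probes end with the same "Keep clocks always enabled" idiom: bit 0 (the enable bit of a branch CBCR) is set straight through the regmap so the AHB/sleep/XO branches are never modelled as clk objects the framework could gate. A sketch of that idiom as a standalone helper; the helper name and the offsets passed to it are illustrative only:

#include <linux/bits.h>
#include <linux/regmap.h>

static int videocc_keep_branches_on(struct regmap *regmap,
				    const u32 *cbcr, size_t num)
{
	size_t i;
	int ret;

	for (i = 0; i < num; i++) {
		/* Read-modify-write of the branch enable bit only. */
		ret = regmap_update_bits(regmap, cbcr[i], BIT(0), BIT(0));
		if (ret)
			return ret;
	}

	return 0;
}

For the SM8550 probe the equivalent call would pass the three offsets written above, 0x80f4, 0x8140 and 0x8124.
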
diff --git a/drivers/clk/tegra/clk-tegra-super-cclk.c b/drivers/clk/tegra/clk-tegra-super-cclk.c
index 68d7bcd5fc8a..3b22a4d0dffc 100644
--- a/drivers/clk/tegra/clk-tegra-super-cclk.c
+++ b/drivers/clk/tegra/clk-tegra-super-cclk.c
@@ -86,9 +86,18 @@ static int cclk_super_determine_rate(struct clk_hw *hw,
if (rate <= pllp_rate) {
if (super->flags & TEGRA20_SUPER_CLK)
rate = pllp_rate;
- else
- rate = tegra_clk_super_ops.round_rate(hw, rate,
- &pllp_rate);
+ else {
+ struct clk_rate_request parent = {
+ .rate = req->rate,
+ .best_parent_rate = pllp_rate,
+ };
+
+ clk_hw_get_rate_range(hw, &parent.min_rate,
+ &parent.max_rate);
+ tegra_clk_super_ops.determine_rate(hw, &parent);
+ pllp_rate = parent.best_parent_rate;
+ rate = parent.rate;
+ }
req->best_parent_rate = pllp_rate;
req->best_parent_hw = pllp_hw;
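
The Tegra hunk converts a round_rate() call into a determine_rate() request. The general shape of that conversion, sketched against a hypothetical legacy callback (example_round_rate stands in for whatever a driver used to call):

#include <linux/clk-provider.h>

/* Legacy-style callback: returns the rounded rate, may adjust *parent_rate. */
static long example_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate);

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	long rounded;

	rounded = example_round_rate(hw, req->rate, &req->best_parent_rate);
	if (rounded < 0)
		return rounded;

	/* determine_rate() reports its result through the request. */
	req->rate = rounded;

	return 0;
}
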
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 9af280735cba..7a8d402f05be 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -67,6 +67,7 @@ config COMEDI_TEST
config COMEDI_PARPORT
tristate "Parallel port support"
+ depends on HAS_IOPORT
help
Enable support for the standard parallel port.
A cheap and easy way to get a few more digital I/O lines. Steal
@@ -79,6 +80,7 @@ config COMEDI_PARPORT
config COMEDI_SSV_DNP
tristate "SSV Embedded Systems DIL/Net-PC support"
depends on X86_32 || COMPILE_TEST
+ depends on HAS_IOPORT
help
Enable support for SSV Embedded Systems DIL/Net-PC
@@ -89,6 +91,7 @@ endif # COMEDI_MISC_DRIVERS
menuconfig COMEDI_ISA_DRIVERS
bool "Comedi ISA and PC/104 drivers"
+ depends on ISA
help
Enable comedi ISA and PC/104 drivers to be built
@@ -100,7 +103,8 @@ if COMEDI_ISA_DRIVERS
config COMEDI_PCL711
tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112
@@ -161,8 +165,9 @@ config COMEDI_PCL730
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
+ depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
+ depends on COMEDI_8254
help
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
@@ -173,8 +178,9 @@ config COMEDI_PCL812
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
+ depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
+ depends on COMEDI_8254
help
Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -183,8 +189,9 @@ config COMEDI_PCL816
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
+ depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
+ depends on COMEDI_8254
help
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
@@ -203,7 +210,7 @@ config COMEDI_PCM3724
config COMEDI_AMPLC_DIO200_ISA
tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
- select COMEDI_AMPLC_DIO200
+ depends on COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and
PC272E ISA DIO boards
@@ -255,7 +262,8 @@ config COMEDI_DAC02
config COMEDI_DAS16M1
tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
select COMEDI_8255
help
Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
@@ -265,7 +273,7 @@ config COMEDI_DAS16M1
config COMEDI_DAS08_ISA
tristate "DAS-08 compatible ISA and PC/104 card support"
- select COMEDI_DAS08
+ depends on COMEDI_DAS08
help
Enable support for Keithley Metrabyte/ComputerBoards DAS08
and compatible ISA and PC/104 cards:
@@ -278,8 +286,9 @@ config COMEDI_DAS08_ISA
config COMEDI_DAS16
tristate "DAS-16 compatible ISA and PC/104 card support"
+ depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
+ depends on COMEDI_8254
select COMEDI_8255
help
Enable support for Keithley Metrabyte/ComputerBoards DAS16
@@ -296,7 +305,8 @@ config COMEDI_DAS16
config COMEDI_DAS800
tristate "DAS800 and compatible ISA card support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
Keithley Metrabyte DAS-800, DAS-801, DAS-802
@@ -308,8 +318,9 @@ config COMEDI_DAS800
config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
+ depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
+ depends on COMEDI_8254
help
Enable support for DAS1800 and compatible ISA cards
Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
@@ -323,7 +334,8 @@ config COMEDI_DAS1800
config COMEDI_DAS6402
tristate "DAS6402 and compatible ISA card support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for DAS6402 and compatible ISA cards
Computerboards, Keithley Metrabyte DAS6402 and compatibles
@@ -402,7 +414,8 @@ config COMEDI_FL512
config COMEDI_AIO_AIO12_8
tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
select COMEDI_8255
help
Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
@@ -456,8 +469,9 @@ config COMEDI_ADQ12B
config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
+ depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
+ depends on COMEDI_8254
help
Enable support for National Instruments AT-A2150 cards
@@ -466,7 +480,8 @@ config COMEDI_NI_AT_A2150
config COMEDI_NI_AT_AO
tristate "NI AT-AO-6/10 EISA card support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for National Instruments AT-AO-6/10 cards
@@ -497,7 +512,7 @@ config COMEDI_NI_ATMIO16D
config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
- select COMEDI_NI_LABPC
+ depends on COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
@@ -561,7 +576,7 @@ endif # COMEDI_ISA_DRIVERS
menuconfig COMEDI_PCI_DRIVERS
tristate "Comedi PCI drivers"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
Enable support for comedi PCI drivers.
@@ -710,7 +725,8 @@ config COMEDI_ADL_PCI8164
config COMEDI_ADL_PCI9111
tristate "ADLink PCI-9111HR support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for ADlink PCI9111 cards
@@ -720,7 +736,7 @@ config COMEDI_ADL_PCI9111
config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
depends on HAS_DMA
- select COMEDI_8254
+ depends on COMEDI_8254
help
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@@ -729,7 +745,8 @@ config COMEDI_ADL_PCI9118
config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x and PCI-1731 support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
PCI-1713 and PCI-1731
@@ -773,7 +790,8 @@ config COMEDI_ADV_PCI1760
config COMEDI_ADV_PCI_DIO
tristate "Advantech PCI DIO card support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
select COMEDI_8255
help
Enable support for Advantech PCI DIO cards
@@ -786,7 +804,7 @@ config COMEDI_ADV_PCI_DIO
config COMEDI_AMPLC_DIO200_PCI
tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support"
- select COMEDI_AMPLC_DIO200
+ depends on COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236
and PCIe296 DIO boards.
@@ -814,7 +832,8 @@ config COMEDI_AMPLC_PC263_PCI
config COMEDI_AMPLC_PCI224
tristate "Amplicon PCI224 and PCI234 support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for Amplicon PCI224 and PCI234 AO boards
@@ -823,7 +842,8 @@ config COMEDI_AMPLC_PCI224
config COMEDI_AMPLC_PCI230
tristate "Amplicon PCI230 and PCI260 support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
select COMEDI_8255
help
Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
@@ -842,7 +862,7 @@ config COMEDI_CONTEC_PCI_DIO
config COMEDI_DAS08_PCI
tristate "DAS-08 PCI support"
- select COMEDI_DAS08
+ depends on COMEDI_DAS08
help
Enable support for PCI DAS-08 cards.
@@ -929,7 +949,8 @@ config COMEDI_CB_PCIDAS64
config COMEDI_CB_PCIDAS
tristate "MeasurementComputing PCI-DAS support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
@@ -953,7 +974,8 @@ config COMEDI_CB_PCIDDA
config COMEDI_CB_PCIMDAS
tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI Migration
@@ -973,7 +995,8 @@ config COMEDI_CB_PCIMDDA
config COMEDI_ME4000
tristate "Meilhaus ME-4000 support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for Meilhaus PCI data acquisition cards
ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
@@ -1031,7 +1054,7 @@ config COMEDI_NI_670X
config COMEDI_NI_LABPC_PCI
tristate "NI Lab-PC PCI-1200 support"
- select COMEDI_NI_LABPC
+ depends on COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC PCI-1200.
@@ -1053,6 +1076,7 @@ config COMEDI_NI_PCIDIO
config COMEDI_NI_PCIMIO
tristate "NI PCI-MIO-E series and M series support"
depends on HAS_DMA
+ depends on HAS_IOPORT
select COMEDI_NI_TIOCMD
select COMEDI_8255
help
@@ -1074,7 +1098,8 @@ config COMEDI_NI_PCIMIO
config COMEDI_RTD520
tristate "Real Time Devices PCI4520/DM7520 support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for Real Time Devices PCI4520/DM7520
@@ -1114,7 +1139,8 @@ if COMEDI_PCMCIA_DRIVERS
config COMEDI_CB_DAS16_CS
tristate "CB DAS16 series PCMCIA support"
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
help
Enable support for the ComputerBoards/MeasurementComputing PCMCIA
cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
@@ -1124,7 +1150,7 @@ config COMEDI_CB_DAS16_CS
config COMEDI_DAS08_CS
tristate "CB DAS08 PCMCIA support"
- select COMEDI_DAS08
+ depends on COMEDI_DAS08
help
Enable support for the ComputerBoards/MeasurementComputing DAS-08
PCMCIA card
@@ -1134,6 +1160,7 @@ config COMEDI_DAS08_CS
config COMEDI_NI_DAQ_700_CS
tristate "NI DAQCard-700 PCMCIA support"
+ depends on HAS_IOPORT
help
Enable support for the National Instruments PCMCIA DAQCard-700 DIO
@@ -1142,6 +1169,7 @@ config COMEDI_NI_DAQ_700_CS
config COMEDI_NI_DAQ_DIO24_CS
tristate "NI DAQ-Card DIO-24 PCMCIA support"
+ depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
@@ -1151,7 +1179,7 @@ config COMEDI_NI_DAQ_DIO24_CS
config COMEDI_NI_LABPC_CS
tristate "NI DAQCard-1200 PCMCIA support"
- select COMEDI_NI_LABPC
+ depends on COMEDI_NI_LABPC
help
Enable support for the National Instruments PCMCIA DAQCard-1200
@@ -1160,6 +1188,7 @@ config COMEDI_NI_LABPC_CS
config COMEDI_NI_MIO_CS
tristate "NI DAQCard E series PCMCIA support"
+ depends on HAS_IOPORT
select COMEDI_NI_TIO
select COMEDI_8255
help
@@ -1172,6 +1201,7 @@ config COMEDI_NI_MIO_CS
config COMEDI_QUATECH_DAQP_CS
tristate "Quatech DAQP PCMCIA data capture card support"
+ depends on HAS_IOPORT
help
Enable support for the Quatech DAQP PCMCIA data capture cards
DAQP-208 and DAQP-308
@@ -1248,12 +1278,14 @@ endif # COMEDI_USB_DRIVERS
config COMEDI_8254
tristate
+ depends on HAS_IOPORT
config COMEDI_8255
tristate
config COMEDI_8255_SA
tristate "Standalone 8255 support"
+ depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for 8255 digital I/O as a standalone driver.
@@ -1285,7 +1317,7 @@ config COMEDI_KCOMEDILIB
called kcomedilib.
config COMEDI_AMPLC_DIO200
- select COMEDI_8254
+ depends on COMEDI_8254
tristate
config COMEDI_AMPLC_PC236
@@ -1294,7 +1326,7 @@ config COMEDI_AMPLC_PC236
config COMEDI_DAS08
tristate
- select COMEDI_8254
+ depends on COMEDI_8254
select COMEDI_8255
config COMEDI_ISADMA
@@ -1302,7 +1334,8 @@ config COMEDI_ISADMA
config COMEDI_NI_LABPC
tristate
- select COMEDI_8254
+ depends on HAS_IOPORT
+ depends on COMEDI_8254
select COMEDI_8255
config COMEDI_NI_LABPC_ISADMA
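
Two themes run through the Kconfig hunk: drivers that do port I/O gain "depends on HAS_IOPORT", and "select COMEDI_8254" becomes "depends on COMEDI_8254", since select would force the symbol on without honouring COMEDI_8254's own new HAS_IOPORT dependency. The port-I/O accessors these legacy boards rely on look roughly like the sketch below; the register offsets are made up for illustration:

#include <linux/io.h>
#include <linux/types.h>

static u8 example_board_status(unsigned long iobase)
{
	/* inb()/outb() only exist on architectures with port I/O. */
	return inb(iobase + 0x1);		/* hypothetical status register */
}

static void example_board_ctrl(unsigned long iobase, u8 val)
{
	outb(val, iobase + 0x2);		/* hypothetical control register */
}
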
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 8e43918d38c4..1548dea15df1 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -97,7 +97,6 @@ static DEFINE_MUTEX(comedi_subdevice_minor_table_lock);
static struct comedi_subdevice
*comedi_subdevice_minor_table[COMEDI_NUM_SUBDEVICE_MINORS];
-static struct class *comedi_class;
static struct cdev comedi_cdev;
static void comedi_device_init(struct comedi_device *dev)
@@ -187,18 +186,6 @@ static struct comedi_device *comedi_clear_board_minor(unsigned int minor)
return dev;
}
-static void comedi_free_board_dev(struct comedi_device *dev)
-{
- if (dev) {
- comedi_device_cleanup(dev);
- if (dev->class_dev) {
- device_destroy(comedi_class,
- MKDEV(COMEDI_MAJOR, dev->minor));
- }
- comedi_dev_put(dev);
- }
-}
-
static struct comedi_subdevice *
comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned int minor)
{
@@ -611,6 +598,23 @@ static struct attribute *comedi_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(comedi_dev);
+static const struct class comedi_class = {
+ .name = "comedi",
+ .dev_groups = comedi_dev_groups,
+};
+
+static void comedi_free_board_dev(struct comedi_device *dev)
+{
+ if (dev) {
+ comedi_device_cleanup(dev);
+ if (dev->class_dev) {
+ device_destroy(&comedi_class,
+ MKDEV(COMEDI_MAJOR, dev->minor));
+ }
+ comedi_dev_put(dev);
+ }
+}
+
static void __comedi_clear_subdevice_runflags(struct comedi_subdevice *s,
unsigned int bits)
{
@@ -3263,7 +3267,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
return ERR_PTR(-EBUSY);
}
dev->minor = i;
- csdev = device_create(comedi_class, hardware_device,
+ csdev = device_create(&comedi_class, hardware_device,
MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i", i);
if (!IS_ERR(csdev))
dev->class_dev = get_device(csdev);
@@ -3312,7 +3316,7 @@ int comedi_alloc_subdevice_minor(struct comedi_subdevice *s)
}
i += COMEDI_NUM_BOARD_MINORS;
s->minor = i;
- csdev = device_create(comedi_class, dev->class_dev,
+ csdev = device_create(&comedi_class, dev->class_dev,
MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i_subd%i",
dev->minor, s->index);
if (!IS_ERR(csdev))
@@ -3337,7 +3341,7 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s)
comedi_subdevice_minor_table[i] = NULL;
mutex_unlock(&comedi_subdevice_minor_table_lock);
if (s->class_dev) {
- device_destroy(comedi_class, MKDEV(COMEDI_MAJOR, s->minor));
+ device_destroy(&comedi_class, MKDEV(COMEDI_MAJOR, s->minor));
s->class_dev = NULL;
}
}
@@ -3383,15 +3387,12 @@ static int __init comedi_init(void)
if (retval)
goto out_unregister_chrdev_region;
- comedi_class = class_create("comedi");
- if (IS_ERR(comedi_class)) {
- retval = PTR_ERR(comedi_class);
+ retval = class_register(&comedi_class);
+ if (retval) {
pr_err("failed to create class\n");
goto out_cdev_del;
}
- comedi_class->dev_groups = comedi_dev_groups;
-
/* create devices files for legacy/manual use */
for (i = 0; i < comedi_num_legacy_minors; i++) {
struct comedi_device *dev;
@@ -3413,7 +3414,7 @@ static int __init comedi_init(void)
out_cleanup_board_minors:
comedi_cleanup_board_minors();
- class_destroy(comedi_class);
+ class_unregister(&comedi_class);
out_cdev_del:
cdev_del(&comedi_cdev);
out_unregister_chrdev_region:
@@ -3425,7 +3426,7 @@ module_init(comedi_init);
static void __exit comedi_cleanup(void)
{
comedi_cleanup_board_minors();
- class_destroy(comedi_class);
+ class_unregister(&comedi_class);
cdev_del(&comedi_cdev);
unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS);
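
The comedi_fops.c hunk replaces the heap-allocated class from class_create() with a const, statically defined struct class registered via class_register(). The same pattern in isolation, with placeholder names:

#include <linux/device/class.h>
#include <linux/init.h>

static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}
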
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index c02dc19a679b..30ea8b53ebf8 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -60,7 +60,9 @@
static bool config_mode;
static unsigned int set_amplitude;
static unsigned int set_period;
-static struct class *ctcls;
+static const struct class ctcls = {
+ .name = CLASS_NAME,
+};
static struct device *ctdev;
module_param_named(noauto, config_mode, bool, 0444);
@@ -795,13 +797,13 @@ static int __init comedi_test_init(void)
}
if (!config_mode) {
- ctcls = class_create(CLASS_NAME);
- if (IS_ERR(ctcls)) {
+ ret = class_register(&ctcls);
+ if (ret) {
pr_warn("comedi_test: unable to create class\n");
goto clean3;
}
- ctdev = device_create(ctcls, NULL, MKDEV(0, 0), NULL, DEV_NAME);
+ ctdev = device_create(&ctcls, NULL, MKDEV(0, 0), NULL, DEV_NAME);
if (IS_ERR(ctdev)) {
pr_warn("comedi_test: unable to create device\n");
goto clean2;
@@ -817,13 +819,10 @@ static int __init comedi_test_init(void)
return 0;
clean:
- device_destroy(ctcls, MKDEV(0, 0));
+ device_destroy(&ctcls, MKDEV(0, 0));
clean2:
- class_destroy(ctcls);
- ctdev = NULL;
+ class_unregister(&ctcls);
clean3:
- ctcls = NULL;
-
return 0;
}
module_init(comedi_test_init);
@@ -833,9 +832,9 @@ static void __exit comedi_test_exit(void)
if (ctdev)
comedi_auto_unconfig(ctdev);
- if (ctcls) {
- device_destroy(ctcls, MKDEV(0, 0));
- class_destroy(ctcls);
+ if (class_is_registered(&ctcls)) {
+ device_destroy(&ctcls, MKDEV(0, 0));
+ class_unregister(&ctcls);
}
comedi_driver_unregister(&waveform_driver);
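
comedi_test.c follows the same conversion and additionally uses class_is_registered() to guard teardown, since a static class can no longer be NULL-tested. A minimal sketch of that guarded cleanup, again with placeholder names:

#include <linux/device.h>
#include <linux/device/class.h>
#include <linux/kdev_t.h>

static const struct class test_example_class = {
	.name = "test_example",
};

static void test_example_cleanup(void)
{
	if (class_is_registered(&test_example_class)) {
		/* Destroy the device node before dropping the class. */
		device_destroy(&test_example_class, MKDEV(0, 0));
		class_unregister(&test_example_class);
	}
}
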
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index d9cb937665cf..ed1f57511955 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -5,10 +5,11 @@
*
* This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
*/
-#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/counter.h>
#include <linux/device.h>
-#include <linux/errno.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
@@ -17,8 +18,11 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/types.h>
+#include <linux/regmap.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
#define QUAD8_EXTENT 32
@@ -34,118 +38,196 @@ MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
#define QUAD8_NUM_COUNTERS 8
-/**
- * struct channel_reg - channel register structure
- * @data: Count data
- * @control: Channel flags and control
- */
-struct channel_reg {
- u8 data;
- u8 control;
-};
-
-/**
- * struct quad8_reg - device register structure
- * @channel: quadrature counter data and control
- * @interrupt_status: channel interrupt status
- * @channel_oper: enable/reset counters and interrupt functions
- * @index_interrupt: enable channel interrupts
- * @reserved: reserved for Factory Use
- * @index_input_levels: index signal logical input level
- * @cable_status: differential encoder cable status
- */
-struct quad8_reg {
- struct channel_reg channel[QUAD8_NUM_COUNTERS];
- u8 interrupt_status;
- u8 channel_oper;
- u8 index_interrupt;
- u8 reserved[3];
- u8 index_input_levels;
- u8 cable_status;
-};
+#define QUAD8_DATA(_channel) ((_channel) * 2)
+#define QUAD8_CONTROL(_channel) (QUAD8_DATA(_channel) + 1)
+#define QUAD8_INTERRUPT_STATUS 0x10
+#define QUAD8_CHANNEL_OPERATION 0x11
+#define QUAD8_INDEX_INTERRUPT 0x12
+#define QUAD8_INDEX_INPUT_LEVELS 0x16
+#define QUAD8_CABLE_STATUS 0x17
/**
* struct quad8 - device private data structure
* @lock: lock to prevent clobbering device states during R/W ops
- * @counter: instance of the counter_device
+ * @cmr: array of Counter Mode Register states
+ * @ior: array of Input / Output Control Register states
+ * @idr: array of Index Control Register states
* @fck_prescaler: array of filter clock prescaler configurations
* @preset: array of preset values
- * @count_mode: array of count mode configurations
- * @quadrature_mode: array of quadrature mode configurations
- * @quadrature_scale: array of quadrature mode scale configurations
- * @ab_enable: array of A and B inputs enable configurations
- * @preset_enable: array of set_to_preset_on_index attribute configurations
- * @irq_trigger: array of current IRQ trigger function configurations
- * @synchronous_mode: array of index function synchronous mode configurations
- * @index_polarity: array of index function polarity configurations
* @cable_fault_enable: differential encoder cable status enable configurations
- * @reg: I/O address offset for the device registers
+ * @map: regmap for the device
*/
struct quad8 {
spinlock_t lock;
+ u8 cmr[QUAD8_NUM_COUNTERS];
+ u8 ior[QUAD8_NUM_COUNTERS];
+ u8 idr[QUAD8_NUM_COUNTERS];
unsigned int fck_prescaler[QUAD8_NUM_COUNTERS];
unsigned int preset[QUAD8_NUM_COUNTERS];
- unsigned int count_mode[QUAD8_NUM_COUNTERS];
- unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
- unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
- unsigned int ab_enable[QUAD8_NUM_COUNTERS];
- unsigned int preset_enable[QUAD8_NUM_COUNTERS];
- unsigned int irq_trigger[QUAD8_NUM_COUNTERS];
- unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
- unsigned int index_polarity[QUAD8_NUM_COUNTERS];
unsigned int cable_fault_enable;
- struct quad8_reg __iomem *reg;
+ struct regmap *map;
+};
+
+static const struct regmap_range quad8_wr_ranges[] = {
+ regmap_reg_range(0x0, 0xF), regmap_reg_range(0x11, 0x12), regmap_reg_range(0x17, 0x17),
+};
+static const struct regmap_range quad8_rd_ranges[] = {
+ regmap_reg_range(0x0, 0x12), regmap_reg_range(0x16, 0x18),
+};
+static const struct regmap_access_table quad8_wr_table = {
+ .yes_ranges = quad8_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(quad8_wr_ranges),
+};
+static const struct regmap_access_table quad8_rd_table = {
+ .yes_ranges = quad8_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(quad8_rd_ranges),
+};
+static const struct regmap_config quad8_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .wr_table = &quad8_wr_table,
+ .rd_table = &quad8_rd_table,
};
/* Error flag */
-#define QUAD8_FLAG_E BIT(4)
+#define FLAG_E BIT(4)
/* Up/Down flag */
-#define QUAD8_FLAG_UD BIT(5)
+#define FLAG_UD BIT(5)
+/* Counting up */
+#define UP 0x1
+
+#define REGISTER_SELECTION GENMASK(6, 5)
+
/* Reset and Load Signal Decoders */
-#define QUAD8_CTR_RLD 0x00
+#define SELECT_RLD u8_encode_bits(0x0, REGISTER_SELECTION)
/* Counter Mode Register */
-#define QUAD8_CTR_CMR 0x20
+#define SELECT_CMR u8_encode_bits(0x1, REGISTER_SELECTION)
/* Input / Output Control Register */
-#define QUAD8_CTR_IOR 0x40
+#define SELECT_IOR u8_encode_bits(0x2, REGISTER_SELECTION)
/* Index Control Register */
-#define QUAD8_CTR_IDR 0x60
+#define SELECT_IDR u8_encode_bits(0x3, REGISTER_SELECTION)
+
+/*
+ * Reset and Load Signal Decoders
+ */
+#define RESETS GENMASK(2, 1)
+#define LOADS GENMASK(4, 3)
/* Reset Byte Pointer (three byte data pointer) */
-#define QUAD8_RLD_RESET_BP 0x01
-/* Reset Counter */
-#define QUAD8_RLD_RESET_CNTR 0x02
-/* Reset Borrow Toggle, Carry Toggle, Compare Toggle, and Sign flags */
-#define QUAD8_RLD_RESET_FLAGS 0x04
+#define RESET_BP BIT(0)
+/* Reset Borrow Toggle, Carry toggle, Compare toggle, Sign, and Index flags */
+#define RESET_BT_CT_CPT_S_IDX u8_encode_bits(0x2, RESETS)
/* Reset Error flag */
-#define QUAD8_RLD_RESET_E 0x06
+#define RESET_E u8_encode_bits(0x3, RESETS)
/* Preset Register to Counter */
-#define QUAD8_RLD_PRESET_CNTR 0x08
+#define TRANSFER_PR_TO_CNTR u8_encode_bits(0x1, LOADS)
/* Transfer Counter to Output Latch */
-#define QUAD8_RLD_CNTR_OUT 0x10
+#define TRANSFER_CNTR_TO_OL u8_encode_bits(0x2, LOADS)
/* Transfer Preset Register LSB to FCK Prescaler */
-#define QUAD8_RLD_PRESET_PSC 0x18
-#define QUAD8_CHAN_OP_RESET_COUNTERS 0x01
-#define QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC 0x04
-#define QUAD8_CMR_QUADRATURE_X1 0x08
-#define QUAD8_CMR_QUADRATURE_X2 0x10
-#define QUAD8_CMR_QUADRATURE_X4 0x18
+#define TRANSFER_PR0_TO_PSC u8_encode_bits(0x3, LOADS)
+
+/*
+ * Counter Mode Registers
+ */
+#define COUNT_ENCODING BIT(0)
+#define COUNT_MODE GENMASK(2, 1)
+#define QUADRATURE_MODE GENMASK(4, 3)
+/* Binary count */
+#define BINARY u8_encode_bits(0x0, COUNT_ENCODING)
+/* Normal count */
+#define NORMAL_COUNT 0x0
+/* Range Limit */
+#define RANGE_LIMIT 0x1
+/* Non-recycle count */
+#define NON_RECYCLE_COUNT 0x2
+/* Modulo-N */
+#define MODULO_N 0x3
+/* Non-quadrature */
+#define NON_QUADRATURE 0x0
+/* Quadrature X1 */
+#define QUADRATURE_X1 0x1
+/* Quadrature X2 */
+#define QUADRATURE_X2 0x2
+/* Quadrature X4 */
+#define QUADRATURE_X4 0x3
+
+/*
+ * Input/Output Control Register
+ */
+#define AB_GATE BIT(0)
+#define LOAD_PIN BIT(1)
+#define FLG_PINS GENMASK(4, 3)
+/* Disable inputs A and B */
+#define DISABLE_AB u8_encode_bits(0x0, AB_GATE)
+/* Load Counter input */
+#define LOAD_CNTR 0x0
+/* FLG1 = CARRY(active low); FLG2 = BORROW(active low) */
+#define FLG1_CARRY_FLG2_BORROW 0x0
+/* FLG1 = COMPARE(active low); FLG2 = BORROW(active low) */
+#define FLG1_COMPARE_FLG2_BORROW 0x1
+/* FLG1 = Carry(active low)/Borrow(active low); FLG2 = U/D(active low) flag */
+#define FLG1_CARRYBORROW_FLG2_UD 0x2
+/* FLG1 = INDX (low pulse at INDEX pin active level); FLG2 = E flag */
+#define FLG1_INDX_FLG2_E 0x3
+
+/*
+ * INDEX CONTROL REGISTERS
+ */
+#define INDEX_MODE BIT(0)
+#define INDEX_POLARITY BIT(1)
+/* Disable Index mode */
+#define DISABLE_INDEX_MODE 0x0
+/* Enable Index mode */
+#define ENABLE_INDEX_MODE 0x1
+/* Negative Index Polarity */
+#define NEGATIVE_INDEX_POLARITY 0x0
+/* Positive Index Polarity */
+#define POSITIVE_INDEX_POLARITY 0x1
+
+/*
+ * Channel Operation Register
+ */
+#define COUNTERS_OPERATION BIT(0)
+#define INTERRUPT_FUNCTION BIT(2)
+/* Enable all Counters */
+#define ENABLE_COUNTERS u8_encode_bits(0x0, COUNTERS_OPERATION)
+/* Reset all Counters */
+#define RESET_COUNTERS u8_encode_bits(0x1, COUNTERS_OPERATION)
+/* Disable the interrupt function */
+#define DISABLE_INTERRUPT_FUNCTION u8_encode_bits(0x0, INTERRUPT_FUNCTION)
+/* Enable the interrupt function */
+#define ENABLE_INTERRUPT_FUNCTION u8_encode_bits(0x1, INTERRUPT_FUNCTION)
+/* Any write to the Channel Operation register clears any pending interrupts */
+#define CLEAR_PENDING_INTERRUPTS (ENABLE_COUNTERS | ENABLE_INTERRUPT_FUNCTION)
/* Each Counter is 24 bits wide */
#define LS7267_CNTR_MAX GENMASK(23, 0)
+static __always_inline int quad8_control_register_update(struct regmap *const map, u8 *const buf,
+ const size_t channel, const u8 val,
+ const u8 field)
+{
+ u8p_replace_bits(&buf[channel], val, field);
+ return regmap_write(map, QUAD8_CONTROL(channel), buf[channel]);
+}
+
static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
{
const struct quad8 *const priv = counter_priv(counter);
- unsigned int state;
+ int ret;
/* Only Index signal levels can be read */
if (signal->id < 16)
return -EINVAL;
- state = ioread8(&priv->reg->index_input_levels) & BIT(signal->id - 16);
+ ret = regmap_test_bits(priv->map, QUAD8_INDEX_INPUT_LEVELS, BIT(signal->id - 16));
+ if (ret < 0)
+ return ret;
- *level = (state) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
+ *level = (ret) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
return 0;
}
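
The 104-quad-8 rewrite replaces open-coded register constants with GENMASK()/u8_encode_bits() field definitions. A worked example of how those helpers compose a Counter Mode Register byte (binary encoding, normal count, quadrature X4); the 0x38 result is shown only to illustrate the arithmetic, not as a value the driver writes verbatim:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

static u8 quad8_example_cmr(void)
{
	return u8_encode_bits(0x1, GENMASK(6, 5)) |	/* SELECT_CMR    -> 0x20 */
	       u8_encode_bits(0x0, BIT(0)) |		/* BINARY        -> 0x00 */
	       u8_encode_bits(0x0, GENMASK(2, 1)) |	/* NORMAL_COUNT  -> 0x00 */
	       u8_encode_bits(0x3, GENMASK(4, 3));	/* QUADRATURE_X4 -> 0x18, total 0x38 */
}
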
@@ -154,65 +236,81 @@ static int quad8_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct quad8 *const priv = counter_priv(counter);
- struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
unsigned long irqflags;
- int i;
-
- *val = 0;
+ u8 value[3];
+ int ret;
spin_lock_irqsave(&priv->lock, irqflags);
- /* Reset Byte Pointer; transfer Counter to Output Latch */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
- &chan->control);
-
- for (i = 0; i < 3; i++)
- *val |= (unsigned long)ioread8(&chan->data) << (8 * i);
+ ret = regmap_write(priv->map, QUAD8_CONTROL(count->id),
+ SELECT_RLD | RESET_BP | TRANSFER_CNTR_TO_OL);
+ if (ret)
+ goto exit_unlock;
+ ret = regmap_noinc_read(priv->map, QUAD8_DATA(count->id), value, sizeof(value));
+exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ *val = get_unaligned_le24(value);
+
+ return ret;
+}
+
+static int quad8_preset_register_set(struct quad8 *const priv, const size_t id,
+ const unsigned long preset)
+{
+ u8 value[3];
+ int ret;
+
+ put_unaligned_le24(preset, value);
+
+ ret = regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_BP);
+ if (ret)
+ return ret;
+ return regmap_noinc_write(priv->map, QUAD8_DATA(id), value, sizeof(value));
+}
+
+static int quad8_flag_register_reset(struct quad8 *const priv, const size_t id)
+{
+ int ret;
+
+ ret = regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_BT_CT_CPT_S_IDX);
+ if (ret)
+ return ret;
+ return regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_E);
}
static int quad8_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
struct quad8 *const priv = counter_priv(counter);
- struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
unsigned long irqflags;
- int i;
+ int ret;
if (val > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
- /* Reset Byte Pointer */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
-
/* Counter can only be set via Preset Register */
- for (i = 0; i < 3; i++)
- iowrite8(val >> (8 * i), &chan->data);
+ ret = quad8_preset_register_set(priv, count->id, val);
+ if (ret)
+ goto exit_unlock;
+ ret = regmap_write(priv->map, QUAD8_CONTROL(count->id), SELECT_RLD | TRANSFER_PR_TO_CNTR);
+ if (ret)
+ goto exit_unlock;
- /* Transfer Preset Register to Counter */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, &chan->control);
-
- /* Reset Byte Pointer */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
+ ret = quad8_flag_register_reset(priv, count->id);
+ if (ret)
+ goto exit_unlock;
/* Set Preset Register back to original value */
- val = priv->preset[count->id];
- for (i = 0; i < 3; i++)
- iowrite8(val >> (8 * i), &chan->data);
-
- /* Reset Borrow, Carry, Compare, and Sign flags */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, &chan->control);
- /* Reset Error flag */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, &chan->control);
+ ret = quad8_preset_register_set(priv, count->id, priv->preset[count->id]);
+exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static const enum counter_function quad8_count_functions_list[] = {
@@ -225,19 +323,17 @@ static const enum counter_function quad8_count_functions_list[] = {
static int quad8_function_get(const struct quad8 *const priv, const size_t id,
enum counter_function *const function)
{
- if (!priv->quadrature_mode[id]) {
+ switch (u8_get_bits(priv->cmr[id], QUADRATURE_MODE)) {
+ case NON_QUADRATURE:
*function = COUNTER_FUNCTION_PULSE_DIRECTION;
return 0;
- }
-
- switch (priv->quadrature_scale[id]) {
- case 0:
+ case QUADRATURE_X1:
*function = COUNTER_FUNCTION_QUADRATURE_X1_A;
return 0;
- case 1:
+ case QUADRATURE_X2:
*function = COUNTER_FUNCTION_QUADRATURE_X2_A;
return 0;
- case 2:
+ case QUADRATURE_X4:
*function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
default:
@@ -269,60 +365,46 @@ static int quad8_function_write(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
const int id = count->id;
- unsigned int *const quadrature_mode = priv->quadrature_mode + id;
- unsigned int *const scale = priv->quadrature_scale + id;
- unsigned int *const synchronous_mode = priv->synchronous_mode + id;
- u8 __iomem *const control = &priv->reg->channel[id].control;
unsigned long irqflags;
unsigned int mode_cfg;
- unsigned int idr_cfg;
+ bool synchronous_mode;
+ int ret;
- spin_lock_irqsave(&priv->lock, irqflags);
-
- mode_cfg = priv->count_mode[id] << 1;
- idr_cfg = priv->index_polarity[id] << 1;
-
- if (function == COUNTER_FUNCTION_PULSE_DIRECTION) {
- *quadrature_mode = 0;
-
- /* Quadrature scaling only available in quadrature mode */
- *scale = 0;
+ switch (function) {
+ case COUNTER_FUNCTION_PULSE_DIRECTION:
+ mode_cfg = NON_QUADRATURE;
+ break;
+ case COUNTER_FUNCTION_QUADRATURE_X1_A:
+ mode_cfg = QUADRATURE_X1;
+ break;
+ case COUNTER_FUNCTION_QUADRATURE_X2_A:
+ mode_cfg = QUADRATURE_X2;
+ break;
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ mode_cfg = QUADRATURE_X4;
+ break;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
- /* Synchronous function not supported in non-quadrature mode */
- if (*synchronous_mode) {
- *synchronous_mode = 0;
- /* Disable synchronous function mode */
- iowrite8(QUAD8_CTR_IDR | idr_cfg, control);
- }
- } else {
- *quadrature_mode = 1;
+ spin_lock_irqsave(&priv->lock, irqflags);
- switch (function) {
- case COUNTER_FUNCTION_QUADRATURE_X1_A:
- *scale = 0;
- mode_cfg |= QUAD8_CMR_QUADRATURE_X1;
- break;
- case COUNTER_FUNCTION_QUADRATURE_X2_A:
- *scale = 1;
- mode_cfg |= QUAD8_CMR_QUADRATURE_X2;
- break;
- case COUNTER_FUNCTION_QUADRATURE_X4:
- *scale = 2;
- mode_cfg |= QUAD8_CMR_QUADRATURE_X4;
- break;
- default:
- /* should never reach this path */
- spin_unlock_irqrestore(&priv->lock, irqflags);
- return -EINVAL;
- }
+ /* Synchronous function not supported in non-quadrature mode */
+ synchronous_mode = u8_get_bits(priv->idr[id], INDEX_MODE) == ENABLE_INDEX_MODE;
+ if (synchronous_mode && mode_cfg == NON_QUADRATURE) {
+ ret = quad8_control_register_update(priv->map, priv->idr, id, DISABLE_INDEX_MODE,
+ INDEX_MODE);
+ if (ret)
+ goto exit_unlock;
}
- /* Load mode configuration to Counter Mode Register */
- iowrite8(QUAD8_CTR_CMR | mode_cfg, control);
+ ret = quad8_control_register_update(priv->map, priv->cmr, id, mode_cfg, QUADRATURE_MODE);
+exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static int quad8_direction_read(struct counter_device *counter,
@@ -330,13 +412,13 @@ static int quad8_direction_read(struct counter_device *counter,
enum counter_count_direction *direction)
{
const struct quad8 *const priv = counter_priv(counter);
- unsigned int ud_flag;
- u8 __iomem *const flag_addr = &priv->reg->channel[count->id].control;
-
- /* U/D flag: nonzero = up, zero = down */
- ud_flag = ioread8(flag_addr) & QUAD8_FLAG_UD;
+ unsigned int flag;
+ int ret;
- *direction = (ud_flag) ? COUNTER_COUNT_DIRECTION_FORWARD :
+ ret = regmap_read(priv->map, QUAD8_CONTROL(count->id), &flag);
+ if (ret)
+ return ret;
+ *direction = (u8_get_bits(flag, FLAG_UD) == UP) ? COUNTER_COUNT_DIRECTION_FORWARD :
COUNTER_COUNT_DIRECTION_BACKWARD;
return 0;
@@ -366,13 +448,13 @@ static int quad8_action_read(struct counter_device *counter,
const size_t signal_a_id = count->synapses[0].signal->id;
enum counter_count_direction direction;
+ /* Default action mode */
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
+
/* Handle Index signals */
if (synapse->signal->id >= 16) {
- if (!priv->preset_enable[count->id])
+ if (u8_get_bits(priv->ior[count->id], LOAD_PIN) == LOAD_CNTR)
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
- else
- *action = COUNTER_SYNAPSE_ACTION_NONE;
-
return 0;
}
@@ -392,9 +474,6 @@ static int quad8_action_read(struct counter_device *counter,
spin_unlock_irqrestore(&priv->lock, irqflags);
- /* Default action mode */
- *action = COUNTER_SYNAPSE_ACTION_NONE;
-
/* Determine action mode based on current count function mode */
switch (function) {
case COUNTER_FUNCTION_PULSE_DIRECTION:
@@ -422,67 +501,57 @@ static int quad8_action_read(struct counter_device *counter,
}
}
-enum {
- QUAD8_EVENT_CARRY = 0,
- QUAD8_EVENT_COMPARE = 1,
- QUAD8_EVENT_CARRY_BORROW = 2,
- QUAD8_EVENT_INDEX = 3,
-};
-
static int quad8_events_configure(struct counter_device *counter)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irq_enabled = 0;
unsigned long irqflags;
struct counter_event_node *event_node;
- unsigned int next_irq_trigger;
- unsigned long ior_cfg;
+ u8 flg_pins;
+ int ret;
spin_lock_irqsave(&priv->lock, irqflags);
list_for_each_entry(event_node, &counter->events_list, l) {
switch (event_node->event) {
case COUNTER_EVENT_OVERFLOW:
- next_irq_trigger = QUAD8_EVENT_CARRY;
+ flg_pins = FLG1_CARRY_FLG2_BORROW;
break;
case COUNTER_EVENT_THRESHOLD:
- next_irq_trigger = QUAD8_EVENT_COMPARE;
+ flg_pins = FLG1_COMPARE_FLG2_BORROW;
break;
case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
- next_irq_trigger = QUAD8_EVENT_CARRY_BORROW;
+ flg_pins = FLG1_CARRYBORROW_FLG2_UD;
break;
case COUNTER_EVENT_INDEX:
- next_irq_trigger = QUAD8_EVENT_INDEX;
+ flg_pins = FLG1_INDX_FLG2_E;
break;
default:
/* should never reach this path */
- spin_unlock_irqrestore(&priv->lock, irqflags);
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_unlock;
}
/* Enable IRQ line */
irq_enabled |= BIT(event_node->channel);
/* Skip configuration if it is the same as previously set */
- if (priv->irq_trigger[event_node->channel] == next_irq_trigger)
+ if (flg_pins == u8_get_bits(priv->ior[event_node->channel], FLG_PINS))
continue;
/* Save new IRQ function configuration */
- priv->irq_trigger[event_node->channel] = next_irq_trigger;
-
- /* Load configuration to I/O Control Register */
- ior_cfg = priv->ab_enable[event_node->channel] |
- priv->preset_enable[event_node->channel] << 1 |
- priv->irq_trigger[event_node->channel] << 3;
- iowrite8(QUAD8_CTR_IOR | ior_cfg,
- &priv->reg->channel[event_node->channel].control);
+ ret = quad8_control_register_update(priv->map, priv->ior, event_node->channel,
+ flg_pins, FLG_PINS);
+ if (ret)
+ goto exit_unlock;
}
- iowrite8(irq_enabled, &priv->reg->index_interrupt);
+ ret = regmap_write(priv->map, QUAD8_INDEX_INTERRUPT, irq_enabled);
+exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static int quad8_watch_validate(struct counter_device *counter,
@@ -531,7 +600,7 @@ static int quad8_index_polarity_get(struct counter_device *counter,
const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
- *index_polarity = priv->index_polarity[channel_id];
+ *index_polarity = u8_get_bits(priv->idr[channel_id], INDEX_POLARITY);
return 0;
}
@@ -542,22 +611,17 @@ static int quad8_index_polarity_set(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
- u8 __iomem *const control = &priv->reg->channel[channel_id].control;
unsigned long irqflags;
- unsigned int idr_cfg = index_polarity << 1;
+ int ret;
spin_lock_irqsave(&priv->lock, irqflags);
- idr_cfg |= priv->synchronous_mode[channel_id];
-
- priv->index_polarity[channel_id] = index_polarity;
-
- /* Load Index Control configuration to Index Control Register */
- iowrite8(QUAD8_CTR_IDR | idr_cfg, control);
+ ret = quad8_control_register_update(priv->map, priv->idr, channel_id, index_polarity,
+ INDEX_POLARITY);
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static int quad8_polarity_read(struct counter_device *counter,
@@ -571,7 +635,7 @@ static int quad8_polarity_read(struct counter_device *counter,
if (err)
return err;
- *polarity = (index_polarity) ? COUNTER_SIGNAL_POLARITY_POSITIVE :
+ *polarity = (index_polarity == POSITIVE_INDEX_POLARITY) ? COUNTER_SIGNAL_POLARITY_POSITIVE :
COUNTER_SIGNAL_POLARITY_NEGATIVE;
return 0;
@@ -581,7 +645,8 @@ static int quad8_polarity_write(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_polarity polarity)
{
- const u32 pol = (polarity == COUNTER_SIGNAL_POLARITY_POSITIVE) ? 1 : 0;
+ const u32 pol = (polarity == COUNTER_SIGNAL_POLARITY_POSITIVE) ? POSITIVE_INDEX_POLARITY :
+ NEGATIVE_INDEX_POLARITY;
return quad8_index_polarity_set(counter, signal, pol);
}
@@ -598,7 +663,7 @@ static int quad8_synchronous_mode_get(struct counter_device *counter,
const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
- *synchronous_mode = priv->synchronous_mode[channel_id];
+ *synchronous_mode = u8_get_bits(priv->idr[channel_id], INDEX_MODE);
return 0;
}
@@ -609,28 +674,26 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
- u8 __iomem *const control = &priv->reg->channel[channel_id].control;
+ u8 quadrature_mode;
unsigned long irqflags;
- unsigned int idr_cfg = synchronous_mode;
+ int ret;
spin_lock_irqsave(&priv->lock, irqflags);
- idr_cfg |= priv->index_polarity[channel_id] << 1;
-
/* Index function must be non-synchronous in non-quadrature mode */
- if (synchronous_mode && !priv->quadrature_mode[channel_id]) {
- spin_unlock_irqrestore(&priv->lock, irqflags);
- return -EINVAL;
+ quadrature_mode = u8_get_bits(priv->idr[channel_id], QUADRATURE_MODE);
+ if (synchronous_mode && quadrature_mode == NON_QUADRATURE) {
+ ret = -EINVAL;
+ goto exit_unlock;
}
- priv->synchronous_mode[channel_id] = synchronous_mode;
-
- /* Load Index Control configuration to Index Control Register */
- iowrite8(QUAD8_CTR_IDR | idr_cfg, control);
+ ret = quad8_control_register_update(priv->map, priv->idr, channel_id, synchronous_mode,
+ INDEX_MODE);
+exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static int quad8_count_floor_read(struct counter_device *counter,
@@ -648,18 +711,17 @@ static int quad8_count_mode_read(struct counter_device *counter,
{
const struct quad8 *const priv = counter_priv(counter);
- /* Map 104-QUAD-8 count mode to Generic Counter count mode */
- switch (priv->count_mode[count->id]) {
- case 0:
+ switch (u8_get_bits(priv->cmr[count->id], COUNT_MODE)) {
+ case NORMAL_COUNT:
*cnt_mode = COUNTER_COUNT_MODE_NORMAL;
break;
- case 1:
+ case RANGE_LIMIT:
*cnt_mode = COUNTER_COUNT_MODE_RANGE_LIMIT;
break;
- case 2:
+ case NON_RECYCLE_COUNT:
*cnt_mode = COUNTER_COUNT_MODE_NON_RECYCLE;
break;
- case 3:
+ case MODULO_N:
*cnt_mode = COUNTER_COUNT_MODE_MODULO_N;
break;
}
@@ -673,23 +735,21 @@ static int quad8_count_mode_write(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
unsigned int count_mode;
- unsigned int mode_cfg;
- u8 __iomem *const control = &priv->reg->channel[count->id].control;
unsigned long irqflags;
+ int ret;
- /* Map Generic Counter count mode to 104-QUAD-8 count mode */
switch (cnt_mode) {
case COUNTER_COUNT_MODE_NORMAL:
- count_mode = 0;
+ count_mode = NORMAL_COUNT;
break;
case COUNTER_COUNT_MODE_RANGE_LIMIT:
- count_mode = 1;
+ count_mode = RANGE_LIMIT;
break;
case COUNTER_COUNT_MODE_NON_RECYCLE:
- count_mode = 2;
+ count_mode = NON_RECYCLE_COUNT;
break;
case COUNTER_COUNT_MODE_MODULO_N:
- count_mode = 3;
+ count_mode = MODULO_N;
break;
default:
/* should never reach this path */
@@ -698,21 +758,12 @@ static int quad8_count_mode_write(struct counter_device *counter,
spin_lock_irqsave(&priv->lock, irqflags);
- priv->count_mode[count->id] = count_mode;
-
- /* Set count mode configuration value */
- mode_cfg = count_mode << 1;
-
- /* Add quadrature mode configuration */
- if (priv->quadrature_mode[count->id])
- mode_cfg |= (priv->quadrature_scale[count->id] + 1) << 3;
-
- /* Load mode configuration to Counter Mode Register */
- iowrite8(QUAD8_CTR_CMR | mode_cfg, control);
+ ret = quad8_control_register_update(priv->map, priv->cmr, count->id, count_mode,
+ COUNT_MODE);
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static int quad8_count_enable_read(struct counter_device *counter,
@@ -720,7 +771,7 @@ static int quad8_count_enable_read(struct counter_device *counter,
{
const struct quad8 *const priv = counter_priv(counter);
- *enable = priv->ab_enable[count->id];
+ *enable = u8_get_bits(priv->ior[count->id], AB_GATE);
return 0;
}
@@ -729,23 +780,16 @@ static int quad8_count_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
struct quad8 *const priv = counter_priv(counter);
- u8 __iomem *const control = &priv->reg->channel[count->id].control;
unsigned long irqflags;
- unsigned int ior_cfg;
+ int ret;
spin_lock_irqsave(&priv->lock, irqflags);
- priv->ab_enable[count->id] = enable;
-
- ior_cfg = enable | priv->preset_enable[count->id] << 1 |
- priv->irq_trigger[count->id] << 3;
-
- /* Load I/O control configuration */
- iowrite8(QUAD8_CTR_IOR | ior_cfg, control);
+ ret = quad8_control_register_update(priv->map, priv->ior, count->id, enable, AB_GATE);
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static const char *const quad8_noise_error_states[] = {
@@ -757,9 +801,13 @@ static int quad8_error_noise_get(struct counter_device *counter,
struct counter_count *count, u32 *noise_error)
{
const struct quad8 *const priv = counter_priv(counter);
- u8 __iomem *const flag_addr = &priv->reg->channel[count->id].control;
+ unsigned int flag;
+ int ret;
- *noise_error = !!(ioread8(flag_addr) & QUAD8_FLAG_E);
+ ret = regmap_read(priv->map, QUAD8_CONTROL(count->id), &flag);
+ if (ret)
+ return ret;
+ *noise_error = u8_get_bits(flag, FLAG_E);
return 0;
}
@@ -774,38 +822,24 @@ static int quad8_count_preset_read(struct counter_device *counter,
return 0;
}
-static void quad8_preset_register_set(struct quad8 *const priv, const int id,
- const unsigned int preset)
-{
- struct channel_reg __iomem *const chan = priv->reg->channel + id;
- int i;
-
- priv->preset[id] = preset;
-
- /* Reset Byte Pointer */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
-
- /* Set Preset Register */
- for (i = 0; i < 3; i++)
- iowrite8(preset >> (8 * i), &chan->data);
-}
-
static int quad8_count_preset_write(struct counter_device *counter,
struct counter_count *count, u64 preset)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
+ int ret;
if (preset > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
- quad8_preset_register_set(priv, count->id, preset);
+ priv->preset[count->id] = preset;
+ ret = quad8_preset_register_set(priv, count->id, preset);
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static int quad8_count_ceiling_read(struct counter_device *counter,
@@ -817,9 +851,9 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
spin_lock_irqsave(&priv->lock, irqflags);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
- switch (priv->count_mode[count->id]) {
- case 1:
- case 3:
+ switch (u8_get_bits(priv->cmr[count->id], COUNT_MODE)) {
+ case RANGE_LIMIT:
+ case MODULO_N:
*ceiling = priv->preset[count->id];
break;
default:
@@ -837,6 +871,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
+ int ret;
if (ceiling > LS7267_CNTR_MAX)
return -ERANGE;
@@ -844,17 +879,20 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
spin_lock_irqsave(&priv->lock, irqflags);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
- switch (priv->count_mode[count->id]) {
- case 1:
- case 3:
- quad8_preset_register_set(priv, count->id, ceiling);
- spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ switch (u8_get_bits(priv->cmr[count->id], COUNT_MODE)) {
+ case RANGE_LIMIT:
+ case MODULO_N:
+ priv->preset[count->id] = ceiling;
+ ret = quad8_preset_register_set(priv, count->id, ceiling);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
spin_unlock_irqrestore(&priv->lock, irqflags);
- return -EINVAL;
+ return ret;
}
static int quad8_count_preset_enable_read(struct counter_device *counter,
@@ -863,7 +901,8 @@ static int quad8_count_preset_enable_read(struct counter_device *counter,
{
const struct quad8 *const priv = counter_priv(counter);
- *preset_enable = !priv->preset_enable[count->id];
+ /* Preset enable is active low in Input/Output Control register */
+ *preset_enable = !u8_get_bits(priv->ior[count->id], LOAD_PIN);
return 0;
}
@@ -873,26 +912,18 @@ static int quad8_count_preset_enable_write(struct counter_device *counter,
u8 preset_enable)
{
struct quad8 *const priv = counter_priv(counter);
- u8 __iomem *const control = &priv->reg->channel[count->id].control;
unsigned long irqflags;
- unsigned int ior_cfg;
-
- /* Preset enable is active low in Input/Output Control register */
- preset_enable = !preset_enable;
+ int ret;
spin_lock_irqsave(&priv->lock, irqflags);
- priv->preset_enable[count->id] = preset_enable;
-
- ior_cfg = priv->ab_enable[count->id] | preset_enable << 1 |
- priv->irq_trigger[count->id] << 3;
-
- /* Load I/O control configuration to Input / Output Control Register */
- iowrite8(QUAD8_CTR_IOR | ior_cfg, control);
+ /* Preset enable is active low in Input/Output Control register */
+ ret = quad8_control_register_update(priv->map, priv->ior, count->id, !preset_enable,
+ LOAD_PIN);
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static int quad8_signal_cable_fault_read(struct counter_device *counter,
@@ -903,7 +934,7 @@ static int quad8_signal_cable_fault_read(struct counter_device *counter,
const size_t channel_id = signal->id / 2;
unsigned long irqflags;
bool disabled;
- unsigned int status;
+ int ret;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -914,13 +945,16 @@ static int quad8_signal_cable_fault_read(struct counter_device *counter,
return -EINVAL;
}
- /* Logic 0 = cable fault */
- status = ioread8(&priv->reg->cable_status);
+ ret = regmap_test_bits(priv->map, QUAD8_CABLE_STATUS, BIT(channel_id));
+ if (ret < 0) {
+ spin_unlock_irqrestore(&priv->lock, irqflags);
+ return ret;
+ }
spin_unlock_irqrestore(&priv->lock, irqflags);
- /* Mask respective channel and invert logic */
- *cable_fault = !(status & BIT(channel_id));
+ /* Logic 0 = cable fault */
+ *cable_fault = !ret;
return 0;
}
@@ -945,6 +979,7 @@ static int quad8_signal_cable_fault_enable_write(struct counter_device *counter,
const size_t channel_id = signal->id / 2;
unsigned long irqflags;
unsigned int cable_fault_enable;
+ int ret;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -956,11 +991,11 @@ static int quad8_signal_cable_fault_enable_write(struct counter_device *counter,
/* Enable is active low in Differential Encoder Cable Status register */
cable_fault_enable = ~priv->cable_fault_enable;
- iowrite8(cable_fault_enable, &priv->reg->cable_status);
+ ret = regmap_write(priv->map, QUAD8_CABLE_STATUS, cable_fault_enable);
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static int quad8_signal_fck_prescaler_read(struct counter_device *counter,
@@ -974,30 +1009,37 @@ static int quad8_signal_fck_prescaler_read(struct counter_device *counter,
return 0;
}
+static int quad8_filter_clock_prescaler_set(struct quad8 *const priv, const size_t id,
+ const u8 prescaler)
+{
+ int ret;
+
+ ret = regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_BP);
+ if (ret)
+ return ret;
+ ret = regmap_write(priv->map, QUAD8_DATA(id), prescaler);
+ if (ret)
+ return ret;
+ return regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | TRANSFER_PR0_TO_PSC);
+}
+
static int quad8_signal_fck_prescaler_write(struct counter_device *counter,
struct counter_signal *signal,
u8 prescaler)
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
- struct channel_reg __iomem *const chan = priv->reg->channel + channel_id;
unsigned long irqflags;
+ int ret;
spin_lock_irqsave(&priv->lock, irqflags);
priv->fck_prescaler[channel_id] = prescaler;
-
- /* Reset Byte Pointer */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
-
- /* Set filter clock factor */
- iowrite8(prescaler, &chan->data);
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
- &chan->control);
+ ret = quad8_filter_clock_prescaler_set(priv, channel_id, prescaler);
spin_unlock_irqrestore(&priv->lock, irqflags);
- return 0;
+ return ret;
}
static struct counter_comp quad8_signal_ext[] = {
@@ -1150,77 +1192,93 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
{
struct counter_device *counter = private;
struct quad8 *const priv = counter_priv(counter);
+ unsigned int status;
unsigned long irq_status;
unsigned long channel;
+ unsigned int flg_pins;
u8 event;
+ int ret;
- irq_status = ioread8(&priv->reg->interrupt_status);
- if (!irq_status)
+ ret = regmap_read(priv->map, QUAD8_INTERRUPT_STATUS, &status);
+ if (ret)
+ return ret;
+ if (!status)
return IRQ_NONE;
+ irq_status = status;
for_each_set_bit(channel, &irq_status, QUAD8_NUM_COUNTERS) {
- switch (priv->irq_trigger[channel]) {
- case QUAD8_EVENT_CARRY:
+ flg_pins = u8_get_bits(priv->ior[channel], FLG_PINS);
+ switch (flg_pins) {
+ case FLG1_CARRY_FLG2_BORROW:
event = COUNTER_EVENT_OVERFLOW;
break;
- case QUAD8_EVENT_COMPARE:
+ case FLG1_COMPARE_FLG2_BORROW:
event = COUNTER_EVENT_THRESHOLD;
break;
- case QUAD8_EVENT_CARRY_BORROW:
+ case FLG1_CARRYBORROW_FLG2_UD:
event = COUNTER_EVENT_OVERFLOW_UNDERFLOW;
break;
- case QUAD8_EVENT_INDEX:
+ case FLG1_INDX_FLG2_E:
event = COUNTER_EVENT_INDEX;
break;
default:
/* should never reach this path */
WARN_ONCE(true, "invalid interrupt trigger function %u configured for channel %lu\n",
- priv->irq_trigger[channel], channel);
+ flg_pins, channel);
continue;
}
counter_push_event(counter, event, channel);
}
- /* Clear pending interrupts on device */
- iowrite8(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, &priv->reg->channel_oper);
+ ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION, CLEAR_PENDING_INTERRUPTS);
+ if (ret)
+ return ret;
return IRQ_HANDLED;
}
-static void quad8_init_counter(struct channel_reg __iomem *const chan)
+static int quad8_init_counter(struct quad8 *const priv, const size_t channel)
{
- unsigned long i;
+ int ret;
+
+ ret = quad8_filter_clock_prescaler_set(priv, channel, 0);
+ if (ret)
+ return ret;
+ ret = quad8_preset_register_set(priv, channel, 0);
+ if (ret)
+ return ret;
+ ret = quad8_flag_register_reset(priv, channel);
+ if (ret)
+ return ret;
- /* Reset Byte Pointer */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
- /* Reset filter clock factor */
- iowrite8(0, &chan->data);
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
- &chan->control);
- /* Reset Byte Pointer */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
- /* Reset Preset Register */
- for (i = 0; i < 3; i++)
- iowrite8(0x00, &chan->data);
- /* Reset Borrow, Carry, Compare, and Sign flags */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, &chan->control);
- /* Reset Error flag */
- iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, &chan->control);
/* Binary encoding; Normal count; non-quadrature mode */
- iowrite8(QUAD8_CTR_CMR, &chan->control);
+ priv->cmr[channel] = SELECT_CMR | BINARY | u8_encode_bits(NORMAL_COUNT, COUNT_MODE) |
+ u8_encode_bits(NON_QUADRATURE, QUADRATURE_MODE);
+ ret = regmap_write(priv->map, QUAD8_CONTROL(channel), priv->cmr[channel]);
+ if (ret)
+ return ret;
+
/* Disable A and B inputs; preset on index; FLG1 as Carry */
- iowrite8(QUAD8_CTR_IOR, &chan->control);
+ priv->ior[channel] = SELECT_IOR | DISABLE_AB | u8_encode_bits(LOAD_CNTR, LOAD_PIN) |
+ u8_encode_bits(FLG1_CARRY_FLG2_BORROW, FLG_PINS);
+ ret = regmap_write(priv->map, QUAD8_CONTROL(channel), priv->ior[channel]);
+ if (ret)
+ return ret;
+
/* Disable index function; negative index polarity */
- iowrite8(QUAD8_CTR_IDR, &chan->control);
+ priv->idr[channel] = SELECT_IDR | u8_encode_bits(DISABLE_INDEX_MODE, INDEX_MODE) |
+ u8_encode_bits(NEGATIVE_INDEX_POLARITY, INDEX_POLARITY);
+ return regmap_write(priv->map, QUAD8_CONTROL(channel), priv->idr[channel]);
}
static int quad8_probe(struct device *dev, unsigned int id)
{
struct counter_device *counter;
struct quad8 *priv;
+ void __iomem *regs;
unsigned long i;
- int err;
+ int ret;
if (!devm_request_region(dev, base[id], QUAD8_EXTENT, dev_name(dev))) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -1233,10 +1291,15 @@ static int quad8_probe(struct device *dev, unsigned int id)
return -ENOMEM;
priv = counter_priv(counter);
- priv->reg = devm_ioport_map(dev, base[id], QUAD8_EXTENT);
- if (!priv->reg)
+ regs = devm_ioport_map(dev, base[id], QUAD8_EXTENT);
+ if (!regs)
return -ENOMEM;
+ priv->map = devm_regmap_init_mmio(dev, regs, &quad8_regmap_config);
+ if (IS_ERR(priv->map))
+ return dev_err_probe(dev, PTR_ERR(priv->map),
+ "Unable to initialize register map\n");
+
/* Initialize Counter device and driver data */
counter->name = dev_name(dev);
counter->parent = dev;
@@ -1249,25 +1312,38 @@ static int quad8_probe(struct device *dev, unsigned int id)
spin_lock_init(&priv->lock);
/* Reset Index/Interrupt Register */
- iowrite8(0x00, &priv->reg->index_interrupt);
+ ret = regmap_write(priv->map, QUAD8_INDEX_INTERRUPT, 0x00);
+ if (ret)
+ return ret;
/* Reset all counters and disable interrupt function */
- iowrite8(QUAD8_CHAN_OP_RESET_COUNTERS, &priv->reg->channel_oper);
+ ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION,
+ RESET_COUNTERS | DISABLE_INTERRUPT_FUNCTION);
+ if (ret)
+ return ret;
/* Set initial configuration for all counters */
- for (i = 0; i < QUAD8_NUM_COUNTERS; i++)
- quad8_init_counter(priv->reg->channel + i);
+ for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
+ ret = quad8_init_counter(priv, i);
+ if (ret)
+ return ret;
+ }
/* Disable Differential Encoder Cable Status for all channels */
- iowrite8(0xFF, &priv->reg->cable_status);
+ ret = regmap_write(priv->map, QUAD8_CABLE_STATUS, GENMASK(7, 0));
+ if (ret)
+ return ret;
/* Enable all counters and enable interrupt function */
- iowrite8(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, &priv->reg->channel_oper);
+ ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION,
+ ENABLE_COUNTERS | ENABLE_INTERRUPT_FUNCTION);
+ if (ret)
+ return ret;
- err = devm_request_irq(&counter->dev, irq[id], quad8_irq_handler,
+ ret = devm_request_irq(&counter->dev, irq[id], quad8_irq_handler,
IRQF_SHARED, counter->name, counter);
- if (err)
- return err;
+ if (ret)
+ return ret;
- err = devm_counter_add(dev, counter);
- if (err < 0)
- return dev_err_probe(dev, err, "Failed to add counter\n");
+ ret = devm_counter_add(dev, counter);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add counter\n");
return 0;
}
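The probe path above now goes through a regmap: devm_regmap_init_mmio() is handed the ioport-mapped window together with quad8_regmap_config, whose definition sits earlier in the patch and is not visible in this hunk. As a rough sketch only — every field value below is an assumption for illustration, not the patch's actual table — a register map for this device (8-bit control/data registers packed at byte offsets inside QUAD8_EXTENT) could look like:

static const struct regmap_config quad8_regmap_config_sketch = {
	.reg_bits	= 8,	/* register offsets fit in one byte */
	.reg_stride	= 1,	/* control/data registers at consecutive offsets */
	.val_bits	= 8,	/* the 104-QUAD-8 exposes 8-bit ports */
	/* cache policy left at the uncached default; an assumption, not
	 * something taken from the patch */
};

Whatever the real configuration is, it has to keep reads uncached (or mark the flag registers volatile), because quad8_direction_read() and quad8_error_noise_get() above rely on regmap_read() of QUAD8_CONTROL() returning the current hardware state.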
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 4228be917038..bca21df51168 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -10,20 +10,37 @@ menuconfig COUNTER
interface. You only need to enable this, if you also want to enable
one or more of the counter device drivers below.
+config I8254
+ tristate
+ select COUNTER
+ select REGMAP
+ help
+ Enables support for the i8254 interface library functions. The i8254
+ interface library provides functions to facilitate communication with
+ interfaces compatible with the venerable Intel 8254 Programmable
+ Interval Timer (PIT). The Intel 825x family of chips was first
+ released in the early 1980s, but compatible interfaces are nowadays
+ typically found embedded in larger VLSI processing chips and FPGA
+ components.
+
+ If built as a module, its name will be i8254.
+
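I8254 is a promptless tristate, so nothing in menuconfig turns it on directly; a driver that wants the library pulls it in with select, along the lines of the hypothetical consumer entry below (the symbol name and prompt are invented for illustration):

config EXAMPLE_8254_CARD
	tristate "Hypothetical card with an on-board 8254 timer"
	select I8254
	help
	  Illustrative only: selecting I8254 builds the interface library so
	  that devm_i8254_regmap_register() is available to this driver.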
if COUNTER
config 104_QUAD_8
tristate "ACCES 104-QUAD-8 driver"
depends on (PC104 && X86) || COMPILE_TEST
+ depends on HAS_IOPORT_MAP
select ISA_BUS_API
+ select REGMAP_MMIO
help
Say yes here to build support for the ACCES 104-QUAD-8 quadrature
encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
A counter's respective error flag may be cleared by performing a write
- operation on the respective count value attribute. Although the
- 104-QUAD-8 counters have a 25-bit range, only the lower 24 bits may be
- set, either directly or via the counter's preset attribute.
+ operation on the respective count value attribute. The 104-QUAD-8
+ counters may be set either directly or via the counter's preset
+ attribute.
The base port addresses for the devices may be configured via the base
array module parameter. The interrupt line numbers for the devices may
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index 933fdd50b3e4..fa3c1d08f706 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_COUNTER) += counter.o
counter-y := counter-core.o counter-sysfs.o counter-chrdev.o
+obj-$(CONFIG_I8254) += i8254.o
obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
obj-$(CONFIG_INTERRUPT_CNT) += interrupt-cnt.o
obj-$(CONFIG_RZ_MTU3_CNT) += rz-mtu3-cnt.o
diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c
index b9efe66f9f8d..42c523343d32 100644
--- a/drivers/counter/counter-sysfs.c
+++ b/drivers/counter/counter-sysfs.c
@@ -88,7 +88,13 @@ static const char *const counter_count_mode_str[] = {
[COUNTER_COUNT_MODE_NORMAL] = "normal",
[COUNTER_COUNT_MODE_RANGE_LIMIT] = "range limit",
[COUNTER_COUNT_MODE_NON_RECYCLE] = "non-recycle",
- [COUNTER_COUNT_MODE_MODULO_N] = "modulo-n"
+ [COUNTER_COUNT_MODE_MODULO_N] = "modulo-n",
+ [COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT] = "interrupt on terminal count",
+ [COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT] = "hardware retriggerable one-shot",
+ [COUNTER_COUNT_MODE_RATE_GENERATOR] = "rate generator",
+ [COUNTER_COUNT_MODE_SQUARE_WAVE_MODE] = "square wave mode",
+ [COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE] = "software triggered strobe",
+ [COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE] = "hardware triggered strobe",
};
static const char *const counter_signal_polarity_str[] = {
diff --git a/drivers/counter/i8254.c b/drivers/counter/i8254.c
new file mode 100644
index 000000000000..c41e4fdc9601
--- /dev/null
+++ b/drivers/counter/i8254.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel 8254 Programmable Interval Timer
+ * Copyright (C) William Breathitt Gray
+ */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/counter.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/i8254.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#include <asm/unaligned.h>
+
+#define I8254_COUNTER_REG(_counter) (_counter)
+#define I8254_CONTROL_REG 0x3
+
+#define I8254_SC GENMASK(7, 6)
+#define I8254_RW GENMASK(5, 4)
+#define I8254_M GENMASK(3, 1)
+#define I8254_CONTROL(_sc, _rw, _m) \
+ (u8_encode_bits(_sc, I8254_SC) | u8_encode_bits(_rw, I8254_RW) | \
+ u8_encode_bits(_m, I8254_M))
+
+#define I8254_RW_TWO_BYTE 0x3
+#define I8254_MODE_INTERRUPT_ON_TERMINAL_COUNT 0
+#define I8254_MODE_HARDWARE_RETRIGGERABLE_ONESHOT 1
+#define I8254_MODE_RATE_GENERATOR 2
+#define I8254_MODE_SQUARE_WAVE_MODE 3
+#define I8254_MODE_SOFTWARE_TRIGGERED_STROBE 4
+#define I8254_MODE_HARDWARE_TRIGGERED_STROBE 5
+
+#define I8254_COUNTER_LATCH(_counter) I8254_CONTROL(_counter, 0x0, 0x0)
+#define I8254_PROGRAM_COUNTER(_counter, _mode) I8254_CONTROL(_counter, I8254_RW_TWO_BYTE, _mode)
+
+#define I8254_NUM_COUNTERS 3
+
+/**
+ * struct i8254 - I8254 device private data structure
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @preset: array of Counter Register states
+ * @out_mode: array of mode configuration states
+ * @map: Regmap for the device
+ */
+struct i8254 {
+ struct mutex lock;
+ u16 preset[I8254_NUM_COUNTERS];
+ u8 out_mode[I8254_NUM_COUNTERS];
+ struct regmap *map;
+};
+
+static int i8254_count_read(struct counter_device *const counter, struct counter_count *const count,
+ u64 *const val)
+{
+ struct i8254 *const priv = counter_priv(counter);
+ int ret;
+ u8 value[2];
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_write(priv->map, I8254_CONTROL_REG, I8254_COUNTER_LATCH(count->id));
+ if (ret) {
+ mutex_unlock(&priv->lock);
+ return ret;
+ }
+ ret = regmap_noinc_read(priv->map, I8254_COUNTER_REG(count->id), value, sizeof(value));
+ if (ret) {
+ mutex_unlock(&priv->lock);
+ return ret;
+ }
+
+ mutex_unlock(&priv->lock);
+
+ *val = get_unaligned_le16(value);
+
+ return ret;
+}
+
+static int i8254_function_read(struct counter_device *const counter,
+ struct counter_count *const count,
+ enum counter_function *const function)
+{
+ *function = COUNTER_FUNCTION_DECREASE;
+ return 0;
+}
+
+#define I8254_SYNAPSES_PER_COUNT 2
+#define I8254_SIGNAL_ID_CLK 0
+#define I8254_SIGNAL_ID_GATE 1
+
+static int i8254_action_read(struct counter_device *const counter,
+ struct counter_count *const count,
+ struct counter_synapse *const synapse,
+ enum counter_synapse_action *const action)
+{
+ struct i8254 *const priv = counter_priv(counter);
+
+ switch (synapse->signal->id % I8254_SYNAPSES_PER_COUNT) {
+ case I8254_SIGNAL_ID_CLK:
+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
+ return 0;
+ case I8254_SIGNAL_ID_GATE:
+ switch (priv->out_mode[count->id]) {
+ case I8254_MODE_HARDWARE_RETRIGGERABLE_ONESHOT:
+ case I8254_MODE_RATE_GENERATOR:
+ case I8254_MODE_SQUARE_WAVE_MODE:
+ case I8254_MODE_HARDWARE_TRIGGERED_STROBE:
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ return 0;
+ default:
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
+ return 0;
+ }
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
+}
+
+static int i8254_count_ceiling_read(struct counter_device *const counter,
+ struct counter_count *const count, u64 *const ceiling)
+{
+ struct i8254 *const priv = counter_priv(counter);
+
+ mutex_lock(&priv->lock);
+
+ switch (priv->out_mode[count->id]) {
+ case I8254_MODE_RATE_GENERATOR:
+ /* Rate Generator decrements 0 by one and the counter "wraps around" */
+ *ceiling = (priv->preset[count->id] == 0) ? U16_MAX : priv->preset[count->id];
+ break;
+ case I8254_MODE_SQUARE_WAVE_MODE:
+ if (priv->preset[count->id] % 2)
+ *ceiling = priv->preset[count->id] - 1;
+ else if (priv->preset[count->id] == 0)
+ /* Square Wave Mode decrements 0 by two and the counter "wraps around" */
+ *ceiling = U16_MAX - 1;
+ else
+ *ceiling = priv->preset[count->id];
+ break;
+ default:
+ *ceiling = U16_MAX;
+ break;
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int i8254_count_mode_read(struct counter_device *const counter,
+ struct counter_count *const count,
+ enum counter_count_mode *const count_mode)
+{
+ const struct i8254 *const priv = counter_priv(counter);
+
+ switch (priv->out_mode[count->id]) {
+ case I8254_MODE_INTERRUPT_ON_TERMINAL_COUNT:
+ *count_mode = COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT;
+ return 0;
+ case I8254_MODE_HARDWARE_RETRIGGERABLE_ONESHOT:
+ *count_mode = COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT;
+ return 0;
+ case I8254_MODE_RATE_GENERATOR:
+ *count_mode = COUNTER_COUNT_MODE_RATE_GENERATOR;
+ return 0;
+ case I8254_MODE_SQUARE_WAVE_MODE:
+ *count_mode = COUNTER_COUNT_MODE_SQUARE_WAVE_MODE;
+ return 0;
+ case I8254_MODE_SOFTWARE_TRIGGERED_STROBE:
+ *count_mode = COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE;
+ return 0;
+ case I8254_MODE_HARDWARE_TRIGGERED_STROBE:
+ *count_mode = COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE;
+ return 0;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
+}
+
+static int i8254_count_mode_write(struct counter_device *const counter,
+ struct counter_count *const count,
+ const enum counter_count_mode count_mode)
+{
+ struct i8254 *const priv = counter_priv(counter);
+ u8 out_mode;
+ int ret;
+
+ switch (count_mode) {
+ case COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT:
+ out_mode = I8254_MODE_INTERRUPT_ON_TERMINAL_COUNT;
+ break;
+ case COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT:
+ out_mode = I8254_MODE_HARDWARE_RETRIGGERABLE_ONESHOT;
+ break;
+ case COUNTER_COUNT_MODE_RATE_GENERATOR:
+ out_mode = I8254_MODE_RATE_GENERATOR;
+ break;
+ case COUNTER_COUNT_MODE_SQUARE_WAVE_MODE:
+ out_mode = I8254_MODE_SQUARE_WAVE_MODE;
+ break;
+ case COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE:
+ out_mode = I8254_MODE_SOFTWARE_TRIGGERED_STROBE;
+ break;
+ case COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE:
+ out_mode = I8254_MODE_HARDWARE_TRIGGERED_STROBE;
+ break;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->lock);
+
+ /* Counter Register is cleared when the counter is programmed */
+ priv->preset[count->id] = 0;
+ priv->out_mode[count->id] = out_mode;
+ ret = regmap_write(priv->map, I8254_CONTROL_REG,
+ I8254_PROGRAM_COUNTER(count->id, out_mode));
+
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int i8254_count_floor_read(struct counter_device *const counter,
+ struct counter_count *const count, u64 *const floor)
+{
+ struct i8254 *const priv = counter_priv(counter);
+
+ mutex_lock(&priv->lock);
+
+ switch (priv->out_mode[count->id]) {
+ case I8254_MODE_RATE_GENERATOR:
+ /* counter is always reloaded after 1, but 0 is a possible reload value */
+ *floor = (priv->preset[count->id] == 0) ? 0 : 1;
+ break;
+ case I8254_MODE_SQUARE_WAVE_MODE:
+ /* counter is always reloaded after 2 for even preset values */
+ *floor = (priv->preset[count->id] % 2 || priv->preset[count->id] == 0) ? 0 : 2;
+ break;
+ default:
+ *floor = 0;
+ break;
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int i8254_count_preset_read(struct counter_device *const counter,
+ struct counter_count *const count, u64 *const preset)
+{
+ const struct i8254 *const priv = counter_priv(counter);
+
+ *preset = priv->preset[count->id];
+
+ return 0;
+}
+
+static int i8254_count_preset_write(struct counter_device *const counter,
+ struct counter_count *const count, const u64 preset)
+{
+ struct i8254 *const priv = counter_priv(counter);
+ int ret;
+ u8 value[2];
+
+ if (preset > U16_MAX)
+ return -ERANGE;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->out_mode[count->id] == I8254_MODE_RATE_GENERATOR ||
+ priv->out_mode[count->id] == I8254_MODE_SQUARE_WAVE_MODE) {
+ if (preset == 1) {
+ mutex_unlock(&priv->lock);
+ return -EINVAL;
+ }
+ }
+
+ priv->preset[count->id] = preset;
+
+ put_unaligned_le16(preset, value);
+ ret = regmap_noinc_write(priv->map, I8254_COUNTER_REG(count->id), value, 2);
+
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int i8254_init_hw(struct regmap *const map)
+{
+ unsigned long i;
+ int ret;
+
+ for (i = 0; i < I8254_NUM_COUNTERS; i++) {
+ /* Initialize each counter to Mode 0 */
+ ret = regmap_write(map, I8254_CONTROL_REG,
+ I8254_PROGRAM_COUNTER(i, I8254_MODE_INTERRUPT_ON_TERMINAL_COUNT));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct counter_ops i8254_ops = {
+ .count_read = i8254_count_read,
+ .function_read = i8254_function_read,
+ .action_read = i8254_action_read,
+};
+
+#define I8254_SIGNAL(_id, _name) { \
+ .id = (_id), \
+ .name = (_name), \
+}
+
+static struct counter_signal i8254_signals[] = {
+ I8254_SIGNAL(0, "CLK 0"), I8254_SIGNAL(1, "GATE 0"),
+ I8254_SIGNAL(2, "CLK 1"), I8254_SIGNAL(3, "GATE 1"),
+ I8254_SIGNAL(4, "CLK 2"), I8254_SIGNAL(5, "GATE 2"),
+};
+
+static const enum counter_synapse_action i8254_clk_actions[] = {
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+};
+static const enum counter_synapse_action i8254_gate_actions[] = {
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+};
+
+#define I8254_SYNAPSES_BASE(_id) ((_id) * I8254_SYNAPSES_PER_COUNT)
+#define I8254_SYNAPSE_CLK(_id) { \
+ .actions_list = i8254_clk_actions, \
+ .num_actions = ARRAY_SIZE(i8254_clk_actions), \
+ .signal = &i8254_signals[I8254_SYNAPSES_BASE(_id) + 0], \
+}
+#define I8254_SYNAPSE_GATE(_id) { \
+ .actions_list = i8254_gate_actions, \
+ .num_actions = ARRAY_SIZE(i8254_gate_actions), \
+ .signal = &i8254_signals[I8254_SYNAPSES_BASE(_id) + 1], \
+}
+
+static struct counter_synapse i8254_synapses[] = {
+ I8254_SYNAPSE_CLK(0), I8254_SYNAPSE_GATE(0),
+ I8254_SYNAPSE_CLK(1), I8254_SYNAPSE_GATE(1),
+ I8254_SYNAPSE_CLK(2), I8254_SYNAPSE_GATE(2),
+};
+
+static const enum counter_function i8254_functions_list[] = {
+ COUNTER_FUNCTION_DECREASE,
+};
+
+static const enum counter_count_mode i8254_count_modes[] = {
+ COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT,
+ COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT,
+ COUNTER_COUNT_MODE_RATE_GENERATOR,
+ COUNTER_COUNT_MODE_SQUARE_WAVE_MODE,
+ COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE,
+ COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE,
+};
+
+static DEFINE_COUNTER_AVAILABLE(i8254_count_modes_available, i8254_count_modes);
+
+static struct counter_comp i8254_count_ext[] = {
+ COUNTER_COMP_CEILING(i8254_count_ceiling_read, NULL),
+ COUNTER_COMP_COUNT_MODE(i8254_count_mode_read, i8254_count_mode_write,
+ i8254_count_modes_available),
+ COUNTER_COMP_FLOOR(i8254_count_floor_read, NULL),
+ COUNTER_COMP_PRESET(i8254_count_preset_read, i8254_count_preset_write),
+};
+
+#define I8254_COUNT(_id, _name) { \
+ .id = (_id), \
+ .name = (_name), \
+ .functions_list = i8254_functions_list, \
+ .num_functions = ARRAY_SIZE(i8254_functions_list), \
+ .synapses = &i8254_synapses[I8254_SYNAPSES_BASE(_id)], \
+ .num_synapses = I8254_SYNAPSES_PER_COUNT, \
+ .ext = i8254_count_ext, \
+ .num_ext = ARRAY_SIZE(i8254_count_ext) \
+}
+
+static struct counter_count i8254_counts[I8254_NUM_COUNTERS] = {
+ I8254_COUNT(0, "Counter 0"), I8254_COUNT(1, "Counter 1"), I8254_COUNT(2, "Counter 2"),
+};
+
+/**
+ * devm_i8254_regmap_register - Register an i8254 Counter device
+ * @dev: device that is registering this i8254 Counter device
+ * @config: configuration for i8254_regmap_config
+ *
+ * Registers an Intel 8254 Programmable Interval Timer Counter device. Returns 0 on success and
+ * negative error number on failure.
+ */
+int devm_i8254_regmap_register(struct device *const dev,
+ const struct i8254_regmap_config *const config)
+{
+ struct counter_device *counter;
+ struct i8254 *priv;
+ int err;
+
+ if (!config->parent)
+ return -EINVAL;
+
+ if (!config->map)
+ return -EINVAL;
+
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
+ return -ENOMEM;
+ priv = counter_priv(counter);
+ priv->map = config->map;
+
+ counter->name = dev_name(config->parent);
+ counter->parent = config->parent;
+ counter->ops = &i8254_ops;
+ counter->counts = i8254_counts;
+ counter->num_counts = ARRAY_SIZE(i8254_counts);
+ counter->signals = i8254_signals;
+ counter->num_signals = ARRAY_SIZE(i8254_signals);
+
+ mutex_init(&priv->lock);
+
+ err = i8254_init_hw(priv->map);
+ if (err)
+ return err;
+
+ err = devm_counter_add(dev, counter);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to add counter\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(devm_i8254_regmap_register, I8254);
+
+MODULE_AUTHOR("William Breathitt Gray");
+MODULE_DESCRIPTION("Intel 8254 Programmable Interval Timer");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(COUNTER);
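The kernel-doc above gives the whole contract: a consumer supplies a parent device and a regmap, and the library registers the three counters on its behalf. A minimal, hedged sketch of such a consumer follows; the function and config names, the MMIO assumption, and the regmap settings are invented for illustration, while struct i8254_regmap_config, its .parent and .map members, devm_i8254_regmap_register(), and the I8254 symbol namespace come from the code above.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i8254.h>
#include <linux/module.h>
#include <linux/regmap.h>

/* Hypothetical layout: counters at offsets 0x0-0x2, control word at 0x3 */
static const struct regmap_config example_8254_regmap_config = {
	.reg_bits	= 8,
	.reg_stride	= 1,
	.val_bits	= 8,
	.max_register	= 0x3,
};

static int example_8254_setup(struct device *dev, void __iomem *regs)
{
	struct i8254_regmap_config cfg = {};
	struct regmap *map;

	map = devm_regmap_init_mmio(dev, regs, &example_8254_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	cfg.parent = dev;	/* names the counter device after this device */
	cfg.map = map;		/* register access used to latch, program, read */
	return devm_i8254_regmap_register(dev, &cfg);
}

/* devm_i8254_regmap_register() is exported in the I8254 namespace, so a
 * consumer module also needs the import below (and "select I8254" in Kconfig).
 */
MODULE_IMPORT_NS(I8254);

Note that counter->name is taken from config->parent, so the parent should be the physical device the timer sits on rather than an intermediate logical device.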
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 9bf20a5d6bda..6206d2dc3d47 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -342,6 +342,9 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ /* Reset input selector to its default input */
+ regmap_write(priv->regmap, TIM_TISEL, 0x0);
+
/* Register Counter device */
ret = devm_counter_add(dev, counter);
if (ret < 0)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a1c51abddbc5..f429b9b37b76 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -218,7 +218,8 @@ config CPUFREQ_DT
If in doubt, say N.
config CPUFREQ_DT_PLATDEV
- bool
+ tristate "Generic DT based cpufreq platdev driver"
+ depends on OF
help
This adds a generic DT based cpufreq platdev driver for frequency
management. This creates a 'cpufreq-dt' platform device, on the
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index b0fc5e84f857..8afefdea4d80 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -21,6 +21,13 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
+static const struct of_device_id __maybe_unused armada_8k_cpufreq_of_match[] = {
+ { .compatible = "marvell,ap806-cpu-clock" },
+ { .compatible = "marvell,ap807-cpu-clock" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, armada_8k_cpufreq_of_match);
+
/*
* Setup the opps list with the divider for the max frequency, that
* will be filled at runtime.
@@ -127,7 +134,8 @@ static int __init armada_8k_cpufreq_init(void)
struct device_node *node;
struct cpumask cpus;
- node = of_find_compatible_node(NULL, NULL, "marvell,ap806-cpu-clock");
+ node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
+ NULL);
if (!node || !of_device_is_available(node)) {
of_node_put(node);
return -ENODEV;
@@ -204,12 +212,6 @@ static void __exit armada_8k_cpufreq_exit(void)
}
module_exit(armada_8k_cpufreq_exit);
-static const struct of_device_id __maybe_unused armada_8k_cpufreq_of_match[] = {
- { .compatible = "marvell,ap806-cpu-clock" },
- { },
-};
-MODULE_DEVICE_TABLE(of, armada_8k_cpufreq_of_match);
-
MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>");
MODULE_DESCRIPTION("Armada 8K cpufreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 338cf6cc6596..e2b20080de3a 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -5,6 +5,7 @@
*/
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -85,6 +86,8 @@ static const struct of_device_id allowlist[] __initconst = {
{ .compatible = "st-ericsson,u9500", },
{ .compatible = "st-ericsson,u9540", },
+ { .compatible = "starfive,jh7110", },
+
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap4", },
{ .compatible = "ti,omap5", },
@@ -165,6 +168,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,dra7", },
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
+ { .compatible = "ti,am62a7", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,apq8064", },
@@ -214,3 +218,4 @@ create_pdev:
sizeof(struct cpufreq_dt_platform_data)));
}
core_initcall(cpufreq_dt_platdev_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 48e1772e98fd..9fb1501033bb 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -209,6 +209,14 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.suspend = cpufreq_generic_suspend,
};
+static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
+{
+ int ret = dev_pm_opp_disable(dev, freq);
+
+ if (ret < 0 && ret != -ENODEV)
+ dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000);
+}
+
#define OCOTP_CFG3 0x440
#define OCOTP_CFG3_SPEED_SHIFT 16
#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
@@ -254,17 +262,15 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
val &= 0x3;
if (val < OCOTP_CFG3_SPEED_996MHZ)
- if (dev_pm_opp_disable(dev, 996000000))
- dev_warn(dev, "failed to disable 996MHz OPP\n");
+ imx6x_disable_freq_in_opp(dev, 996000000);
if (of_machine_is_compatible("fsl,imx6q") ||
of_machine_is_compatible("fsl,imx6qp")) {
if (val != OCOTP_CFG3_SPEED_852MHZ)
- if (dev_pm_opp_disable(dev, 852000000))
- dev_warn(dev, "failed to disable 852MHz OPP\n");
+ imx6x_disable_freq_in_opp(dev, 852000000);
+
if (val != OCOTP_CFG3_SPEED_1P2GHZ)
- if (dev_pm_opp_disable(dev, 1200000000))
- dev_warn(dev, "failed to disable 1.2GHz OPP\n");
+ imx6x_disable_freq_in_opp(dev, 1200000000);
}
return 0;
@@ -316,20 +322,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
- if (of_machine_is_compatible("fsl,imx6ul")) {
+ if (of_machine_is_compatible("fsl,imx6ul"))
if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
- if (dev_pm_opp_disable(dev, 696000000))
- dev_warn(dev, "failed to disable 696MHz OPP\n");
- }
+ imx6x_disable_freq_in_opp(dev, 696000000);
if (of_machine_is_compatible("fsl,imx6ull")) {
if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
- if (dev_pm_opp_disable(dev, 792000000))
- dev_warn(dev, "failed to disable 792MHz OPP\n");
+ imx6x_disable_freq_in_opp(dev, 792000000);
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
- if (dev_pm_opp_disable(dev, 900000000))
- dev_warn(dev, "failed to disable 900MHz OPP\n");
+ imx6x_disable_freq_in_opp(dev, 900000000);
}
return ret;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f29182512b98..8ca2bce4341a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -302,6 +302,13 @@ static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
+#define HYBRID_SCALING_FACTOR 78741
+
+static inline int core_get_scaling(void)
+{
+ return 100000;
+}
+
#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
@@ -400,6 +407,26 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
return cppc_perf.nominal_perf;
}
+
+static int intel_pstate_cppc_get_scaling(int cpu)
+{
+ struct cppc_perf_caps cppc_perf;
+ int ret;
+
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+
+ /*
+ * If the nominal frequency and the nominal performance are not
+ * zero and the ratio between them is not 100, return the hybrid
+ * scaling factor.
+ */
+ if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
+ cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
+ return HYBRID_SCALING_FACTOR;
+
+ return core_get_scaling();
+}
+
#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
@@ -492,6 +519,11 @@ static inline int intel_pstate_get_cppc_guaranteed(int cpu)
{
return -ENOTSUPP;
}
+
+static int intel_pstate_cppc_get_scaling(int cpu)
+{
+ return core_get_scaling();
+}
#endif /* CONFIG_ACPI_CPPC_LIB */
/**
@@ -1897,11 +1929,6 @@ static int core_get_turbo_pstate(int cpu)
return ret;
}
-static inline int core_get_scaling(void)
-{
- return 100000;
-}
-
static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
u64 val;
@@ -1938,16 +1965,28 @@ static void hybrid_get_type(void *data)
*cpu_type = get_this_hybrid_cpu_type();
}
-static int hybrid_get_cpu_scaling(int cpu)
+static int hwp_get_cpu_scaling(int cpu)
{
u8 cpu_type = 0;
smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
/* P-cores have a smaller perf level-to-frequency scaling factor. */
if (cpu_type == 0x40)
- return 78741;
+ return HYBRID_SCALING_FACTOR;
- return core_get_scaling();
+ /* Use default core scaling for E-cores */
+ if (cpu_type == 0x20)
+ return core_get_scaling();
+
+ /*
+ * If reached here, this system is either non-hybrid (like Tiger
+ * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
+ * no E cores (in which case CPUID for hybrid support is 0).
+ *
+ * The CPPC nominal_frequency field is 0 for non-hybrid systems,
+ * so the default core scaling will be used for them.
+ */
+ return intel_pstate_cppc_get_scaling(cpu);
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
@@ -3395,8 +3434,7 @@ static int __init intel_pstate_init(void)
if (!default_driver)
default_driver = &intel_pstate;
- if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
- pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
+ pstate_funcs.get_cpu_scaling = hwp_get_cpu_scaling;
goto hwp_cpu_matched;
}
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 9a39a7ccfae9..fef68cb2b38f 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -696,9 +696,16 @@ static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
static const struct mtk_cpufreq_platform_data mt7622_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
- .proc_max_volt = 1360000,
+ .proc_max_volt = 1350000,
.sram_min_volt = 0,
- .sram_max_volt = 1360000,
+ .sram_max_volt = 1350000,
+ .ccifreq_supported = false,
+};
+
+static const struct mtk_cpufreq_platform_data mt7623_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1300000,
.ccifreq_supported = false,
};
@@ -734,7 +741,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
- { .compatible = "mediatek,mt7623", .data = &mt7622_platform_data },
+ { .compatible = "mediatek,mt7623", .data = &mt7623_platform_data },
{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index a78d7a27b4b5..f2830371d25f 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -661,7 +661,7 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to find icc paths\n");
for (num_domains = 0; num_domains < MAX_FREQ_DOMAINS; num_domains++)
if (!platform_get_resource(pdev, IORESOURCE_MEM, num_domains))
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 92acbb25abb3..d3510cfdb3eb 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -20,8 +20,6 @@
#include <asm/asi.h>
#include <asm/timer.h>
-static struct cpufreq_driver *cpufreq_us2e_driver;
-
struct us2e_freq_percpu_info {
struct cpufreq_frequency_table table[6];
};
@@ -300,12 +298,19 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us2e_driver)
- us2e_freq_target(policy, 0);
-
+ us2e_freq_target(policy, 0);
return 0;
}
+static struct cpufreq_driver cpufreq_us2e_driver = {
+ .name = "UltraSPARC-IIe",
+ .init = us2e_freq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = us2e_freq_target,
+ .get = us2e_freq_get,
+ .exit = us2e_freq_cpu_exit,
+};
+
static int __init us2e_freq_init(void)
{
unsigned long manuf, impl, ver;
@@ -319,39 +324,15 @@ static int __init us2e_freq_init(void)
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
- struct cpufreq_driver *driver;
-
- ret = -ENOMEM;
- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
- if (!driver)
- goto err_out;
-
- us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
- GFP_KERNEL);
+ us2e_freq_table = kzalloc(NR_CPUS * sizeof(*us2e_freq_table),
+ GFP_KERNEL);
if (!us2e_freq_table)
- goto err_out;
-
- driver->init = us2e_freq_cpu_init;
- driver->verify = cpufreq_generic_frequency_table_verify;
- driver->target_index = us2e_freq_target;
- driver->get = us2e_freq_get;
- driver->exit = us2e_freq_cpu_exit;
- strcpy(driver->name, "UltraSPARC-IIe");
+ return -ENOMEM;
- cpufreq_us2e_driver = driver;
- ret = cpufreq_register_driver(driver);
+ ret = cpufreq_register_driver(&cpufreq_us2e_driver);
if (ret)
- goto err_out;
+ kfree(us2e_freq_table);
- return 0;
-
-err_out:
- if (driver) {
- kfree(driver);
- cpufreq_us2e_driver = NULL;
- }
- kfree(us2e_freq_table);
- us2e_freq_table = NULL;
return ret;
}
@@ -360,13 +341,8 @@ err_out:
static void __exit us2e_freq_exit(void)
{
- if (cpufreq_us2e_driver) {
- cpufreq_unregister_driver(cpufreq_us2e_driver);
- kfree(cpufreq_us2e_driver);
- cpufreq_us2e_driver = NULL;
- kfree(us2e_freq_table);
- us2e_freq_table = NULL;
- }
+ cpufreq_unregister_driver(&cpufreq_us2e_driver);
+ kfree(us2e_freq_table);
}
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index e41b35b16afd..91d1ed558136 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -19,8 +19,6 @@
#include <asm/head.h>
#include <asm/timer.h>
-static struct cpufreq_driver *cpufreq_us3_driver;
-
struct us3_freq_percpu_info {
struct cpufreq_frequency_table table[4];
};
@@ -144,12 +142,19 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us3_driver)
- us3_freq_target(policy, 0);
-
+ us3_freq_target(policy, 0);
return 0;
}
+static struct cpufreq_driver cpufreq_us3_driver = {
+ .name = "UltraSPARC-III",
+ .init = us3_freq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = us3_freq_target,
+ .get = us3_freq_get,
+ .exit = us3_freq_cpu_exit,
+};
+
static int __init us3_freq_init(void)
{
unsigned long manuf, impl, ver;
@@ -167,39 +172,15 @@ static int __init us3_freq_init(void)
impl == CHEETAH_PLUS_IMPL ||
impl == JAGUAR_IMPL ||
impl == PANTHER_IMPL)) {
- struct cpufreq_driver *driver;
-
- ret = -ENOMEM;
- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
- if (!driver)
- goto err_out;
-
- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
- GFP_KERNEL);
+ us3_freq_table = kzalloc(NR_CPUS * sizeof(*us3_freq_table),
+ GFP_KERNEL);
if (!us3_freq_table)
- goto err_out;
-
- driver->init = us3_freq_cpu_init;
- driver->verify = cpufreq_generic_frequency_table_verify;
- driver->target_index = us3_freq_target;
- driver->get = us3_freq_get;
- driver->exit = us3_freq_cpu_exit;
- strcpy(driver->name, "UltraSPARC-III");
+ return -ENOMEM;
- cpufreq_us3_driver = driver;
- ret = cpufreq_register_driver(driver);
+ ret = cpufreq_register_driver(&cpufreq_us3_driver);
if (ret)
- goto err_out;
+ kfree(us3_freq_table);
- return 0;
-
-err_out:
- if (driver) {
- kfree(driver);
- cpufreq_us3_driver = NULL;
- }
- kfree(us3_freq_table);
- us3_freq_table = NULL;
return ret;
}
@@ -208,13 +189,8 @@ err_out:
static void __exit us3_freq_exit(void)
{
- if (cpufreq_us3_driver) {
- cpufreq_unregister_driver(cpufreq_us3_driver);
- kfree(cpufreq_us3_driver);
- cpufreq_us3_driver = NULL;
- kfree(us3_freq_table);
- us3_freq_table = NULL;
- }
+ cpufreq_unregister_driver(&cpufreq_us3_driver);
+ kfree(us3_freq_table);
}
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index c8d03346068a..36dad5ea5947 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -686,8 +686,10 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
/* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
cpu_dev = get_cpu_device(0);
- if (!cpu_dev)
- return -EPROBE_DEFER;
+ if (!cpu_dev) {
+ err = -EPROBE_DEFER;
+ goto err_free_res;
+ }
if (dev_pm_opp_of_get_opp_desc_node(cpu_dev)) {
err = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
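The tegra194 hunk above replaces a bare return of -EPROBE_DEFER with a jump to the driver's existing err_free_res label, so whatever the probe routine acquired before the check is released before deferring. A generic sketch of that unwind convention (all example_* names are hypothetical, not the tegra194 driver):

#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/platform_device.h>

struct example_data;					/* opaque, illustrative */
struct example_data *example_acquire_resources(struct platform_device *pdev);
void example_release_resources(struct example_data *data);

static int example_probe(struct platform_device *pdev)
{
	struct example_data *data;
	int err;

	data = example_acquire_resources(pdev);
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (!get_cpu_device(0)) {
		/* Defer, but only after undoing what was already acquired. */
		err = -EPROBE_DEFER;
		goto err_free_res;
	}

	return 0;

err_free_res:
	example_release_resources(data);
	return err;
}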
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index be4209d97cb3..d5cd2fd25cad 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -337,6 +337,7 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
+ { .compatible = "ti,am62a7", .data = &am625_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9f5b2d28bff5..44e44b8d9ce6 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -92,17 +92,6 @@ config ZCRYPT_DEBUG
If unsure, say N.
-config ZCRYPT_MULTIDEVNODES
- bool "Support for multiple zcrypt device nodes"
- default y
- depends on S390
- depends on ZCRYPT
- help
- With this option enabled the zcrypt device driver can
- provide multiple devices nodes in /dev. Each device
- node can get customized to limit access and narrow
- down the use of the available crypto hardware.
-
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 1198bd306365..94849fa3bd74 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -480,6 +480,7 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
kfree(vc_req->req_data);
kfree(vc_req->sgs);
}
+ cond_resched();
}
}
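The virtio-crypto change above is a single cond_resched() inside the loop that frees unused requests, so draining a large backlog cannot stall the CPU for the whole walk. A minimal, self-contained sketch of the same pattern (the example_req list is illustrative, not the virtio-crypto data structures):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct example_req {
	struct list_head node;
	void *payload;
};

/* Free a potentially long backlog without hogging the CPU. */
static void example_drain(struct list_head *backlog)
{
	struct example_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, backlog, node) {
		list_del(&req->node);
		kfree(req->payload);
		kfree(req);
		cond_resched();	/* yield to other runnable tasks each iteration */
	}
}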
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index ff4e78117b31..fcbf8295fde3 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -82,6 +82,7 @@ config CXL_PMEM
config CXL_MEM
tristate "CXL: Memory Expansion"
depends on CXL_PCI
+ select FW_UPLOAD
default CXL_BUS
help
The CXL.mem protocol allows a device to act as a provider of "System
@@ -139,4 +140,17 @@ config CXL_REGION_INVALIDATION_TEST
If unsure, or if this kernel is meant for production environments,
say N.
+config CXL_PMU
+ tristate "CXL Performance Monitoring Unit"
+ default CXL_BUS
+ depends on PERF_EVENTS
+ help
+ Support performance monitoring as defined in CXL rev 3.0
+ section 13.2: Performance Monitoring. CXL components may have
+ one or more CXL Performance Monitoring Units (CPMUs).
+
+ Say 'y/m' to enable a driver that will attach to performance
+ monitoring units and provide standard perf based interfaces.
+
+ If unsure say 'm'.
endif
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 7e1765b09e04..658e6b84a769 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -258,7 +258,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
- cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->hpa_range = (struct range) {
.start = res->start,
.end = res->end,
@@ -327,66 +327,21 @@ __mock struct acpi_device *to_cxl_host_bridge(struct device *host,
return NULL;
}
-/*
- * A host bridge is a dport to a CFMWS decode and it is a uport to the
- * dport (PCIe Root Ports) in the host bridge.
- */
-static int add_host_bridge_uport(struct device *match, void *arg)
-{
- struct cxl_port *root_port = arg;
- struct device *host = root_port->dev.parent;
- struct acpi_device *hb = to_cxl_host_bridge(host, match);
- struct acpi_pci_root *pci_root;
- struct cxl_dport *dport;
- struct cxl_port *port;
- struct device *bridge;
- int rc;
-
- if (!hb)
- return 0;
-
- pci_root = acpi_pci_find_root(hb->handle);
- bridge = pci_root->bus->bridge;
- dport = cxl_find_dport_by_dev(root_port, bridge);
- if (!dport) {
- dev_dbg(host, "host bridge expected and not found\n");
- return 0;
- }
-
- if (dport->rch) {
- dev_info(bridge, "host supports CXL (restricted)\n");
- return 0;
- }
-
- rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
- if (rc)
- return rc;
-
- port = devm_cxl_add_port(host, bridge, dport->component_reg_phys,
- dport);
- if (IS_ERR(port))
- return PTR_ERR(port);
-
- dev_info(bridge, "host supports CXL\n");
-
- return 0;
-}
-
+/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
struct device *dev;
unsigned long long uid;
- resource_size_t rcrb;
- resource_size_t chbcr;
+ resource_size_t base;
u32 cxl_version;
};
-static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
- const unsigned long end)
+static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
{
struct cxl_chbs_context *ctx = arg;
struct acpi_cedt_chbs *chbs;
- if (ctx->chbcr)
+ if (ctx->base != CXL_RESOURCE_NONE)
return 0;
chbs = (struct acpi_cedt_chbs *) header;
@@ -395,23 +350,39 @@ static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
return 0;
ctx->cxl_version = chbs->cxl_version;
- ctx->rcrb = CXL_RESOURCE_NONE;
- ctx->chbcr = CXL_RESOURCE_NONE;
-
if (!chbs->base)
return 0;
- if (chbs->cxl_version != ACPI_CEDT_CHBS_VERSION_CXL11) {
- ctx->chbcr = chbs->base;
+ if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
+ chbs->length != CXL_RCRB_SIZE)
return 0;
+
+ ctx->base = chbs->base;
+
+ return 0;
+}
+
+static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
+ struct cxl_chbs_context *ctx)
+{
+ unsigned long long uid;
+ int rc;
+
+ rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
+ if (rc != AE_OK) {
+ dev_err(dev, "unable to retrieve _UID\n");
+ return -ENOENT;
}
- if (chbs->length != CXL_RCRB_SIZE)
- return 0;
+ dev_dbg(dev, "UID found: %lld\n", uid);
+ *ctx = (struct cxl_chbs_context) {
+ .dev = dev,
+ .uid = uid,
+ .base = CXL_RESOURCE_NONE,
+ .cxl_version = UINT_MAX,
+ };
- ctx->rcrb = chbs->base;
- ctx->chbcr = cxl_rcrb_to_component(ctx->dev, chbs->base,
- CXL_RCRB_DOWNSTREAM);
+ acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbs_iter, ctx);
return 0;
}
@@ -420,7 +391,6 @@ static int add_host_bridge_dport(struct device *match, void *arg)
{
acpi_status rc;
struct device *bridge;
- unsigned long long uid;
struct cxl_dport *dport;
struct cxl_chbs_context ctx;
struct acpi_pci_root *pci_root;
@@ -431,51 +401,107 @@ static int add_host_bridge_dport(struct device *match, void *arg)
if (!hb)
return 0;
- rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
- if (rc != AE_OK) {
- dev_err(match, "unable to retrieve _UID\n");
- return -ENODEV;
- }
-
- dev_dbg(match, "UID found: %lld\n", uid);
-
- ctx = (struct cxl_chbs_context) {
- .dev = match,
- .uid = uid,
- };
- acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx);
+ rc = cxl_get_chbs(match, hb, &ctx);
+ if (rc)
+ return rc;
- if (!ctx.chbcr) {
+ if (ctx.cxl_version == UINT_MAX) {
dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
- uid);
+ ctx.uid);
return 0;
}
- if (ctx.rcrb != CXL_RESOURCE_NONE)
- dev_dbg(match, "RCRB found for UID %lld: %pa\n", uid, &ctx.rcrb);
-
- if (ctx.chbcr == CXL_RESOURCE_NONE) {
- dev_warn(match, "CHBCR invalid for Host Bridge (UID %lld)\n",
- uid);
+ if (ctx.base == CXL_RESOURCE_NONE) {
+ dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
+ ctx.uid);
return 0;
}
- dev_dbg(match, "CHBCR found: %pa\n", &ctx.chbcr);
-
pci_root = acpi_pci_find_root(hb->handle);
bridge = pci_root->bus->bridge;
- if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11)
- dport = devm_cxl_add_rch_dport(root_port, bridge, uid,
- ctx.chbcr, ctx.rcrb);
- else
- dport = devm_cxl_add_dport(root_port, bridge, uid,
- ctx.chbcr);
+
+ /*
+ * In RCH mode, bind the component regs base to the dport. In
+ * VH mode it will be bound to the CXL host bridge's port
+ * object later in add_host_bridge_uport().
+ */
+ if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
+ dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
+ &ctx.base);
+ dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
+ ctx.base);
+ } else {
+ dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
+ CXL_RESOURCE_NONE);
+ }
+
if (IS_ERR(dport))
return PTR_ERR(dport);
return 0;
}
+/*
+ * A host bridge is a dport to a CFMWS decode and it is a uport to the
+ * dport (PCIe Root Ports) in the host bridge.
+ */
+static int add_host_bridge_uport(struct device *match, void *arg)
+{
+ struct cxl_port *root_port = arg;
+ struct device *host = root_port->dev.parent;
+ struct acpi_device *hb = to_cxl_host_bridge(host, match);
+ struct acpi_pci_root *pci_root;
+ struct cxl_dport *dport;
+ struct cxl_port *port;
+ struct device *bridge;
+ struct cxl_chbs_context ctx;
+ resource_size_t component_reg_phys;
+ int rc;
+
+ if (!hb)
+ return 0;
+
+ pci_root = acpi_pci_find_root(hb->handle);
+ bridge = pci_root->bus->bridge;
+ dport = cxl_find_dport_by_dev(root_port, bridge);
+ if (!dport) {
+ dev_dbg(host, "host bridge expected and not found\n");
+ return 0;
+ }
+
+ if (dport->rch) {
+ dev_info(bridge, "host supports CXL (restricted)\n");
+ return 0;
+ }
+
+ rc = cxl_get_chbs(match, hb, &ctx);
+ if (rc)
+ return rc;
+
+ if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
+ dev_warn(bridge,
+ "CXL CHBS version mismatch, skip port registration\n");
+ return 0;
+ }
+
+ component_reg_phys = ctx.base;
+ if (component_reg_phys != CXL_RESOURCE_NONE)
+ dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
+ ctx.uid, &component_reg_phys);
+
+ rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
+ if (rc)
+ return rc;
+
+ port = devm_cxl_add_port(host, bridge, component_reg_phys, dport);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ dev_info(bridge, "host supports CXL\n");
+
+ return 0;
+}
+
static int add_root_nvdimm_bridge(struct device *match, void *data)
{
struct cxl_decoder *cxld;
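The reworked acpi.c flow concentrates CEDT CHBS parsing in cxl_get_chbs() and lets both callers decide between the restricted (RCH, CXL 1.1) and virtual-host (VH) topologies from ctx.cxl_version. A compressed, illustrative restatement of the dport-side decision (the types, helpers and constants come from the hunks above; the wrapper itself is hypothetical):

/* Illustrative only: mirrors the choice made in add_host_bridge_dport(). */
static int example_add_dport(struct cxl_port *root_port, struct device *match,
			     struct acpi_device *hb, struct device *bridge)
{
	struct cxl_chbs_context ctx;
	struct cxl_dport *dport;
	int rc;

	rc = cxl_get_chbs(match, hb, &ctx);	/* fills uid, base, cxl_version */
	if (rc)
		return rc;

	if (ctx.cxl_version == UINT_MAX || ctx.base == CXL_RESOURCE_NONE)
		return 0;	/* no usable CHBS entry for this host bridge */

	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11)
		/* RCH: the CHBS points at the RCRB, bind it to the dport now */
		dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
					       ctx.base);
	else
		/* VH: component registers are bound to the port object later */
		dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
					   CXL_RESOURCE_NONE);

	return IS_ERR(dport) ? PTR_ERR(dport) : 0;
}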
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index ca4ae31d8f57..1f66b5d4d935 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -12,5 +12,6 @@ cxl_core-y += memdev.o
cxl_core-y += mbox.o
cxl_core-y += pci.o
cxl_core-y += hdm.o
+cxl_core-y += pmu.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 27f0968449de..45e7e044cf4a 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -6,6 +6,7 @@
extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
+extern const struct device_type cxl_pmu_type;
extern struct attribute_group cxl_base_attribute_group;
@@ -63,6 +64,16 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+
+enum cxl_rcrb {
+ CXL_RCRB_DOWNSTREAM,
+ CXL_RCRB_UPSTREAM,
+};
+struct cxl_rcrb_info;
+resource_size_t __rcrb_to_component(struct device *dev,
+ struct cxl_rcrb_info *ri,
+ enum cxl_rcrb which);
+
extern struct rw_semaphore cxl_dpa_rwsem;
int cxl_memdev_init(void);
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 7889ff203a34..4449b34a80cc 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -85,6 +85,7 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
struct cxl_component_regs *regs)
{
struct cxl_register_map map = {
+ .dev = &port->dev,
.resource = port->component_reg_phys,
.base = crb,
.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
@@ -97,8 +98,7 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
return -ENODEV;
}
- return cxl_map_component_regs(&port->dev, regs, &map,
- BIT(CXL_CM_CAP_CAP_ID_HDM));
+ return cxl_map_component_regs(&map, regs, BIT(CXL_CM_CAP_CAP_ID_HDM));
}
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
@@ -570,8 +570,9 @@ static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
- u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
- CXL_HDM_DECODER0_CTRL_TYPE);
+ u32p_replace_bits(ctrl,
+ !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
+ CXL_HDM_DECODER0_CTRL_HOSTONLY);
}
static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
@@ -764,7 +765,7 @@ static int cxl_setup_hdm_decoder_from_dvsec(
if (!len)
return -ENOENT;
- cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->commit = NULL;
cxld->reset = NULL;
cxld->hpa_range = info->dvsec_range[which];
@@ -793,8 +794,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map, void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
+ struct cxl_endpoint_decoder *cxled = NULL;
u64 size, base, skip, dpa_size, lo, hi;
- struct cxl_endpoint_decoder *cxled;
bool committed;
u32 remainder;
int i, rc;
@@ -827,6 +828,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO;
}
+ if (info)
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
cxld->hpa_range = (struct range) {
.start = base,
.end = base + size - 1,
@@ -837,10 +840,10 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
cxld->flags |= CXL_DECODER_F_ENABLE;
if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
cxld->flags |= CXL_DECODER_F_LOCK;
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
- cxld->target_type = CXL_DECODER_EXPANDER;
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
else
- cxld->target_type = CXL_DECODER_ACCELERATOR;
+ cxld->target_type = CXL_DECODER_DEVMEM;
if (cxld->id != port->commit_end + 1) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
@@ -856,12 +859,28 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
}
port->commit_end = cxld->id;
} else {
- /* unless / until type-2 drivers arrive, assume type-3 */
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
- ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
+ if (cxled) {
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ /*
+ * Default by devtype until a device arrives that needs
+ * more precision.
+ */
+ if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ else
+ cxld->target_type = CXL_DECODER_DEVMEM;
+ } else {
+ /* To be overridden by region type at commit time */
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ }
+
+ if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
+ cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
+ ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
}
- cxld->target_type = CXL_DECODER_EXPANDER;
}
rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
&cxld->interleave_ways);
@@ -880,7 +899,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
cxld->interleave_ways, cxld->interleave_granularity);
- if (!info) {
+ if (!cxled) {
lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
target_list.value = (hi << 32) + lo;
@@ -903,7 +922,6 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
skip = (hi << 32) + lo;
- cxled = to_cxl_endpoint_decoder(&cxld->dev);
rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
if (rc) {
dev_err(&port->dev,
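The decoder-type rework above reads and writes the renamed HOSTONLY control bit with the <linux/bitfield.h> helpers. A tiny standalone sketch of that read-modify-write idiom, using an invented mask (EXAMPLE_CTRL_HOSTONLY is illustrative, not the real CXL_HDM_DECODER0_CTRL_HOSTONLY definition):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_CTRL_HOSTONLY	BIT(2)	/* made-up bit position */

/* Decide the decoder target type from a control word... */
static bool example_is_hostonly(u32 ctrl)
{
	return FIELD_GET(EXAMPLE_CTRL_HOSTONLY, ctrl);
}

/* ...and update the field in place before writing the register back. */
static void example_set_hostonly(u32 *ctrl, bool hostonly)
{
	u32p_replace_bits(ctrl, hostonly, EXAMPLE_CTRL_HOSTONLY);
}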
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index bea9cf31a12d..d6d067fbee97 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -182,7 +182,7 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
/**
* cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
* @mbox_cmd: initialized command to execute
*
* Context: Any context.
@@ -198,19 +198,19 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
* error. While this distinction can be useful for commands from userspace, the
* kernel will only be able to use results when both are successful.
*/
-int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
+int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
struct cxl_mbox_cmd *mbox_cmd)
{
size_t out_size, min_out;
int rc;
- if (mbox_cmd->size_in > cxlds->payload_size ||
- mbox_cmd->size_out > cxlds->payload_size)
+ if (mbox_cmd->size_in > mds->payload_size ||
+ mbox_cmd->size_out > mds->payload_size)
return -E2BIG;
out_size = mbox_cmd->size_out;
min_out = mbox_cmd->min_out;
- rc = cxlds->mbox_send(cxlds, mbox_cmd);
+ rc = mds->mbox_send(mds, mbox_cmd);
/*
* EIO is reserved for a payload size mismatch and mbox_send()
* may not return this error.
@@ -220,7 +220,8 @@ int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
if (rc)
return rc;
- if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS)
+ if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
+ mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
return cxl_mbox_cmd_rc2errno(mbox_cmd);
if (!out_size)
@@ -297,7 +298,7 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
}
static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
- struct cxl_dev_state *cxlds, u16 opcode,
+ struct cxl_memdev_state *mds, u16 opcode,
size_t in_size, size_t out_size, u64 in_payload)
{
*mbox = (struct cxl_mbox_cmd) {
@@ -312,7 +313,7 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
return PTR_ERR(mbox->payload_in);
if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
- dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
+ dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
cxl_mem_opcode_to_name(opcode));
kvfree(mbox->payload_in);
return -EBUSY;
@@ -321,7 +322,7 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
/* Prepare to handle a full payload for variable sized output */
if (out_size == CXL_VARIABLE_PAYLOAD)
- mbox->size_out = cxlds->payload_size;
+ mbox->size_out = mds->payload_size;
else
mbox->size_out = out_size;
@@ -343,7 +344,7 @@ static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
const struct cxl_send_command *send_cmd,
- struct cxl_dev_state *cxlds)
+ struct cxl_memdev_state *mds)
{
if (send_cmd->raw.rsvd)
return -EINVAL;
@@ -353,13 +354,13 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
* gets passed along without further checking, so it must be
* validated here.
*/
- if (send_cmd->out.size > cxlds->payload_size)
+ if (send_cmd->out.size > mds->payload_size)
return -EINVAL;
if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
return -EPERM;
- dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");
+ dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");
*mem_cmd = (struct cxl_mem_command) {
.info = {
@@ -375,7 +376,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
const struct cxl_send_command *send_cmd,
- struct cxl_dev_state *cxlds)
+ struct cxl_memdev_state *mds)
{
struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
const struct cxl_command_info *info = &c->info;
@@ -390,11 +391,11 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
return -EINVAL;
/* Check that the command is enabled for hardware */
- if (!test_bit(info->id, cxlds->enabled_cmds))
+ if (!test_bit(info->id, mds->enabled_cmds))
return -ENOTTY;
/* Check that the command is not claimed for exclusive kernel use */
- if (test_bit(info->id, cxlds->exclusive_cmds))
+ if (test_bit(info->id, mds->exclusive_cmds))
return -EBUSY;
/* Check the input buffer is the expected size */
@@ -423,7 +424,7 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
/**
* cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
* @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
* @send_cmd: &struct cxl_send_command copied in from userspace.
*
* Return:
@@ -438,7 +439,7 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
* safe to send to the hardware.
*/
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
- struct cxl_dev_state *cxlds,
+ struct cxl_memdev_state *mds,
const struct cxl_send_command *send_cmd)
{
struct cxl_mem_command mem_cmd;
@@ -452,20 +453,20 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
* supports, but output can be arbitrarily large (simply write out as
* much data as the hardware provides).
*/
- if (send_cmd->in.size > cxlds->payload_size)
+ if (send_cmd->in.size > mds->payload_size)
return -EINVAL;
/* Sanitize and construct a cxl_mem_command */
if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
- rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
+ rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
else
- rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);
+ rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);
if (rc)
return rc;
/* Sanitize and construct a cxl_mbox_cmd */
- return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
+ return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
mem_cmd.info.size_in, mem_cmd.info.size_out,
send_cmd->in.payload);
}
@@ -473,6 +474,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
int cxl_query_cmd(struct cxl_memdev *cxlmd,
struct cxl_mem_query_commands __user *q)
{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct device *dev = &cxlmd->dev;
struct cxl_mem_command *cmd;
u32 n_commands;
@@ -494,9 +496,9 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
cxl_for_each_cmd(cmd) {
struct cxl_command_info info = cmd->info;
- if (test_bit(info.id, cxlmd->cxlds->enabled_cmds))
+ if (test_bit(info.id, mds->enabled_cmds))
info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
- if (test_bit(info.id, cxlmd->cxlds->exclusive_cmds))
+ if (test_bit(info.id, mds->exclusive_cmds))
info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
@@ -511,7 +513,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
/**
* handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
* @mbox_cmd: The validated mailbox command.
* @out_payload: Pointer to userspace's output payload.
* @size_out: (Input) Max payload size to copy out.
@@ -532,12 +534,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
*
* See cxl_send_cmd().
*/
-static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
+static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
struct cxl_mbox_cmd *mbox_cmd,
u64 out_payload, s32 *size_out,
u32 *retval)
{
- struct device *dev = cxlds->dev;
+ struct device *dev = mds->cxlds.dev;
int rc;
dev_dbg(dev,
@@ -547,7 +549,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
cxl_mem_opcode_to_name(mbox_cmd->opcode),
mbox_cmd->opcode, mbox_cmd->size_in);
- rc = cxlds->mbox_send(cxlds, mbox_cmd);
+ rc = mds->mbox_send(mds, mbox_cmd);
if (rc)
goto out;
@@ -576,7 +578,7 @@ out:
int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct device *dev = &cxlmd->dev;
struct cxl_send_command send;
struct cxl_mbox_cmd mbox_cmd;
@@ -587,11 +589,11 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
if (copy_from_user(&send, s, sizeof(send)))
return -EFAULT;
- rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlmd->cxlds, &send);
+ rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
if (rc)
return rc;
- rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
+ rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
&send.out.size, &send.retval);
if (rc)
return rc;
@@ -602,13 +604,14 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
return 0;
}
-static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 *size, u8 *out)
+static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
+ u32 *size, u8 *out)
{
u32 remaining = *size;
u32 offset = 0;
while (remaining) {
- u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
+ u32 xfer_size = min_t(u32, remaining, mds->payload_size);
struct cxl_mbox_cmd mbox_cmd;
struct cxl_mbox_get_log log;
int rc;
@@ -627,7 +630,7 @@ static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 *size, u8
.payload_out = out,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
/*
* The output payload length that indicates the number
@@ -654,17 +657,18 @@ static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 *size, u8
/**
* cxl_walk_cel() - Walk through the Command Effects Log.
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
* @size: Length of the Command Effects Log.
* @cel: CEL
*
* Iterate over each entry in the CEL and determine if the driver supports the
* command. If so, the command is enabled for the device and can be used later.
*/
-static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
+static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
struct cxl_cel_entry *cel_entry;
const int cel_entries = size / sizeof(*cel_entry);
+ struct device *dev = mds->cxlds.dev;
int i;
cel_entry = (struct cxl_cel_entry *) cel;
@@ -674,39 +678,39 @@ static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
if (!cmd && !cxl_is_poison_command(opcode)) {
- dev_dbg(cxlds->dev,
+ dev_dbg(dev,
"Opcode 0x%04x unsupported by driver\n", opcode);
continue;
}
if (cmd)
- set_bit(cmd->info.id, cxlds->enabled_cmds);
+ set_bit(cmd->info.id, mds->enabled_cmds);
if (cxl_is_poison_command(opcode))
- cxl_set_poison_cmd_enabled(&cxlds->poison, opcode);
+ cxl_set_poison_cmd_enabled(&mds->poison, opcode);
- dev_dbg(cxlds->dev, "Opcode 0x%04x enabled\n", opcode);
+ dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode);
}
}
-static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
+static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
struct cxl_mbox_get_supported_logs *ret;
struct cxl_mbox_cmd mbox_cmd;
int rc;
- ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
+ ret = kvmalloc(mds->payload_size, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
- .size_out = cxlds->payload_size,
+ .size_out = mds->payload_size,
.payload_out = ret,
/* At least the record number field must be valid */
.min_out = 2,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0) {
kvfree(ret);
return ERR_PTR(rc);
@@ -729,22 +733,22 @@ static const uuid_t log_uuid[] = {
/**
* cxl_enumerate_cmds() - Enumerate commands for a device.
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
*
* Returns 0 if enumerate completed successfully.
*
* CXL devices have optional support for certain commands. This function will
* determine the set of supported commands for the hardware and update the
- * enabled_cmds bitmap in the @cxlds.
+ * enabled_cmds bitmap in the @mds.
*/
-int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
+int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
struct cxl_mbox_get_supported_logs *gsl;
- struct device *dev = cxlds->dev;
+ struct device *dev = mds->cxlds.dev;
struct cxl_mem_command *cmd;
int i, rc;
- gsl = cxl_get_gsl(cxlds);
+ gsl = cxl_get_gsl(mds);
if (IS_ERR(gsl))
return PTR_ERR(gsl);
@@ -765,19 +769,19 @@ int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
goto out;
}
- rc = cxl_xfer_log(cxlds, &uuid, &size, log);
+ rc = cxl_xfer_log(mds, &uuid, &size, log);
if (rc) {
kvfree(log);
goto out;
}
- cxl_walk_cel(cxlds, size, log);
+ cxl_walk_cel(mds, size, log);
kvfree(log);
/* In case CEL was bogus, enable some default commands. */
cxl_for_each_cmd(cmd)
if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
- set_bit(cmd->info.id, cxlds->enabled_cmds);
+ set_bit(cmd->info.id, mds->enabled_cmds);
/* Found the required CEL */
rc = 0;
@@ -838,7 +842,7 @@ static void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
}
}
-static int cxl_clear_event_record(struct cxl_dev_state *cxlds,
+static int cxl_clear_event_record(struct cxl_memdev_state *mds,
enum cxl_event_log_type log,
struct cxl_get_event_payload *get_pl)
{
@@ -852,9 +856,9 @@ static int cxl_clear_event_record(struct cxl_dev_state *cxlds,
int i;
/* Payload size may limit the max handles */
- if (pl_size > cxlds->payload_size) {
- max_handles = (cxlds->payload_size - sizeof(*payload)) /
- sizeof(__le16);
+ if (pl_size > mds->payload_size) {
+ max_handles = (mds->payload_size - sizeof(*payload)) /
+ sizeof(__le16);
pl_size = struct_size(payload, handles, max_handles);
}
@@ -879,12 +883,12 @@ static int cxl_clear_event_record(struct cxl_dev_state *cxlds,
i = 0;
for (cnt = 0; cnt < total; cnt++) {
payload->handles[i++] = get_pl->records[cnt].hdr.handle;
- dev_dbg(cxlds->dev, "Event log '%d': Clearing %u\n",
- log, le16_to_cpu(payload->handles[i]));
+ dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
+ le16_to_cpu(payload->handles[i]));
if (i == max_handles) {
payload->nr_recs = i;
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
goto free_pl;
i = 0;
@@ -895,7 +899,7 @@ static int cxl_clear_event_record(struct cxl_dev_state *cxlds,
if (i) {
payload->nr_recs = i;
mbox_cmd.size_in = struct_size(payload, handles, i);
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
goto free_pl;
}
@@ -905,32 +909,34 @@ free_pl:
return rc;
}
-static void cxl_mem_get_records_log(struct cxl_dev_state *cxlds,
+static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
enum cxl_event_log_type type)
{
+ struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
+ struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
struct cxl_mbox_cmd mbox_cmd;
u8 log_type = type;
u16 nr_rec;
- mutex_lock(&cxlds->event.log_lock);
- payload = cxlds->event.buf;
+ mutex_lock(&mds->event.log_lock);
+ payload = mds->event.buf;
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
.payload_in = &log_type,
.size_in = sizeof(log_type),
.payload_out = payload,
- .size_out = cxlds->payload_size,
+ .size_out = mds->payload_size,
.min_out = struct_size(payload, records, 0),
};
do {
int rc, i;
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
- dev_err_ratelimited(cxlds->dev,
+ dev_err_ratelimited(dev,
"Event log '%d': Failed to query event records : %d",
type, rc);
break;
@@ -941,27 +947,27 @@ static void cxl_mem_get_records_log(struct cxl_dev_state *cxlds,
break;
for (i = 0; i < nr_rec; i++)
- cxl_event_trace_record(cxlds->cxlmd, type,
+ cxl_event_trace_record(cxlmd, type,
&payload->records[i]);
if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
- trace_cxl_overflow(cxlds->cxlmd, type, payload);
+ trace_cxl_overflow(cxlmd, type, payload);
- rc = cxl_clear_event_record(cxlds, type, payload);
+ rc = cxl_clear_event_record(mds, type, payload);
if (rc) {
- dev_err_ratelimited(cxlds->dev,
+ dev_err_ratelimited(dev,
"Event log '%d': Failed to clear events : %d",
type, rc);
break;
}
} while (nr_rec);
- mutex_unlock(&cxlds->event.log_lock);
+ mutex_unlock(&mds->event.log_lock);
}
/**
* cxl_mem_get_event_records - Get Event Records from the device
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
* @status: Event Status register value identifying which events are available.
*
* Retrieve all event records available on the device, report them as trace
@@ -970,24 +976,24 @@ static void cxl_mem_get_records_log(struct cxl_dev_state *cxlds,
* See CXL rev 3.0 @8.2.9.2.2 Get Event Records
* See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
*/
-void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status)
+void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
{
- dev_dbg(cxlds->dev, "Reading event logs: %x\n", status);
+ dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);
if (status & CXLDEV_EVENT_STATUS_FATAL)
- cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FATAL);
+ cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
if (status & CXLDEV_EVENT_STATUS_FAIL)
- cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FAIL);
+ cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
if (status & CXLDEV_EVENT_STATUS_WARN)
- cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_WARN);
+ cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
if (status & CXLDEV_EVENT_STATUS_INFO)
- cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_INFO);
+ cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
/**
* cxl_mem_get_partition_info - Get partition info
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
*
* Retrieve the current partition info for the device specified. The active
* values are the current capacity in bytes. If not 0, the 'next' values are
@@ -997,7 +1003,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
*
* See CXL @8.2.9.5.2.1 Get Partition Info
*/
-static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
+static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
struct cxl_mbox_get_partition_info pi;
struct cxl_mbox_cmd mbox_cmd;
@@ -1008,17 +1014,17 @@ static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
.size_out = sizeof(pi),
.payload_out = &pi,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
return rc;
- cxlds->active_volatile_bytes =
+ mds->active_volatile_bytes =
le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlds->active_persistent_bytes =
+ mds->active_persistent_bytes =
le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlds->next_volatile_bytes =
+ mds->next_volatile_bytes =
le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlds->next_persistent_bytes =
+ mds->next_persistent_bytes =
le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
return 0;
@@ -1026,14 +1032,14 @@ static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
/**
* cxl_dev_state_identify() - Send the IDENTIFY command to the device.
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
*
* Return: 0 if identify was executed successfully or media not ready.
*
* This will dispatch the identify command to the device and on success populate
* structures to be exported to sysfs.
*/
-int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
+int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify id;
@@ -1041,7 +1047,7 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
u32 val;
int rc;
- if (!cxlds->media_ready)
+ if (!mds->cxlds.media_ready)
return 0;
mbox_cmd = (struct cxl_mbox_cmd) {
@@ -1049,31 +1055,92 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
.size_out = sizeof(id),
.payload_out = &id,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0)
return rc;
- cxlds->total_bytes =
+ mds->total_bytes =
le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlds->volatile_only_bytes =
+ mds->volatile_only_bytes =
le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlds->persistent_only_bytes =
+ mds->persistent_only_bytes =
le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlds->partition_align_bytes =
+ mds->partition_align_bytes =
le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
- cxlds->lsa_size = le32_to_cpu(id.lsa_size);
- memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+ mds->lsa_size = le32_to_cpu(id.lsa_size);
+ memcpy(mds->firmware_version, id.fw_revision,
+ sizeof(id.fw_revision));
- if (test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds)) {
+ if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
val = get_unaligned_le24(id.poison_list_max_mer);
- cxlds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
+ mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
}
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
+/**
+ * cxl_mem_sanitize() - Send a sanitization command to the device.
+ * @mds: The device data for the operation
+ * @cmd: The specific sanitization command opcode
+ *
+ * Return: 0 if the command was executed successfully, regardless of
+ * whether or not the actual security operation is done in the background,
+ * such as for the Sanitize case.
+ * Error return values can be the result of the mailbox command, or -EINVAL
+ * when security requirements are not met or the context is invalid.
+ *
+ * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
+ */
+int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
+{
+ int rc;
+ u32 sec_out = 0;
+ struct cxl_get_security_output {
+ __le32 flags;
+ } out;
+ struct cxl_mbox_cmd sec_cmd = {
+ .opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
+ .payload_out = &out,
+ .size_out = sizeof(out),
+ };
+ struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
+ struct cxl_dev_state *cxlds = &mds->cxlds;
+
+ if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
+ return -EINVAL;
+
+ rc = cxl_internal_send_cmd(mds, &sec_cmd);
+ if (rc < 0) {
+ dev_err(cxlds->dev, "Failed to get security state : %d", rc);
+ return rc;
+ }
+
+ /*
+ * Prior to using these commands, any security applied to
+ * the user data areas of the device shall be DISABLED (or
+ * UNLOCKED for secure erase case).
+ */
+ sec_out = le32_to_cpu(out.flags);
+ if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
+ return -EINVAL;
+
+ if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
+ sec_out & CXL_PMEM_SEC_STATE_LOCKED)
+ return -EINVAL;
+
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ if (rc < 0) {
+ dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
+
static int add_dpa_res(struct device *dev, struct resource *parent,
struct resource *res, resource_size_t start,
resource_size_t size, const char *type)
@@ -1100,8 +1167,9 @@ static int add_dpa_res(struct device *dev, struct resource *parent,
return 0;
}
-int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
{
+ struct cxl_dev_state *cxlds = &mds->cxlds;
struct device *dev = cxlds->dev;
int rc;
@@ -1113,35 +1181,35 @@ int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
}
cxlds->dpa_res =
- (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);
+ (struct resource)DEFINE_RES_MEM(0, mds->total_bytes);
- if (cxlds->partition_align_bytes == 0) {
+ if (mds->partition_align_bytes == 0) {
rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
- cxlds->volatile_only_bytes, "ram");
+ mds->volatile_only_bytes, "ram");
if (rc)
return rc;
return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
- cxlds->volatile_only_bytes,
- cxlds->persistent_only_bytes, "pmem");
+ mds->volatile_only_bytes,
+ mds->persistent_only_bytes, "pmem");
}
- rc = cxl_mem_get_partition_info(cxlds);
+ rc = cxl_mem_get_partition_info(mds);
if (rc) {
dev_err(dev, "Failed to query partition information\n");
return rc;
}
rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
- cxlds->active_volatile_bytes, "ram");
+ mds->active_volatile_bytes, "ram");
if (rc)
return rc;
return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
- cxlds->active_volatile_bytes,
- cxlds->active_persistent_bytes, "pmem");
+ mds->active_volatile_bytes,
+ mds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
-int cxl_set_timestamp(struct cxl_dev_state *cxlds)
+int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
struct cxl_mbox_cmd mbox_cmd;
struct cxl_mbox_set_timestamp_in pi;
@@ -1154,7 +1222,7 @@ int cxl_set_timestamp(struct cxl_dev_state *cxlds)
.payload_in = &pi,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
/*
* Command is optional. Devices may have another way of providing
* a timestamp, or may return all 0s in timestamp fields.
@@ -1170,18 +1238,18 @@ EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_region *cxlr)
{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
struct cxl_mbox_cmd mbox_cmd;
int nr_records = 0;
int rc;
- rc = mutex_lock_interruptible(&cxlds->poison.lock);
+ rc = mutex_lock_interruptible(&mds->poison.lock);
if (rc)
return rc;
- po = cxlds->poison.list_out;
+ po = mds->poison.list_out;
pi.offset = cpu_to_le64(offset);
pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
@@ -1189,13 +1257,13 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
.opcode = CXL_MBOX_OP_GET_POISON,
.size_in = sizeof(pi),
.payload_in = &pi,
- .size_out = cxlds->payload_size,
+ .size_out = mds->payload_size,
.payload_out = po,
.min_out = struct_size(po, record, 0),
};
do {
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
break;
@@ -1206,14 +1274,14 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
/* Protect against an uncleared _FLAG_MORE */
nr_records = nr_records + le16_to_cpu(po->count);
- if (nr_records >= cxlds->poison.max_errors) {
+ if (nr_records >= mds->poison.max_errors) {
dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
nr_records);
break;
}
} while (po->flags & CXL_POISON_FLAG_MORE);
- mutex_unlock(&cxlds->poison.lock);
+ mutex_unlock(&mds->poison.lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);
@@ -1223,52 +1291,53 @@ static void free_poison_buf(void *buf)
kvfree(buf);
}
-/* Get Poison List output buffer is protected by cxlds->poison.lock */
-static int cxl_poison_alloc_buf(struct cxl_dev_state *cxlds)
+/* Get Poison List output buffer is protected by mds->poison.lock */
+static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
- cxlds->poison.list_out = kvmalloc(cxlds->payload_size, GFP_KERNEL);
- if (!cxlds->poison.list_out)
+ mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL);
+ if (!mds->poison.list_out)
return -ENOMEM;
- return devm_add_action_or_reset(cxlds->dev, free_poison_buf,
- cxlds->poison.list_out);
+ return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
+ mds->poison.list_out);
}
-int cxl_poison_state_init(struct cxl_dev_state *cxlds)
+int cxl_poison_state_init(struct cxl_memdev_state *mds)
{
int rc;
- if (!test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds))
+ if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
return 0;
- rc = cxl_poison_alloc_buf(cxlds);
+ rc = cxl_poison_alloc_buf(mds);
if (rc) {
- clear_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds);
+ clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
return rc;
}
- mutex_init(&cxlds->poison.lock);
+ mutex_init(&mds->poison.lock);
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
-struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
+struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
- struct cxl_dev_state *cxlds;
+ struct cxl_memdev_state *mds;
- cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
- if (!cxlds) {
+ mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
+ if (!mds) {
dev_err(dev, "No memory available\n");
return ERR_PTR(-ENOMEM);
}
- mutex_init(&cxlds->mbox_mutex);
- mutex_init(&cxlds->event.log_lock);
- cxlds->dev = dev;
+ mutex_init(&mds->mbox_mutex);
+ mutex_init(&mds->event.log_lock);
+ mds->cxlds.dev = dev;
+ mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
- return cxlds;
+ return mds;
}
-EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);
void __init cxl_mbox_init(void)
{
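Every call site converted in this file now hands a struct cxl_memdev_state to cxl_internal_send_cmd(), which bounds-checks payload sizes against mds->payload_size and, with this change, no longer converts CXL_MBOX_CMD_RC_BACKGROUND into an errno. A hedged sketch of the calling convention (the opcode and payload struct appear in the hunks above; the wrapper function itself is illustrative):

/* Illustrative wrapper: query partition info through the new mds handle. */
static int example_query_partition(struct cxl_memdev_state *mds,
				   struct cxl_mbox_get_partition_info *pi)
{
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(*pi),	/* fixed-size output payload */
		.payload_out = pi,
	};

	return cxl_internal_send_cmd(mds, &mbox_cmd);
}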
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 057a43267290..f99e7ec3cc40 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
@@ -39,8 +41,11 @@ static ssize_t firmware_version_show(struct device *dev,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
+ if (!mds)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);
@@ -49,8 +54,11 @@ static ssize_t payload_max_show(struct device *dev,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
+ if (!mds)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%zu\n", mds->payload_size);
}
static DEVICE_ATTR_RO(payload_max);
@@ -59,8 +67,11 @@ static ssize_t label_storage_size_show(struct device *dev,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
+ if (!mds)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%zu\n", mds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);
@@ -107,6 +118,89 @@ static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(numa_node);
+static ssize_t security_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+ u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
+ u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+ unsigned long state = mds->security.state;
+
+ if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
+ return sysfs_emit(buf, "sanitize\n");
+
+ if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
+ return sysfs_emit(buf, "disabled\n");
+ if (state & CXL_PMEM_SEC_STATE_FROZEN ||
+ state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
+ state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
+ return sysfs_emit(buf, "frozen\n");
+ if (state & CXL_PMEM_SEC_STATE_LOCKED)
+ return sysfs_emit(buf, "locked\n");
+ else
+ return sysfs_emit(buf, "unlocked\n");
+}
+static struct device_attribute dev_attr_security_state =
+ __ATTR(state, 0444, security_state_show, NULL);
+
+static ssize_t security_sanitize_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_port *port = cxlmd->endpoint;
+ bool sanitize;
+ ssize_t rc;
+
+ if (kstrtobool(buf, &sanitize) || !sanitize)
+ return -EINVAL;
+
+ if (!port || !is_cxl_endpoint(port))
+ return -EINVAL;
+
+ /* ensure no regions are mapped to this memdev */
+ if (port->commit_end != -1)
+ return -EBUSY;
+
+ rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
+
+ return rc ? rc : len;
+}
+static struct device_attribute dev_attr_security_sanitize =
+ __ATTR(sanitize, 0200, NULL, security_sanitize_store);
+
+static ssize_t security_erase_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_port *port = cxlmd->endpoint;
+ ssize_t rc;
+ bool erase;
+
+ if (kstrtobool(buf, &erase) || !erase)
+ return -EINVAL;
+
+ if (!port || !is_cxl_endpoint(port))
+ return -EINVAL;
+
+ /* ensure no regions are mapped to this memdev */
+ if (port->commit_end != -1)
+ return -EBUSY;
+
+ rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
+
+ return rc ? rc : len;
+}
+static struct device_attribute dev_attr_security_erase =
+ __ATTR(erase, 0200, NULL, security_erase_store);
+
static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -140,7 +234,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
struct cxl_port *port;
int rc;
- port = dev_get_drvdata(&cxlmd->dev);
+ port = cxlmd->endpoint;
if (!port || !is_cxl_endpoint(port))
return -EINVAL;
@@ -198,7 +292,7 @@ static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
ctx = (struct cxl_dpa_to_region_context) {
.dpa = dpa,
};
- port = dev_get_drvdata(&cxlmd->dev);
+ port = cxlmd->endpoint;
if (port && is_cxl_endpoint(port) && port->commit_end != -1)
device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
@@ -231,7 +325,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_inject_poison inject;
struct cxl_poison_record record;
struct cxl_mbox_cmd mbox_cmd;
@@ -255,13 +349,13 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
.size_in = sizeof(inject),
.payload_in = &inject,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
goto out;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
- dev_warn_once(cxlds->dev,
+ dev_warn_once(mds->cxlds.dev,
"poison inject dpa:%#llx region: %s\n", dpa,
dev_name(&cxlr->dev));
@@ -279,7 +373,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_clear_poison clear;
struct cxl_poison_record record;
struct cxl_mbox_cmd mbox_cmd;
@@ -312,14 +406,15 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
.payload_in = &clear,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
goto out;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
- dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n",
- dpa, dev_name(&cxlr->dev));
+ dev_warn_once(mds->cxlds.dev,
+ "poison clear dpa:%#llx region: %s\n", dpa,
+ dev_name(&cxlr->dev));
record = (struct cxl_poison_record) {
.address = cpu_to_le64(dpa),
@@ -352,6 +447,13 @@ static struct attribute *cxl_memdev_ram_attributes[] = {
NULL,
};
+static struct attribute *cxl_memdev_security_attributes[] = {
+ &dev_attr_security_state.attr,
+ &dev_attr_security_sanitize.attr,
+ &dev_attr_security_erase.attr,
+ NULL,
+};
+
static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
int n)
{
@@ -375,10 +477,16 @@ static struct attribute_group cxl_memdev_pmem_attribute_group = {
.attrs = cxl_memdev_pmem_attributes,
};
+static struct attribute_group cxl_memdev_security_attribute_group = {
+ .name = "security",
+ .attrs = cxl_memdev_security_attributes,
+};
+
static const struct attribute_group *cxl_memdev_attribute_groups[] = {
&cxl_memdev_attribute_group,
&cxl_memdev_ram_attribute_group,
&cxl_memdev_pmem_attribute_group,
+ &cxl_memdev_security_attribute_group,
NULL,
};
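The new security group lands under the memdev's sysfs directory, so user space can trigger and observe a sanitize without new ioctls. A hypothetical user-space helper: the security/, state and sanitize names come from the attribute group above, while the /sys/bus/cxl/devices/mem0 prefix is an assumption about the device naming; error handling is minimal:

#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/bus/cxl/devices/mem0/security";
	char path[128], state[32];
	FILE *f;

	snprintf(path, sizeof(path), "%s/sanitize", dir);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("1\n", f);	/* any kstrtobool() true value starts the operation */
	fclose(f);

	snprintf(path, sizeof(path), "%s/state", dir);
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(state, sizeof(state), f))
		printf("security state: %s", state);	/* reads "sanitize" while running */
	fclose(f);
	return 0;
}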
@@ -397,17 +505,18 @@ EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);
/**
* set_exclusive_cxl_commands() - atomically disable user cxl commands
- * @cxlds: The device state to operate on
+ * @mds: The device state to operate on
* @cmds: bitmap of commands to mark exclusive
*
* Grab the cxl_memdev_rwsem in write mode to flush in-flight
* invocations of the ioctl path and then disable future execution of
* commands with the command ids set in @cmds.
*/
-void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
+void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+ unsigned long *cmds)
{
down_write(&cxl_memdev_rwsem);
- bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
+ bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
@@ -415,23 +524,34 @@ EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
/**
* clear_exclusive_cxl_commands() - atomically enable user cxl commands
- * @cxlds: The device state to modify
+ * @mds: The device state to modify
* @cmds: bitmap of commands to mark available for userspace
*/
-void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
+void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+ unsigned long *cmds)
{
down_write(&cxl_memdev_rwsem);
- bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
+ bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
+static void cxl_memdev_security_shutdown(struct device *dev)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (mds->security.poll)
+ cancel_delayed_work_sync(&mds->security.poll_dwork);
+}
+
static void cxl_memdev_shutdown(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
down_write(&cxl_memdev_rwsem);
+ cxl_memdev_security_shutdown(dev);
cxlmd->cxlds = NULL;
up_write(&cxl_memdev_rwsem);
}
@@ -511,10 +631,12 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct cxl_memdev *cxlmd = file->private_data;
+ struct cxl_dev_state *cxlds;
int rc = -ENXIO;
down_read(&cxl_memdev_rwsem);
- if (cxlmd->cxlds)
+ cxlds = cxlmd->cxlds;
+ if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
up_read(&cxl_memdev_rwsem);
@@ -542,6 +664,316 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file)
return 0;
}
+/**
+ * cxl_mem_get_fw_info - Get Firmware info
+ * @mds: The device data for the operation
+ *
+ * Retrieve firmware info for the device specified.
+ *
+ * Return: 0 if no error, or the result of the mailbox command.
+ *
+ * See CXL-3.0 8.2.9.3.1 Get FW Info
+ */
+static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
+{
+ struct cxl_mbox_get_fw_info info;
+ struct cxl_mbox_cmd mbox_cmd;
+ int rc;
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_FW_INFO,
+ .size_out = sizeof(info),
+ .payload_out = &info,
+ };
+
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ if (rc < 0)
+ return rc;
+
+ mds->fw.num_slots = info.num_slots;
+ mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
+ info.slot_info);
+
+ return 0;
+}
+
+/**
+ * cxl_mem_activate_fw - Activate Firmware
+ * @mds: The device data for the operation
+ * @slot: slot number to activate
+ *
+ * Activate firmware in a given slot for the device specified.
+ *
+ * Return: 0 if no error, or the result of the mailbox command.
+ *
+ * See CXL-3.0 8.2.9.3.3 Activate FW
+ */
+static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
+{
+ struct cxl_mbox_activate_fw activate;
+ struct cxl_mbox_cmd mbox_cmd;
+
+ if (slot == 0 || slot > mds->fw.num_slots)
+ return -EINVAL;
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_ACTIVATE_FW,
+ .size_in = sizeof(activate),
+ .payload_in = &activate,
+ };
+
+ /* Only offline activation supported for now */
+ activate.action = CXL_FW_ACTIVATE_OFFLINE;
+ activate.slot = slot;
+
+ return cxl_internal_send_cmd(mds, &mbox_cmd);
+}
+
+/**
+ * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
+ * @mds: The device data for the operation
+ *
+ * Abort an in-progress firmware transfer for the device specified.
+ *
+ * Return: 0 if no error, or the result of the mailbox command.
+ *
+ * See CXL-3.0 8.2.9.3.2 Transfer FW
+ */
+static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
+{
+ struct cxl_mbox_transfer_fw *transfer;
+ struct cxl_mbox_cmd mbox_cmd;
+ int rc;
+
+ transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
+ if (!transfer)
+ return -ENOMEM;
+
+ /* Set a 1s poll interval and a total wait time of 30s */
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_TRANSFER_FW,
+ .size_in = sizeof(*transfer),
+ .payload_in = transfer,
+ .poll_interval_ms = 1000,
+ .poll_count = 30,
+ };
+
+ transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
+
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ kfree(transfer);
+ return rc;
+}
+
+static void cxl_fw_cleanup(struct fw_upload *fwl)
+{
+ struct cxl_memdev_state *mds = fwl->dd_handle;
+
+ mds->fw.next_slot = 0;
+}
+
+static int cxl_fw_do_cancel(struct fw_upload *fwl)
+{
+ struct cxl_memdev_state *mds = fwl->dd_handle;
+ struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_memdev *cxlmd = cxlds->cxlmd;
+ int rc;
+
+ rc = cxl_mem_abort_fw_xfer(mds);
+ if (rc < 0)
+ dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);
+
+ return FW_UPLOAD_ERR_CANCELED;
+}
+
+static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
+ u32 size)
+{
+ struct cxl_memdev_state *mds = fwl->dd_handle;
+ struct cxl_mbox_transfer_fw *transfer;
+
+ if (!size)
+ return FW_UPLOAD_ERR_INVALID_SIZE;
+
+ mds->fw.oneshot = struct_size(transfer, data, size) <
+ mds->payload_size;
+
+ if (cxl_mem_get_fw_info(mds))
+ return FW_UPLOAD_ERR_HW_ERROR;
+
+ /*
+ * So far no state has been changed, hence no other cleanup is
+ * necessary. Simply return the cancelled status.
+ */
+ if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
+ return FW_UPLOAD_ERR_CANCELED;
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
+ u32 offset, u32 size, u32 *written)
+{
+ struct cxl_memdev_state *mds = fwl->dd_handle;
+ struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_memdev *cxlmd = cxlds->cxlmd;
+ struct cxl_mbox_transfer_fw *transfer;
+ struct cxl_mbox_cmd mbox_cmd;
+ u32 cur_size, remaining;
+ size_t size_in;
+ int rc;
+
+ *written = 0;
+
+ /* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
+ if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
+ dev_err(&cxlmd->dev,
+ "misaligned offset for FW transfer slice (%u)\n",
+ offset);
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ /*
+ * Pick transfer size based on mds->payload_size. @size must be 128-byte
+ * aligned, ->payload_size is a power of 2 starting at 256 bytes, and
+ * sizeof(*transfer) is 128. These constraints imply that @cur_size
+ * will always be 128-byte aligned.
+ */
+ cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));
+
+ remaining = size - cur_size;
+ size_in = struct_size(transfer, data, cur_size);
+
+ if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
+ return cxl_fw_do_cancel(fwl);
+
+ /*
+ * Slot numbers are 1-indexed, so the 1-indexed cur_slot doubles as the
+ * 0-indexed next_slot (i.e. 'cur_slot - 1 + 1'). Take it modulo
+ * num_slots to handle rollover, then add 1 to make it 1-indexed again.
+ */
+ mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;
+
+ /* Do the transfer via mailbox cmd */
+ transfer = kzalloc(size_in, GFP_KERNEL);
+ if (!transfer)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
+ memcpy(transfer->data, data + offset, cur_size);
+ if (mds->fw.oneshot) {
+ transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
+ transfer->slot = mds->fw.next_slot;
+ } else {
+ if (offset == 0) {
+ transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
+ } else if (remaining == 0) {
+ transfer->action = CXL_FW_TRANSFER_ACTION_END;
+ transfer->slot = mds->fw.next_slot;
+ } else {
+ transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
+ }
+ }
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_TRANSFER_FW,
+ .size_in = size_in,
+ .payload_in = transfer,
+ .poll_interval_ms = 1000,
+ .poll_count = 30,
+ };
+
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ if (rc < 0) {
+ rc = FW_UPLOAD_ERR_RW_ERROR;
+ goto out_free;
+ }
+
+ *written = cur_size;
+
+ /* Activate FW if oneshot or if the last slice was written */
+ if (mds->fw.oneshot || remaining == 0) {
+ dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
+ mds->fw.next_slot);
+ rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
+ if (rc < 0) {
+ dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
+ rc);
+ rc = FW_UPLOAD_ERR_HW_ERROR;
+ goto out_free;
+ }
+ }
+
+ rc = FW_UPLOAD_ERR_NONE;
+
+out_free:
+ kfree(transfer);
+ return rc;
+}
+
+static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
+{
+ struct cxl_memdev_state *mds = fwl->dd_handle;
+
+ /*
+ * cxl_internal_send_cmd() handles background operations synchronously.
+ * No need to wait for completions here - any errors would've been
+ * reported and handled during the ->write() call(s).
+ * Just check if a cancel request was received, and return success.
+ */
+ if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
+ return cxl_fw_do_cancel(fwl);
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static void cxl_fw_cancel(struct fw_upload *fwl)
+{
+ struct cxl_memdev_state *mds = fwl->dd_handle;
+
+ set_bit(CXL_FW_CANCEL, mds->fw.state);
+}
+
+static const struct fw_upload_ops cxl_memdev_fw_ops = {
+ .prepare = cxl_fw_prepare,
+ .write = cxl_fw_write,
+ .poll_complete = cxl_fw_poll_complete,
+ .cancel = cxl_fw_cancel,
+ .cleanup = cxl_fw_cleanup,
+};
+
+static void devm_cxl_remove_fw_upload(void *fwl)
+{
+ firmware_upload_unregister(fwl);
+}
+
+int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
+{
+ struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct device *dev = &cxlds->cxlmd->dev;
+ struct fw_upload *fwl;
+ int rc;
+
+ if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
+ return 0;
+
+ fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
+ &cxl_memdev_fw_ops, mds);
+ if (IS_ERR(fwl))
+ return dev_err_probe(dev, PTR_ERR(fwl),
+ "Failed to register firmware loader\n");
+
+ rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
+ fwl);
+ if (rc)
+ dev_err(dev,
+ "Failed to add firmware loader remove action: %d\n",
+ rc);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
+
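The helper above only registers a firmware loader when the Get FW Info command is enabled. A minimal sketch of how a memdev driver's probe path might wire it up, assuming a hypothetical example_probe() built on the APIs in this patch (the cxl_pci driver is expected to follow the same order, since the loader name comes from the already-registered memdev):

static int example_probe(struct cxl_memdev_state *mds)
{
	struct cxl_memdev *cxlmd;

	/* Register the memdev first so dev_name() is valid for the loader */
	cxlmd = devm_cxl_add_memdev(&mds->cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	/* Quietly returns 0 if Get FW Info is not an enabled command */
	return cxl_memdev_setup_fw_upload(mds);
}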
static const struct file_operations cxl_memdev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = cxl_memdev_ioctl,
@@ -551,6 +983,35 @@ static const struct file_operations cxl_memdev_fops = {
.llseek = noop_llseek,
};
+static void put_sanitize(void *data)
+{
+ struct cxl_memdev_state *mds = data;
+
+ sysfs_put(mds->security.sanitize_node);
+}
+
+static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct device *dev = &cxlmd->dev;
+ struct kernfs_node *sec;
+
+ sec = sysfs_get_dirent(dev->kobj.sd, "security");
+ if (!sec) {
+ dev_err(dev, "sysfs_get_dirent 'security' failed\n");
+ return -ENODEV;
+ }
+ mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
+ sysfs_put(sec);
+ if (!mds->security.sanitize_node) {
+ dev_err(dev, "sysfs_get_dirent 'state' failed\n");
+ return -ENODEV;
+ }
+
+ return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
+}
+
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
{
struct cxl_memdev *cxlmd;
@@ -579,6 +1040,10 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
if (rc)
goto err;
+ rc = cxl_memdev_security_init(cxlmd);
+ if (rc)
+ goto err;
+
rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
if (rc)
return ERR_PTR(rc);
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 67f4ab6daa34..c7a7887ebdcf 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -67,7 +67,7 @@ static int match_add_dports(struct pci_dev *pdev, void *data)
/**
* devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
- * @port: cxl_port whose ->uport is the upstream of dports to be enumerated
+ * @port: cxl_port whose ->uport_dev is the upstream of dports to be enumerated
*
* Returns a positive number of dports enumerated or a negative error
* code.
@@ -308,36 +308,17 @@ static void disable_hdm(void *_cxlhdm)
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}
-int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
+static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
- void __iomem *hdm;
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
u32 global_ctrl;
- /*
- * If the hdm capability was not mapped there is nothing to enable and
- * the caller is responsible for what happens next. For example,
- * emulate a passthrough decoder.
- */
- if (IS_ERR(cxlhdm))
- return 0;
-
- hdm = cxlhdm->regs.hdm_decoder;
global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
-
- /*
- * If the HDM decoder capability was enabled on entry, skip
- * registering disable_hdm() since this decode capability may be
- * owned by platform firmware.
- */
- if (global_ctrl & CXL_HDM_DECODER_ENABLE)
- return 0;
-
writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
- return devm_add_action_or_reset(&port->dev, disable_hdm, cxlhdm);
+ return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enable_hdm, CXL);
int cxl_dvsec_rr_decode(struct device *dev, int d,
struct cxl_endpoint_dvsec_info *info)
@@ -511,7 +492,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
if (info->mem_enabled)
return 0;
- rc = devm_cxl_enable_hdm(port, cxlhdm);
+ rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
if (rc)
return rc;
@@ -622,7 +603,7 @@ static int cxl_cdat_read_table(struct device *dev,
*/
void read_cdat_data(struct cxl_port *port)
{
- struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct device *host = cxlmd->dev.parent;
struct device *dev = &port->dev;
struct pci_doe_mb *cdat_doe;
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index f8c38d997252..fc94f5240327 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -64,7 +64,7 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{
- struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev));
+ struct cxl_port *port = find_cxl_root(cxlmd->endpoint);
struct device *dev;
if (!port)
diff --git a/drivers/cxl/core/pmu.c b/drivers/cxl/core/pmu.c
new file mode 100644
index 000000000000..7684c843e5a5
--- /dev/null
+++ b/drivers/cxl/core/pmu.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Huawei. All rights reserved. */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <cxlmem.h>
+#include <pmu.h>
+#include <cxl.h>
+#include "core.h"
+
+static void cxl_pmu_release(struct device *dev)
+{
+ struct cxl_pmu *pmu = to_cxl_pmu(dev);
+
+ kfree(pmu);
+}
+
+const struct device_type cxl_pmu_type = {
+ .name = "cxl_pmu",
+ .release = cxl_pmu_release,
+};
+
+static void remove_dev(void *dev)
+{
+ device_del(dev);
+}
+
+int devm_cxl_pmu_add(struct device *parent, struct cxl_pmu_regs *regs,
+ int assoc_id, int index, enum cxl_pmu_type type)
+{
+ struct cxl_pmu *pmu;
+ struct device *dev;
+ int rc;
+
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ pmu->assoc_id = assoc_id;
+ pmu->index = index;
+ pmu->type = type;
+ pmu->base = regs->pmu;
+ dev = &pmu->dev;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = parent;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_pmu_type;
+ switch (pmu->type) {
+ case CXL_PMU_MEMDEV:
+ rc = dev_set_name(dev, "pmu_mem%d.%d", assoc_id, index);
+ break;
+ }
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ return devm_add_action_or_reset(parent, remove_dev, dev);
+
+err:
+ put_device(&pmu->dev);
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_pmu_add, CXL);
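A hedged sketch of how a PCI driver might enumerate and register the memdev-scoped PMU instances advertised by the Register Locator DVSEC, using the regblock helpers added later in this patch; CXL_REGLOC_RBI_PMU and example_register_pmus() are illustrative assumptions, not code from this change:

static int example_register_pmus(struct pci_dev *pdev, struct cxl_memdev *cxlmd)
{
	int i, rc, pmu_count;

	pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
	for (i = 0; i < pmu_count; i++) {
		struct cxl_pmu_regs pmu_regs;
		struct cxl_register_map map;

		rc = cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_PMU,
						&map, i);
		if (rc)
			return rc;

		rc = cxl_map_pmu_regs(pdev, &pmu_regs, &map);
		if (rc)
			return rc;

		/* One cxl_pmu device per regblock instance on this memdev */
		rc = devm_cxl_pmu_add(&pdev->dev, &pmu_regs, cxlmd->id, i,
				      CXL_PMU_MEMDEV);
		if (rc)
			return rc;
	}

	return 0;
}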
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index e7c284c890bc..724be8448eb4 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -56,6 +56,8 @@ static int cxl_device_id(const struct device *dev)
return CXL_DEVICE_MEMORY_EXPANDER;
if (dev->type == CXL_REGION_TYPE())
return CXL_DEVICE_REGION;
+ if (dev->type == &cxl_pmu_type)
+ return CXL_DEVICE_PMU;
return 0;
}
@@ -117,9 +119,9 @@ static ssize_t target_type_show(struct device *dev,
struct cxl_decoder *cxld = to_cxl_decoder(dev);
switch (cxld->target_type) {
- case CXL_DECODER_ACCELERATOR:
+ case CXL_DECODER_DEVMEM:
return sysfs_emit(buf, "accelerator\n");
- case CXL_DECODER_EXPANDER:
+ case CXL_DECODER_HOSTONLYMEM:
return sysfs_emit(buf, "expander\n");
}
return -ENXIO;
@@ -561,9 +563,9 @@ static void unregister_port(void *_port)
* unregistered while holding their parent port lock.
*/
if (!parent)
- lock_dev = port->uport;
+ lock_dev = port->uport_dev;
else if (is_cxl_root(parent))
- lock_dev = parent->uport;
+ lock_dev = parent->uport_dev;
else
lock_dev = &parent->dev;
@@ -583,7 +585,8 @@ static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
int rc;
- rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
+ rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj,
+ "uport");
if (rc)
return rc;
return devm_add_action_or_reset(host, cxl_unlink_uport, port);
@@ -605,7 +608,7 @@ static int devm_cxl_link_parent_dport(struct device *host,
if (!parent_dport)
return 0;
- rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport->kobj,
+ rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj,
"parent_dport");
if (rc)
return rc;
@@ -614,7 +617,7 @@ static int devm_cxl_link_parent_dport(struct device *host,
static struct lock_class_key cxl_port_key;
-static struct cxl_port *cxl_port_alloc(struct device *uport,
+static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport)
{
@@ -630,7 +633,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
if (rc < 0)
goto err;
port->id = rc;
- port->uport = uport;
+ port->uport_dev = uport_dev;
/*
* The top-level cxl_port "cxl_root" does not have a cxl_port as
@@ -658,12 +661,13 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
if (iter->host_bridge)
port->host_bridge = iter->host_bridge;
else if (parent_dport->rch)
- port->host_bridge = parent_dport->dport;
+ port->host_bridge = parent_dport->dport_dev;
else
- port->host_bridge = iter->uport;
- dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
+ port->host_bridge = iter->uport_dev;
+ dev_dbg(uport_dev, "host-bridge: %s\n",
+ dev_name(port->host_bridge));
} else
- dev->parent = uport;
+ dev->parent = uport_dev;
port->component_reg_phys = component_reg_phys;
ida_init(&port->decoder_ida);
@@ -686,8 +690,38 @@ err:
return ERR_PTR(rc);
}
+static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
+ resource_size_t component_reg_phys)
+{
+ if (component_reg_phys == CXL_RESOURCE_NONE)
+ return 0;
+
+ *map = (struct cxl_register_map) {
+ .dev = dev,
+ .reg_type = CXL_REGLOC_RBI_COMPONENT,
+ .resource = component_reg_phys,
+ .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
+ };
+
+ return cxl_setup_regs(map);
+}
+
+static inline int cxl_port_setup_regs(struct cxl_port *port,
+ resource_size_t component_reg_phys)
+{
+ return cxl_setup_comp_regs(&port->dev, &port->comp_map,
+ component_reg_phys);
+}
+
+static inline int cxl_dport_setup_regs(struct cxl_dport *dport,
+ resource_size_t component_reg_phys)
+{
+ return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
+ component_reg_phys);
+}
+
static struct cxl_port *__devm_cxl_add_port(struct device *host,
- struct device *uport,
+ struct device *uport_dev,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport)
{
@@ -695,12 +729,12 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
struct device *dev;
int rc;
- port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
+ port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport);
if (IS_ERR(port))
return port;
dev = &port->dev;
- if (is_cxl_memdev(uport))
+ if (is_cxl_memdev(uport_dev))
rc = dev_set_name(dev, "endpoint%d", port->id);
else if (parent_dport)
rc = dev_set_name(dev, "port%d", port->id);
@@ -709,6 +743,10 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
if (rc)
goto err;
+ rc = cxl_port_setup_regs(port, component_reg_phys);
+ if (rc)
+ goto err;
+
rc = device_add(dev);
if (rc)
goto err;
@@ -735,28 +773,29 @@ err:
/**
* devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
* @host: host device for devm operations
- * @uport: "physical" device implementing this upstream port
+ * @uport_dev: "physical" device implementing this upstream port
* @component_reg_phys: (optional) for configurable cxl_port instances
* @parent_dport: next hop up in the CXL memory decode hierarchy
*/
-struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
+struct cxl_port *devm_cxl_add_port(struct device *host,
+ struct device *uport_dev,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport)
{
struct cxl_port *port, *parent_port;
- port = __devm_cxl_add_port(host, uport, component_reg_phys,
+ port = __devm_cxl_add_port(host, uport_dev, component_reg_phys,
parent_dport);
parent_port = parent_dport ? parent_dport->port : NULL;
if (IS_ERR(port)) {
- dev_dbg(uport, "Failed to add%s%s%s: %ld\n",
+ dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n",
parent_port ? " port to " : "",
parent_port ? dev_name(&parent_port->dev) : "",
parent_port ? "" : " root port",
PTR_ERR(port));
} else {
- dev_dbg(uport, "%s added%s%s%s\n",
+ dev_dbg(uport_dev, "%s added%s%s%s\n",
dev_name(&port->dev),
parent_port ? " to " : "",
parent_port ? dev_name(&parent_port->dev) : "",
@@ -773,33 +812,34 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
if (is_cxl_root(port))
return NULL;
- if (dev_is_pci(port->uport)) {
- struct pci_dev *pdev = to_pci_dev(port->uport);
+ if (dev_is_pci(port->uport_dev)) {
+ struct pci_dev *pdev = to_pci_dev(port->uport_dev);
return pdev->subordinate;
}
- return xa_load(&cxl_root_buses, (unsigned long)port->uport);
+ return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);
-static void unregister_pci_bus(void *uport)
+static void unregister_pci_bus(void *uport_dev)
{
- xa_erase(&cxl_root_buses, (unsigned long)uport);
+ xa_erase(&cxl_root_buses, (unsigned long)uport_dev);
}
-int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
+int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
struct pci_bus *bus)
{
int rc;
- if (dev_is_pci(uport))
+ if (dev_is_pci(uport_dev))
return -EINVAL;
- rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
+ rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus,
+ GFP_KERNEL);
if (rc)
return rc;
- return devm_add_action_or_reset(host, unregister_pci_bus, uport);
+ return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
@@ -847,22 +887,22 @@ static struct cxl_dport *find_dport(struct cxl_port *port, int id)
return NULL;
}
-static int add_dport(struct cxl_port *port, struct cxl_dport *new)
+static int add_dport(struct cxl_port *port, struct cxl_dport *dport)
{
struct cxl_dport *dup;
int rc;
device_lock_assert(&port->dev);
- dup = find_dport(port, new->port_id);
+ dup = find_dport(port, dport->port_id);
if (dup) {
dev_err(&port->dev,
"unable to add dport%d-%s non-unique port id (%s)\n",
- new->port_id, dev_name(new->dport),
- dev_name(dup->dport));
+ dport->port_id, dev_name(dport->dport_dev),
+ dev_name(dup->dport_dev));
return -EBUSY;
}
- rc = xa_insert(&port->dports, (unsigned long)new->dport, new,
+ rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport,
GFP_KERNEL);
if (rc)
return rc;
@@ -895,8 +935,8 @@ static void cxl_dport_remove(void *data)
struct cxl_dport *dport = data;
struct cxl_port *port = dport->port;
- xa_erase(&port->dports, (unsigned long) dport->dport);
- put_device(dport->dport);
+ xa_erase(&port->dports, (unsigned long) dport->dport_dev);
+ put_device(dport->dport_dev);
}
static void cxl_dport_unlink(void *data)
@@ -920,7 +960,7 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
int rc;
if (is_cxl_root(port))
- host = port->uport;
+ host = port->uport_dev;
else
host = &port->dev;
@@ -938,13 +978,29 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (!dport)
return ERR_PTR(-ENOMEM);
- dport->dport = dport_dev;
+ if (rcrb != CXL_RESOURCE_NONE) {
+ dport->rcrb.base = rcrb;
+ component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
+ CXL_RCRB_DOWNSTREAM);
+ if (component_reg_phys == CXL_RESOURCE_NONE) {
+ dev_warn(dport_dev, "Invalid Component Registers in RCRB");
+ return ERR_PTR(-ENXIO);
+ }
+
+ dport->rch = true;
+ }
+
+ if (component_reg_phys != CXL_RESOURCE_NONE)
+ dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
+ &component_reg_phys);
+
+ dport->dport_dev = dport_dev;
dport->port_id = port_id;
- dport->component_reg_phys = component_reg_phys;
dport->port = port;
- if (rcrb != CXL_RESOURCE_NONE)
- dport->rch = true;
- dport->rcrb = rcrb;
+
+ rc = cxl_dport_setup_regs(dport, component_reg_phys);
+ if (rc)
+ return ERR_PTR(rc);
cond_cxl_root_lock(port);
rc = add_dport(port, dport);
@@ -1004,14 +1060,12 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
* @port: the cxl_port that references this dport
* @dport_dev: firmware or PCI device representing the dport
* @port_id: identifier for this dport in a decoder's target list
- * @component_reg_phys: optional location of CXL component registers
* @rcrb: mandatory location of a Root Complex Register Block
*
* See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
*/
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
struct device *dport_dev, int port_id,
- resource_size_t component_reg_phys,
resource_size_t rcrb)
{
struct cxl_dport *dport;
@@ -1022,7 +1076,7 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
}
dport = __devm_cxl_add_dport(port, dport_dev, port_id,
- component_reg_phys, rcrb);
+ CXL_RESOURCE_NONE, rcrb);
if (IS_ERR(dport)) {
dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
dev_name(&port->dev), PTR_ERR(dport));
@@ -1161,7 +1215,7 @@ static struct device *grandparent(struct device *dev)
static void delete_endpoint(void *data)
{
struct cxl_memdev *cxlmd = data;
- struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
+ struct cxl_port *endpoint = cxlmd->endpoint;
struct cxl_port *parent_port;
struct device *parent;
@@ -1176,6 +1230,7 @@ static void delete_endpoint(void *data)
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
}
+ cxlmd->endpoint = NULL;
device_unlock(parent);
put_device(parent);
out:
@@ -1187,7 +1242,7 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
struct device *dev = &cxlmd->dev;
get_device(&endpoint->dev);
- dev_set_drvdata(dev, endpoint);
+ cxlmd->endpoint = endpoint;
cxlmd->depth = endpoint->depth;
return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
@@ -1363,7 +1418,7 @@ out:
rc = PTR_ERR(port);
else {
dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
- dev_name(&port->dev), dev_name(port->uport));
+ dev_name(&port->dev), dev_name(port->uport_dev));
rc = cxl_add_ep(dport, &cxlmd->dev);
if (rc == -EBUSY) {
/*
@@ -1425,7 +1480,8 @@ retry:
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
- dev_name(&port->dev), dev_name(port->uport));
+ dev_name(&port->dev),
+ dev_name(port->uport_dev));
rc = cxl_add_ep(dport, &cxlmd->dev);
/*
@@ -1465,6 +1521,13 @@ retry:
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
+struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
+ struct cxl_dport **dport)
+{
+ return find_cxl_port(pdev->dev.parent, dport);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);
+
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
struct cxl_dport **dport)
{
@@ -1550,7 +1613,7 @@ static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
/* Pre initialize an "empty" decoder */
cxld->interleave_ways = 1;
cxld->interleave_granularity = PAGE_SIZE;
- cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->hpa_range = (struct range) {
.start = 0,
.end = -1,
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index f822de44bee0..e115ba382e04 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -125,10 +125,38 @@ static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
return xa_load(&port->regions, (unsigned long)cxlr);
}
+static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
+{
+ if (!cpu_cache_has_invalidate_memregion()) {
+ if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
+ dev_warn_once(
+ &cxlr->dev,
+ "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
+ return 0;
+ } else {
+ dev_err(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
+ return -ENXIO;
+ }
+ }
+
+ cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+ return 0;
+}
+
static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
struct cxl_region_params *p = &cxlr->params;
- int i;
+ int i, rc = 0;
+
+ /*
+ * Before region teardown, attempt to flush; if the flush fails,
+ * cancel the teardown to avoid data consistency problems.
+ */
+ rc = cxl_region_invalidate_memregion(cxlr);
+ if (rc)
+ return rc;
for (i = count - 1; i >= 0; i--) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
@@ -136,7 +164,6 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_port *iter = cxled_to_port(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_ep *ep;
- int rc = 0;
if (cxlds->rcd)
goto endpoint_reset;
@@ -155,14 +182,19 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
rc = cxld->reset(cxld);
if (rc)
return rc;
+ set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
endpoint_reset:
rc = cxled->cxld.reset(&cxled->cxld);
if (rc)
return rc;
+ set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
+ /* all decoders associated with this region have been torn down */
+ clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
+
return 0;
}
@@ -256,9 +288,19 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
goto out;
}
- if (commit)
+ /*
+ * Invalidate caches before region setup to drop any speculative
+ * consumption of this address space
+ */
+ rc = cxl_region_invalidate_memregion(cxlr);
+ if (rc)
+ return rc;
+
+ if (commit) {
rc = cxl_region_decode_commit(cxlr);
- else {
+ if (rc == 0)
+ p->state = CXL_CONFIG_COMMIT;
+ } else {
p->state = CXL_CONFIG_RESET_PENDING;
up_write(&cxl_region_rwsem);
device_release_driver(&cxlr->dev);
@@ -268,18 +310,20 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
* The lock was dropped, so need to revalidate that the reset is
* still pending.
*/
- if (p->state == CXL_CONFIG_RESET_PENDING)
+ if (p->state == CXL_CONFIG_RESET_PENDING) {
rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ /*
+ * Revert to committed since there may still be active
+ * decoders associated with this region, or move forward
+ * to active to mark the reset successful
+ */
+ if (rc)
+ p->state = CXL_CONFIG_COMMIT;
+ else
+ p->state = CXL_CONFIG_ACTIVE;
+ }
}
- if (rc)
- goto out;
-
- if (commit)
- p->state = CXL_CONFIG_COMMIT;
- else if (p->state == CXL_CONFIG_RESET_PENDING)
- p->state = CXL_CONFIG_ACTIVE;
-
out:
up_write(&cxl_region_rwsem);
@@ -809,6 +853,18 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
return -EBUSY;
}
+ /*
+ * Endpoints should already match the region type, but backstop that
+ * assumption with an assertion. Switch-decoders change mapping-type
+ * based on what is mapped when they are assigned to a region.
+ */
+ dev_WARN_ONCE(&cxlr->dev,
+ port == cxled_to_port(cxled) &&
+ cxld->target_type != cxlr->type,
+ "%s:%s mismatch decoder type %d -> %d\n",
+ dev_name(&cxled_to_memdev(cxled)->dev),
+ dev_name(&cxld->dev), cxld->target_type, cxlr->type);
+ cxld->target_type = cxlr->type;
cxl_rr->decoder = cxld;
return 0;
}
@@ -906,10 +962,10 @@ static int cxl_port_attach_region(struct cxl_port *port,
dev_dbg(&cxlr->dev,
"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
- dev_name(port->uport), dev_name(&port->dev),
+ dev_name(port->uport_dev), dev_name(&port->dev),
dev_name(&cxld->dev), dev_name(&cxlmd->dev),
dev_name(&cxled->cxld.dev), pos,
- ep ? ep->next ? dev_name(ep->next->uport) :
+ ep ? ep->next ? dev_name(ep->next->uport_dev) :
dev_name(&cxlmd->dev) :
"none",
cxl_rr->nr_eps, cxl_rr->nr_targets);
@@ -984,7 +1040,7 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
*/
if (pos < distance) {
dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
- dev_name(port->uport), dev_name(&port->dev),
+ dev_name(port->uport_dev), dev_name(&port->dev),
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
return -ENXIO;
}
@@ -994,7 +1050,7 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
if (ep->dport != ep_peer->dport) {
dev_dbg(&cxlr->dev,
"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
- dev_name(port->uport), dev_name(&port->dev),
+ dev_name(port->uport_dev), dev_name(&port->dev),
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
dev_name(&cxlmd_peer->dev),
dev_name(&cxled_peer->cxld.dev));
@@ -1026,7 +1082,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
*/
if (!is_power_of_2(cxl_rr->nr_targets)) {
dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
- dev_name(port->uport), dev_name(&port->dev),
+ dev_name(port->uport_dev), dev_name(&port->dev),
cxl_rr->nr_targets);
return -EINVAL;
}
@@ -1076,7 +1132,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
rc = granularity_to_eig(parent_ig, &peig);
if (rc) {
dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
- dev_name(parent_port->uport),
+ dev_name(parent_port->uport_dev),
dev_name(&parent_port->dev), parent_ig);
return rc;
}
@@ -1084,7 +1140,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
rc = ways_to_eiw(parent_iw, &peiw);
if (rc) {
dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
- dev_name(parent_port->uport),
+ dev_name(parent_port->uport_dev),
dev_name(&parent_port->dev), parent_iw);
return rc;
}
@@ -1093,7 +1149,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
rc = ways_to_eiw(iw, &eiw);
if (rc) {
dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
- dev_name(port->uport), dev_name(&port->dev), iw);
+ dev_name(port->uport_dev), dev_name(&port->dev), iw);
return rc;
}
@@ -1113,7 +1169,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
rc = eig_to_granularity(eig, &ig);
if (rc) {
dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
- dev_name(port->uport), dev_name(&port->dev),
+ dev_name(port->uport_dev), dev_name(&port->dev),
256 << eig);
return rc;
}
@@ -1126,11 +1182,11 @@ static int cxl_port_setup_targets(struct cxl_port *port,
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
"%s:%s %s expected iw: %d ig: %d %pr\n",
- dev_name(port->uport), dev_name(&port->dev),
+ dev_name(port->uport_dev), dev_name(&port->dev),
__func__, iw, ig, p->res);
dev_err(&cxlr->dev,
"%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
- dev_name(port->uport), dev_name(&port->dev),
+ dev_name(port->uport_dev), dev_name(&port->dev),
__func__, cxld->interleave_ways,
cxld->interleave_granularity,
(cxld->flags & CXL_DECODER_F_ENABLE) ?
@@ -1147,22 +1203,22 @@ static int cxl_port_setup_targets(struct cxl_port *port,
.end = p->res->end,
};
}
- dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
+ dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
dev_name(&port->dev), iw, ig);
add_target:
if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
dev_dbg(&cxlr->dev,
"%s:%s: targets full trying to add %s:%s at %d\n",
- dev_name(port->uport), dev_name(&port->dev),
+ dev_name(port->uport_dev), dev_name(&port->dev),
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
return -ENXIO;
}
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
- dev_name(port->uport), dev_name(&port->dev),
+ dev_name(port->uport_dev), dev_name(&port->dev),
dev_name(&cxlsd->cxld.dev),
- dev_name(ep->dport->dport),
+ dev_name(ep->dport->dport_dev),
cxl_rr->nr_targets_set);
return -ENXIO;
}
@@ -1172,8 +1228,8 @@ add_target:
out_target_set:
cxl_rr->nr_targets_set += inc;
dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
- dev_name(port->uport), dev_name(&port->dev),
- cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
+ dev_name(port->uport_dev), dev_name(&port->dev),
+ cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
return 0;
@@ -1492,7 +1548,7 @@ static int cmp_decode_pos(const void *a, const void *b)
if (!dev) {
struct range *range = &cxled_a->cxld.hpa_range;
- dev_err(port->uport,
+ dev_err(port->uport_dev,
"failed to find decoder that maps %#llx-%#llx\n",
range->start, range->end);
goto err;
@@ -1507,14 +1563,15 @@ static int cmp_decode_pos(const void *a, const void *b)
put_device(dev);
if (a_pos < 0 || b_pos < 0) {
- dev_err(port->uport,
+ dev_err(port->uport_dev,
"failed to find shared decoder for %s and %s\n",
dev_name(cxlmd_a->dev.parent),
dev_name(cxlmd_b->dev.parent));
goto err;
}
- dev_dbg(port->uport, "%s comes %s %s\n", dev_name(cxlmd_a->dev.parent),
+ dev_dbg(port->uport_dev, "%s comes %s %s\n",
+ dev_name(cxlmd_a->dev.parent),
a_pos - b_pos < 0 ? "before" : "after",
dev_name(cxlmd_b->dev.parent));
@@ -1674,7 +1731,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (rc)
goto err_decrement;
p->state = CXL_CONFIG_ACTIVE;
- set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
}
cxled->cxld.interleave_ways = p->interleave_ways;
@@ -2059,11 +2115,11 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
if (rc)
goto err;
- rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr);
+ rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
if (rc)
return ERR_PTR(rc);
- dev_dbg(port->uport, "%s: created %s\n",
+ dev_dbg(port->uport_dev, "%s: created %s\n",
dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
return cxlr;
@@ -2103,7 +2159,7 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
return ERR_PTR(-EBUSY);
}
- return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_EXPANDER);
+ return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}
static ssize_t create_pmem_region_store(struct device *dev,
@@ -2191,7 +2247,7 @@ static ssize_t delete_region_store(struct device *dev,
if (IS_ERR(cxlr))
return PTR_ERR(cxlr);
- devm_release_action(port->uport, unregister_region, cxlr);
+ devm_release_action(port->uport_dev, unregister_region, cxlr);
put_device(&cxlr->dev);
return len;
@@ -2356,7 +2412,8 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
if (rc == 1)
- rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport), &ctx);
+ rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
+ &ctx);
up_read(&cxl_region_rwsem);
return rc;
@@ -2732,7 +2789,7 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
err:
up_write(&cxl_region_rwsem);
- devm_release_action(port->uport, unregister_region, cxlr);
+ devm_release_action(port->uport_dev, unregister_region, cxlr);
return ERR_PTR(rc);
}
@@ -2803,30 +2860,6 @@ out:
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
-static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
-{
- if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags))
- return 0;
-
- if (!cpu_cache_has_invalidate_memregion()) {
- if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
- dev_warn_once(
- &cxlr->dev,
- "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
- clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
- return 0;
- } else {
- dev_err(&cxlr->dev,
- "Failed to synchronize CPU cache state\n");
- return -ENXIO;
- }
- }
-
- cpu_cache_invalidate_memregion(IORES_DESC_CXL);
- clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
- return 0;
-}
-
static int is_system_ram(struct resource *res, void *arg)
{
struct cxl_region *cxlr = arg;
@@ -2854,7 +2887,12 @@ static int cxl_region_probe(struct device *dev)
goto out;
}
- rc = cxl_region_invalidate_memregion(cxlr);
+ if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
+ dev_err(&cxlr->dev,
+ "failed to activate, re-commit region and retry\n");
+ rc = -ENXIO;
+ goto out;
+ }
/*
* From this point on any path that changes the region's state away from
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 1476a0299c9b..6281127b3e9d 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <cxlmem.h>
#include <cxlpci.h>
+#include <pmu.h>
#include "core.h"
@@ -199,11 +200,13 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
return ret_val;
}
-int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs,
- struct cxl_register_map *map, unsigned long map_mask)
+int cxl_map_component_regs(const struct cxl_register_map *map,
+ struct cxl_component_regs *regs,
+ unsigned long map_mask)
{
+ struct device *dev = map->dev;
struct mapinfo {
- struct cxl_reg_map *rmap;
+ const struct cxl_reg_map *rmap;
void __iomem **addr;
} mapinfo[] = {
{ &map->component_map.hdm_decoder, &regs->hdm_decoder },
@@ -231,13 +234,13 @@ int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs,
}
EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
-int cxl_map_device_regs(struct device *dev,
- struct cxl_device_regs *regs,
- struct cxl_register_map *map)
+int cxl_map_device_regs(const struct cxl_register_map *map,
+ struct cxl_device_regs *regs)
{
+ struct device *dev = map->dev;
resource_size_t phys_addr = map->resource;
struct mapinfo {
- struct cxl_reg_map *rmap;
+ const struct cxl_reg_map *rmap;
void __iomem **addr;
} mapinfo[] = {
{ &map->device_map.status, &regs->status, },
@@ -286,23 +289,30 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
}
/**
- * cxl_find_regblock() - Locate register blocks by type
+ * cxl_find_regblock_instance() - Locate a register block by type / index
* @pdev: The CXL PCI device to enumerate.
* @type: Register Block Indicator id
* @map: Enumeration output, clobbered on error
+ * @index: Index of the requested regblock instance, in the order the
+ * instances appear in the Register Locator DVSEC.
*
* Return: 0 if register block enumerated, negative error code otherwise
*
* A CXL DVSEC may point to one or more register blocks, search for them
- * by @type.
+ * by @type and @index.
*/
-int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map)
+int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, int index)
{
u32 regloc_size, regblocks;
+ int instance = 0;
int regloc, i;
- map->resource = CXL_RESOURCE_NONE;
+ *map = (struct cxl_register_map) {
+ .dev = &pdev->dev,
+ .resource = CXL_RESOURCE_NONE,
+ };
+
regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL,
CXL_DVSEC_REG_LOCATOR);
if (!regloc)
@@ -323,20 +333,148 @@ int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
if (!cxl_decode_regblock(pdev, reg_lo, reg_hi, map))
continue;
- if (map->reg_type == type)
- return 0;
+ if (map->reg_type == type) {
+ if (index == instance)
+ return 0;
+ instance++;
+ }
}
map->resource = CXL_RESOURCE_NONE;
return -ENODEV;
}
+EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, CXL);
+
+/**
+ * cxl_find_regblock() - Locate register blocks by type
+ * @pdev: The CXL PCI device to enumerate.
+ * @type: Register Block Indicator id
+ * @map: Enumeration output, clobbered on error
+ *
+ * Return: 0 if register block enumerated, negative error code otherwise
+ *
+ * A CXL DVSEC may point to one or more register blocks, search for them
+ * by @type.
+ */
+int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map)
+{
+ return cxl_find_regblock_instance(pdev, type, map, 0);
+}
EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL);
-resource_size_t cxl_rcrb_to_component(struct device *dev,
- resource_size_t rcrb,
- enum cxl_rcrb which)
+/**
+ * cxl_count_regblock() - Count instances of a given regblock type.
+ * @pdev: The CXL PCI device to enumerate.
+ * @type: Register Block Indicator id
+ *
+ * Some regblocks may be repeated. Count how many instances.
+ *
+ * Return: count of matching regblocks.
+ */
+int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
+{
+ struct cxl_register_map map;
+ int rc, count = 0;
+
+ while (1) {
+ rc = cxl_find_regblock_instance(pdev, type, &map, count);
+ if (rc)
+ return count;
+ count++;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, CXL);
+
+int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
+ struct cxl_register_map *map)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t phys_addr;
+
+ phys_addr = map->resource;
+ regs->pmu = devm_cxl_iomap_block(dev, phys_addr, CXL_PMU_REGMAP_SIZE);
+ if (!regs->pmu)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
+
+static int cxl_map_regblock(struct cxl_register_map *map)
+{
+ struct device *dev = map->dev;
+
+ map->base = ioremap(map->resource, map->max_size);
+ if (!map->base) {
+ dev_err(dev, "failed to map registers\n");
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
+ return 0;
+}
+
+static void cxl_unmap_regblock(struct cxl_register_map *map)
+{
+ iounmap(map->base);
+ map->base = NULL;
+}
+
+static int cxl_probe_regs(struct cxl_register_map *map)
+{
+ struct cxl_component_reg_map *comp_map;
+ struct cxl_device_reg_map *dev_map;
+ struct device *dev = map->dev;
+ void __iomem *base = map->base;
+
+ switch (map->reg_type) {
+ case CXL_REGLOC_RBI_COMPONENT:
+ comp_map = &map->component_map;
+ cxl_probe_component_regs(dev, base, comp_map);
+ dev_dbg(dev, "Set up component registers\n");
+ break;
+ case CXL_REGLOC_RBI_MEMDEV:
+ dev_map = &map->device_map;
+ cxl_probe_device_regs(dev, base, dev_map);
+ if (!dev_map->status.valid || !dev_map->mbox.valid ||
+ !dev_map->memdev.valid) {
+ dev_err(dev, "registers not found: %s%s%s\n",
+ !dev_map->status.valid ? "status " : "",
+ !dev_map->mbox.valid ? "mbox " : "",
+ !dev_map->memdev.valid ? "memdev " : "");
+ return -ENXIO;
+ }
+
+ dev_dbg(dev, "Probing device registers...\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int cxl_setup_regs(struct cxl_register_map *map)
+{
+ int rc;
+
+ rc = cxl_map_regblock(map);
+ if (rc)
+ return rc;
+
+ rc = cxl_probe_regs(map);
+ cxl_unmap_regblock(map);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, CXL);
+
+resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
+ enum cxl_rcrb which)
{
resource_size_t component_reg_phys;
+ resource_size_t rcrb = ri->base;
void __iomem *addr;
u32 bar0, bar1;
u16 cmd;
@@ -395,4 +533,12 @@ resource_size_t cxl_rcrb_to_component(struct device *dev,
return component_reg_phys;
}
-EXPORT_SYMBOL_NS_GPL(cxl_rcrb_to_component, CXL);
+
+resource_size_t cxl_rcd_component_reg_phys(struct device *dev,
+ struct cxl_dport *dport)
+{
+ if (!dport->rch)
+ return CXL_RESOURCE_NONE;
+ return __rcrb_to_component(dev, &dport->rcrb, CXL_RCRB_UPSTREAM);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_rcd_component_reg_phys, CXL);
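With the RCRB details now carried in the dport, an RCD-aware caller can recover the upstream component register base roughly as follows; example_rcd_comp_regs() and its error handling are a hedged sketch under the assumption that cxl_pci_find_port() returns a referenced port, not the driver's actual implementation:

static resource_size_t example_rcd_comp_regs(struct pci_dev *pdev)
{
	resource_size_t component_reg_phys;
	struct cxl_dport *dport;
	struct cxl_port *port;

	port = cxl_pci_find_port(pdev, &dport);
	if (!port)
		return CXL_RESOURCE_NONE;

	/* CXL_RESOURCE_NONE unless the dport was enumerated in RCH mode */
	component_reg_phys = cxl_rcd_component_reg_phys(&pdev->dev, dport);
	put_device(&port->dev);

	return component_reg_phys;
}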
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index f93a28538962..76d92561af29 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -56,7 +56,7 @@
#define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
#define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
#define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
-#define CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
+#define CXL_HDM_DECODER0_CTRL_HOSTONLY BIT(12)
#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
#define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
@@ -176,14 +176,22 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
/* CXL 2.0 8.2.8.4 Mailbox Registers */
#define CXLDEV_MBOX_CAPS_OFFSET 0x00
#define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
+#define CXLDEV_MBOX_CAP_BG_CMD_IRQ BIT(6)
+#define CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK GENMASK(10, 7)
#define CXLDEV_MBOX_CTRL_OFFSET 0x04
#define CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
+#define CXLDEV_MBOX_CTRL_BG_CMD_IRQ BIT(2)
#define CXLDEV_MBOX_CMD_OFFSET 0x08
#define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
#define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
#define CXLDEV_MBOX_STATUS_OFFSET 0x10
+#define CXLDEV_MBOX_STATUS_BG_CMD BIT(0)
#define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
+#define CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
+#define CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK GENMASK_ULL(22, 16)
+#define CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK GENMASK_ULL(47, 32)
+#define CXLDEV_MBOX_BG_CMD_COMMAND_VENDOR_MASK GENMASK_ULL(63, 48)
#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
/*
@@ -209,6 +217,10 @@ struct cxl_regs {
struct_group_tagged(cxl_device_regs, device_regs,
void __iomem *status, *mbox, *memdev;
);
+
+ struct_group_tagged(cxl_pmu_regs, pmu_regs,
+ void __iomem *pmu;
+ );
};
struct cxl_reg_map {
@@ -229,16 +241,23 @@ struct cxl_device_reg_map {
struct cxl_reg_map memdev;
};
+struct cxl_pmu_reg_map {
+ struct cxl_reg_map pmu;
+};
+
/**
* struct cxl_register_map - DVSEC harvested register block mapping parameters
+ * @dev: device for devm operations and logging
* @base: virtual base of the register-block-BAR + @block_offset
* @resource: physical resource base of the register block
* @max_size: maximum mapping size to perform register search
* @reg_type: see enum cxl_regloc_type
* @component_map: cxl_reg_map for component registers
* @device_map: cxl_reg_maps for device registers
+ * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
*/
struct cxl_register_map {
+ struct device *dev;
void __iomem *base;
resource_size_t resource;
resource_size_t max_size;
@@ -246,6 +265,7 @@ struct cxl_register_map {
union {
struct cxl_component_reg_map component_map;
struct cxl_device_reg_map device_map;
+ struct cxl_pmu_reg_map pmu_map;
};
};
@@ -253,23 +273,24 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
struct cxl_component_reg_map *map);
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
struct cxl_device_reg_map *map);
-int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs,
- struct cxl_register_map *map,
+int cxl_map_component_regs(const struct cxl_register_map *map,
+ struct cxl_component_regs *regs,
unsigned long map_mask);
-int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs,
- struct cxl_register_map *map);
+int cxl_map_device_regs(const struct cxl_register_map *map,
+ struct cxl_device_regs *regs);
+int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
+ struct cxl_register_map *map);
enum cxl_regloc_type;
+int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
+int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, int index);
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map);
-
-enum cxl_rcrb {
- CXL_RCRB_DOWNSTREAM,
- CXL_RCRB_UPSTREAM,
-};
-resource_size_t cxl_rcrb_to_component(struct device *dev,
- resource_size_t rcrb,
- enum cxl_rcrb which);
+int cxl_setup_regs(struct cxl_register_map *map);
+struct cxl_dport;
+resource_size_t cxl_rcd_component_reg_phys(struct device *dev,
+ struct cxl_dport *dport);
#define CXL_RESOURCE_NONE ((resource_size_t) -1)
#define CXL_TARGET_STRLEN 20
@@ -290,8 +311,8 @@ resource_size_t cxl_rcrb_to_component(struct device *dev,
#define CXL_DECODER_F_MASK GENMASK(5, 0)
enum cxl_decoder_type {
- CXL_DECODER_ACCELERATOR = 2,
- CXL_DECODER_EXPANDER = 3,
+ CXL_DECODER_DEVMEM = 2,
+ CXL_DECODER_HOSTONLYMEM = 3,
};
/*
@@ -463,17 +484,19 @@ struct cxl_region_params {
};
/*
- * Flag whether this region needs to have its HPA span synchronized with
- * CPU cache state at region activation time.
- */
-#define CXL_REGION_F_INCOHERENT 0
-
-/*
* Indicate whether this region has been assembled by autodetection or
* userspace assembly. Prevent endpoint decoders outside of automatic
* detection from being added to the region.
*/
-#define CXL_REGION_F_AUTO 1
+#define CXL_REGION_F_AUTO 0
+
+/*
+ * Require that a committed region successfully complete a teardown once
+ * any of its associated decoders have been torn down. This maintains
+ * the commit state for the region since there are committed decoders,
+ * but blocks cxl_region_probe().
+ */
+#define CXL_REGION_F_NEEDS_RESET 1
/**
* struct cxl_region - CXL region
@@ -541,7 +564,7 @@ struct cxl_dax_region {
* downstream port devices to construct a CXL memory
* decode hierarchy.
* @dev: this port's device
- * @uport: PCI or platform device implementing the upstream port capability
+ * @uport_dev: PCI or platform device implementing the upstream port capability
* @host_bridge: Shortcut to the platform attach point for this port
* @id: id for port device-name
* @dports: cxl_dport instances referenced by decoders
@@ -549,6 +572,7 @@ struct cxl_dax_region {
* @regions: cxl_region_ref instances, regions mapped by this port
* @parent_dport: dport that points to this port in the parent
* @decoder_ida: allocator for decoder ids
+ * @comp_map: component register capability mappings
* @nr_dports: number of entries in @dports
* @hdm_end: track last allocated HDM decoder instance for allocation ordering
* @commit_end: cursor to track highest committed decoder for commit ordering
@@ -560,7 +584,7 @@ struct cxl_dax_region {
*/
struct cxl_port {
struct device dev;
- struct device *uport;
+ struct device *uport_dev;
struct device *host_bridge;
int id;
struct xarray dports;
@@ -568,6 +592,7 @@ struct cxl_port {
struct xarray regions;
struct cxl_dport *parent_dport;
struct ida decoder_ida;
+ struct cxl_register_map comp_map;
int nr_dports;
int hdm_end;
int commit_end;
@@ -587,20 +612,25 @@ cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
return xa_load(&port->dports, (unsigned long)dport_dev);
}
+struct cxl_rcrb_info {
+ resource_size_t base;
+ u16 aer_cap;
+};
+
/**
* struct cxl_dport - CXL downstream port
- * @dport: PCI bridge or firmware device representing the downstream link
+ * @dport_dev: PCI bridge or firmware device representing the downstream link
+ * @comp_map: component register capability mappings
* @port_id: unique hardware identifier for dport in decoder target list
- * @component_reg_phys: downstream port component registers
- * @rcrb: base address for the Root Complex Register Block
+ * @rcrb: Data about the Root Complex Register Block layout
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
*/
struct cxl_dport {
- struct device *dport;
+ struct device *dport_dev;
+ struct cxl_register_map comp_map;
int port_id;
- resource_size_t component_reg_phys;
- resource_size_t rcrb;
+ struct cxl_rcrb_info rcrb;
bool rch;
struct cxl_port *port;
};
@@ -641,27 +671,30 @@ struct cxl_region_ref {
/*
* The platform firmware device hosting the root is also the top of the
* CXL port topology. All other CXL ports have another CXL port as their
- * parent and their ->uport / host device is out-of-line of the port
+ * parent and their ->uport_dev / host device is out-of-line of the port
* ancestry.
*/
static inline bool is_cxl_root(struct cxl_port *port)
{
- return port->uport == port->dev.parent;
+ return port->uport_dev == port->dev.parent;
}
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
struct pci_bus;
-int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
+int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
struct pci_bus *bus);
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
-struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
+struct cxl_port *devm_cxl_add_port(struct device *host,
+ struct device *uport_dev,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport);
struct cxl_port *find_cxl_root(struct cxl_port *port);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
+struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
+ struct cxl_dport **dport);
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
struct cxl_dport **dport);
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);
@@ -671,7 +704,6 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
resource_size_t component_reg_phys);
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
struct device *dport_dev, int port_id,
- resource_size_t component_reg_phys,
resource_size_t rcrb);
struct cxl_decoder *to_cxl_decoder(struct device *dev);
@@ -710,7 +742,6 @@ struct cxl_endpoint_dvsec_info {
struct cxl_hdm;
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
@@ -750,6 +781,7 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
#define CXL_DEVICE_REGION 6
#define CXL_DEVICE_PMEM_REGION 7
#define CXL_DEVICE_DAX_REGION 8
+#define CXL_DEVICE_PMU 9
#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
#define CXL_MODALIAS_FMT "cxl:t%d"
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index a2845a7a69d8..79e99c873ca2 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -5,6 +5,7 @@
#include <uapi/linux/cxl_mem.h>
#include <linux/cdev.h>
#include <linux/uuid.h>
+#include <linux/rcuwait.h>
#include "cxl.h"
/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -38,6 +39,7 @@
* @detach_work: active memdev lost a port in its ancestry
* @cxl_nvb: coordinate removal of @cxl_nvd if present
* @cxl_nvd: optional bridge to an nvdimm if the device supports pmem
+ * @endpoint: connection to the CXL port topology for this memory device
* @id: id number of this memdev instance.
* @depth: endpoint port depth
*/
@@ -48,6 +50,7 @@ struct cxl_memdev {
struct work_struct detach_work;
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_nvdimm *cxl_nvd;
+ struct cxl_port *endpoint;
int id;
int depth;
};
@@ -72,16 +75,18 @@ cxled_to_memdev(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = to_cxl_port(cxled->cxld.dev.parent);
- return to_cxl_memdev(port->uport);
+ return to_cxl_memdev(port->uport_dev);
}
bool is_cxl_memdev(const struct device *dev);
static inline bool is_cxl_endpoint(struct cxl_port *port)
{
- return is_cxl_memdev(port->uport);
+ return is_cxl_memdev(port->uport_dev);
}
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
+struct cxl_memdev_state;
+int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped);
@@ -108,6 +113,9 @@ static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
* variable sized output commands, it tells the exact number of bytes
* written.
* @min_out: (input) internal command output payload size validation
+ * @poll_count: (input) Number of poll attempts before giving up.
+ * @poll_interval_ms: (input) Milliseconds between background command
+ * status polls.
* @return_code: (output) Error code returned from hardware.
*
* This is the primary mechanism used to send commands to the hardware.
@@ -123,6 +131,8 @@ struct cxl_mbox_cmd {
size_t size_in;
size_t size_out;
size_t min_out;
+ int poll_count;
+ int poll_interval_ms;
u16 return_code;
};
@@ -195,7 +205,7 @@ static inline int cxl_mbox_cmd_rc2errno(struct cxl_mbox_cmd *mbox_cmd)
*/
#define CXL_CAPACITY_MULTIPLIER SZ_256M
-/**
+/*
* Event Interrupt Policy
*
* CXL rev 3.0 section 8.2.9.2.4; Table 8-52
@@ -215,8 +225,8 @@ struct cxl_event_interrupt_policy {
/**
* struct cxl_event_state - Event log driver state
*
- * @event_buf: Buffer to receive event data
- * @event_log_lock: Serialize event_buf and log use
+ * @buf: Buffer to receive event data
+ * @log_lock: Serialize @buf and log use
*/
struct cxl_event_state {
struct cxl_get_event_payload *buf;
@@ -254,6 +264,115 @@ struct cxl_poison_state {
struct mutex lock; /* Protect reads of poison list */
};
+/*
+ * Get FW Info
+ * CXL rev 3.0 section 8.2.9.3.1; Table 8-56
+ */
+struct cxl_mbox_get_fw_info {
+ u8 num_slots;
+ u8 slot_info;
+ u8 activation_cap;
+ u8 reserved[13];
+ char slot_1_revision[16];
+ char slot_2_revision[16];
+ char slot_3_revision[16];
+ char slot_4_revision[16];
+} __packed;
+
+#define CXL_FW_INFO_SLOT_INFO_CUR_MASK GENMASK(2, 0)
+#define CXL_FW_INFO_SLOT_INFO_NEXT_MASK GENMASK(5, 3)
+#define CXL_FW_INFO_SLOT_INFO_NEXT_SHIFT 3
+#define CXL_FW_INFO_ACTIVATION_CAP_HAS_LIVE_ACTIVATE BIT(0)
+
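
The active and staged firmware slots are packed into the slot_info byte returned by Get FW Info; a small sketch of decoding it with the masks above (the helper name is invented for illustration):

        #include <linux/bitfield.h>

        static void cxl_show_fw_slots(const struct cxl_mbox_get_fw_info *info)
        {
                u8 cur = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK, info->slot_info);
                u8 next = FIELD_GET(CXL_FW_INFO_SLOT_INFO_NEXT_MASK, info->slot_info);

                pr_debug("active FW slot %u, staged slot %u of %u\n",
                         cur, next, info->num_slots);
        }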
+/*
+ * Transfer FW Input Payload
+ * CXL rev 3.0 section 8.2.9.3.2; Table 8-57
+ */
+struct cxl_mbox_transfer_fw {
+ u8 action;
+ u8 slot;
+ u8 reserved[2];
+ __le32 offset;
+ u8 reserved2[0x78];
+ u8 data[];
+} __packed;
+
+#define CXL_FW_TRANSFER_ACTION_FULL 0x0
+#define CXL_FW_TRANSFER_ACTION_INITIATE 0x1
+#define CXL_FW_TRANSFER_ACTION_CONTINUE 0x2
+#define CXL_FW_TRANSFER_ACTION_END 0x3
+#define CXL_FW_TRANSFER_ACTION_ABORT 0x4
+
+/*
+ * CXL rev 3.0 section 8.2.9.3.2 mandates 128-byte alignment for FW packages
+ * and for each part transferred in a Transfer FW command.
+ */
+#define CXL_FW_TRANSFER_ALIGNMENT 128
+
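
Transfer FW carries the running offset in multiples of 128 bytes, so both the per-transfer size and the offset arithmetic must honor the alignment above. A hedged sketch of the chunking (buffer names and the 1 MiB chunk size are assumptions, not this patch's code):

        size_t chunk = ALIGN_DOWN(SZ_1M, CXL_FW_TRANSFER_ALIGNMENT);
        size_t sent = 0;

        while (sent < fw_size) {
                size_t len = min(chunk, fw_size - sent);

                transfer->offset = cpu_to_le32(sent / CXL_FW_TRANSFER_ALIGNMENT);
                memcpy(transfer->data, fw_data + sent, len);
                /* ... issue CXL_MBOX_OP_TRANSFER_FW for this piece ... */
                sent += len;
        }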
+/*
+ * Activate FW Input Payload
+ * CXL rev 3.0 section 8.2.9.3.3; Table 8-58
+ */
+struct cxl_mbox_activate_fw {
+ u8 action;
+ u8 slot;
+} __packed;
+
+#define CXL_FW_ACTIVATE_ONLINE 0x0
+#define CXL_FW_ACTIVATE_OFFLINE 0x1
+
+/* FW state bits */
+#define CXL_FW_STATE_BITS 32
+#define CXL_FW_CANCEL BIT(0)
+
+/**
+ * struct cxl_fw_state - Firmware upload / activation state
+ *
+ * @state: fw_uploader state bitmask
+ * @oneshot: whether the fw upload fits in a single transfer
+ * @num_slots: Number of FW slots available
+ * @cur_slot: Slot number currently active
+ * @next_slot: Slot number for the new firmware
+ */
+struct cxl_fw_state {
+ DECLARE_BITMAP(state, CXL_FW_STATE_BITS);
+ bool oneshot;
+ int num_slots;
+ int cur_slot;
+ int next_slot;
+};
+
+/**
+ * struct cxl_security_state - Device security state
+ *
+ * @state: state of last security operation
+ * @poll: polling for sanitization is enabled, device has no mbox irq support
+ * @poll_tmo_secs: polling timeout
+ * @poll_dwork: polling work item
+ * @sanitize_node: sanitization sysfs file to notify
+ */
+struct cxl_security_state {
+ unsigned long state;
+ bool poll;
+ int poll_tmo_secs;
+ struct delayed_work poll_dwork;
+ struct kernfs_node *sanitize_node;
+};
+
+/*
+ * enum cxl_devtype - delineate type-2 from a generic type-3 device
+ * @CXL_DEVTYPE_DEVMEM - Vendor specific CXL Type-2 device implementing HDM-D or
+ * HDM-DB, no requirement that this device implements a
+ * mailbox, or other memory-device-standard manageability
+ * flows.
+ * @CXL_DEVTYPE_CLASSMEM - Common class definition of a CXL Type-3 device with
+ * HDM-H and class-mandatory memory device registers
+ */
+enum cxl_devtype {
+ CXL_DEVTYPE_DEVMEM,
+ CXL_DEVTYPE_CLASSMEM,
+};
+
/**
* struct cxl_dev_state - The driver device state
*
@@ -267,6 +386,36 @@ struct cxl_poison_state {
* @cxl_dvsec: Offset to the PCIe device DVSEC
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
* @media_ready: Indicate whether the device media is usable
+ * @dpa_res: Overall DPA resource tree for the device
+ * @pmem_res: Active Persistent memory capacity configuration
+ * @ram_res: Active Volatile memory capacity configuration
+ * @component_reg_phys: register base of component registers
+ * @serial: PCIe Device Serial Number
+ * @type: Generic Memory Class device or Vendor Specific Memory device
+ */
+struct cxl_dev_state {
+ struct device *dev;
+ struct cxl_memdev *cxlmd;
+ struct cxl_regs regs;
+ int cxl_dvsec;
+ bool rcd;
+ bool media_ready;
+ struct resource dpa_res;
+ struct resource pmem_res;
+ struct resource ram_res;
+ resource_size_t component_reg_phys;
+ u64 serial;
+ enum cxl_devtype type;
+};
+
+/**
+ * struct cxl_memdev_state - Generic Type-3 Memory Device Class driver data
+ *
+ * CXL 8.1.12.1 PCI Header - Class Code Register Memory Device defines
+ * common memory device functionality like the presence of a mailbox and
+ * the functionality related to that, like Identify Memory Device and Get
+ * Partition Info.
+ * @cxlds: Core driver state common across Type-2 and Type-3 devices
* @payload_size: Size of space for payload
* (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
* @lsa_size: Size of Label Storage Area
@@ -275,9 +424,6 @@ struct cxl_poison_state {
* @firmware_version: Firmware version for the memory device.
* @enabled_cmds: Hardware commands found enabled in CEL.
* @exclusive_cmds: Commands that are kernel-internal only
- * @dpa_res: Overall DPA resource tree for the device
- * @pmem_res: Active Persistent memory capacity configuration
- * @ram_res: Active Volatile memory capacity configuration
* @total_bytes: sum of all possible capacities
* @volatile_only_bytes: hard volatile capacity
* @persistent_only_bytes: hard persistent capacity
@@ -286,54 +432,48 @@ struct cxl_poison_state {
* @active_persistent_bytes: sum of hard + soft persistent
* @next_volatile_bytes: volatile capacity change pending device reset
* @next_persistent_bytes: persistent capacity change pending device reset
- * @component_reg_phys: register base of component registers
- * @info: Cached DVSEC information about the device.
- * @serial: PCIe Device Serial Number
* @event: event log driver state
* @poison: poison driver state info
+ * @fw: firmware upload / activation state
* @mbox_send: @dev specific transport for transmitting mailbox commands
*
- * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
+ * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
*/
-struct cxl_dev_state {
- struct device *dev;
- struct cxl_memdev *cxlmd;
-
- struct cxl_regs regs;
- int cxl_dvsec;
-
- bool rcd;
- bool media_ready;
+struct cxl_memdev_state {
+ struct cxl_dev_state cxlds;
size_t payload_size;
size_t lsa_size;
struct mutex mbox_mutex; /* Protects device mailbox and firmware */
char firmware_version[0x10];
DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
-
- struct resource dpa_res;
- struct resource pmem_res;
- struct resource ram_res;
u64 total_bytes;
u64 volatile_only_bytes;
u64 persistent_only_bytes;
u64 partition_align_bytes;
-
u64 active_volatile_bytes;
u64 active_persistent_bytes;
u64 next_volatile_bytes;
u64 next_persistent_bytes;
-
- resource_size_t component_reg_phys;
- u64 serial;
-
struct cxl_event_state event;
struct cxl_poison_state poison;
+ struct cxl_security_state security;
+ struct cxl_fw_state fw;
- int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
+ struct rcuwait mbox_wait;
+ int (*mbox_send)(struct cxl_memdev_state *mds,
+ struct cxl_mbox_cmd *cmd);
};
+static inline struct cxl_memdev_state *
+to_cxl_memdev_state(struct cxl_dev_state *cxlds)
+{
+ if (cxlds->type != CXL_DEVTYPE_CLASSMEM)
+ return NULL;
+ return container_of(cxlds, struct cxl_memdev_state, cxlds);
+}
+
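
Type-2 accelerator devices embed struct cxl_dev_state without a surrounding cxl_memdev_state, so the cast returns NULL for them and any mailbox user is expected to check for that. A small usage sketch:

        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

        if (!mds)
                return -ENODEV; /* no memdev/mailbox semantics on this device */

        return cxl_internal_send_cmd(mds, &mbox_cmd);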
enum cxl_opcode {
CXL_MBOX_OP_INVALID = 0x0000,
CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID,
@@ -342,6 +482,7 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_EVT_INT_POLICY = 0x0102,
CXL_MBOX_OP_SET_EVT_INT_POLICY = 0x0103,
CXL_MBOX_OP_GET_FW_INFO = 0x0200,
+ CXL_MBOX_OP_TRANSFER_FW = 0x0201,
CXL_MBOX_OP_ACTIVATE_FW = 0x0202,
CXL_MBOX_OP_SET_TIMESTAMP = 0x0301,
CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
@@ -362,6 +503,8 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303,
CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
+ CXL_MBOX_OP_SANITIZE = 0x4400,
+ CXL_MBOX_OP_SECURE_ERASE = 0x4401,
CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500,
CXL_MBOX_OP_SET_PASSPHRASE = 0x4501,
CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502,
@@ -692,18 +835,20 @@ enum {
CXL_PMEM_SEC_PASS_USER,
};
-int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
+int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
struct cxl_mbox_cmd *cmd);
-int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
+int cxl_dev_state_identify(struct cxl_memdev_state *mds);
int cxl_await_media_ready(struct cxl_dev_state *cxlds);
-int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
-int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
-struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
-void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
-void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
-void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status);
-int cxl_set_timestamp(struct cxl_dev_state *cxlds);
-int cxl_poison_state_init(struct cxl_dev_state *cxlds);
+int cxl_enumerate_cmds(struct cxl_memdev_state *mds);
+int cxl_mem_create_range_info(struct cxl_memdev_state *mds);
+struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev);
+void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+ unsigned long *cmds);
+void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+ unsigned long *cmds);
+void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
+int cxl_set_timestamp(struct cxl_memdev_state *mds);
+int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_region *cxlr);
int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
@@ -722,6 +867,8 @@ static inline void cxl_mem_active_dec(void)
}
#endif
+int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
+
struct cxl_hdm {
struct cxl_component_regs regs;
unsigned int decoder_count;
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 7c02e55b8042..0fa4799ea316 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -67,6 +67,7 @@ enum cxl_regloc_type {
CXL_REGLOC_RBI_COMPONENT,
CXL_REGLOC_RBI_VIRT,
CXL_REGLOC_RBI_MEMDEV,
+ CXL_REGLOC_RBI_PMU,
CXL_REGLOC_RBI_TYPES
};
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 519edd0eb196..317c7548e4e9 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -51,7 +51,6 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
struct cxl_port *parent_port = parent_dport->port;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *endpoint, *iter, *down;
- resource_size_t component_reg_phys;
int rc;
/*
@@ -66,17 +65,8 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
ep->next = down;
}
- /*
- * The component registers for an RCD might come from the
- * host-bridge RCRB if they are not already mapped via the
- * typical register locator mechanism.
- */
- if (parent_dport->rch && cxlds->component_reg_phys == CXL_RESOURCE_NONE)
- component_reg_phys = cxl_rcrb_to_component(
- &cxlmd->dev, parent_dport->rcrb, CXL_RCRB_UPSTREAM);
- else
- component_reg_phys = cxlds->component_reg_phys;
- endpoint = devm_cxl_add_port(host, &cxlmd->dev, component_reg_phys,
+ endpoint = devm_cxl_add_port(host, &cxlmd->dev,
+ cxlds->component_reg_phys,
parent_dport);
if (IS_ERR(endpoint))
return PTR_ERR(endpoint);
@@ -117,6 +107,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *endpoint_parent;
struct cxl_port *parent_port;
@@ -141,10 +132,10 @@ static int cxl_mem_probe(struct device *dev)
dentry = cxl_debugfs_create_dir(dev_name(dev));
debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
- if (test_bit(CXL_POISON_ENABLED_INJECT, cxlds->poison.enabled_cmds))
+ if (test_bit(CXL_POISON_ENABLED_INJECT, mds->poison.enabled_cmds))
debugfs_create_file("inject_poison", 0200, dentry, cxlmd,
&cxl_poison_inject_fops);
- if (test_bit(CXL_POISON_ENABLED_CLEAR, cxlds->poison.enabled_cmds))
+ if (test_bit(CXL_POISON_ENABLED_CLEAR, mds->poison.enabled_cmds))
debugfs_create_file("clear_poison", 0200, dentry, cxlmd,
&cxl_poison_clear_fops);
@@ -163,7 +154,7 @@ static int cxl_mem_probe(struct device *dev)
}
if (dport->rch)
- endpoint_parent = parent_port->uport;
+ endpoint_parent = parent_port->uport_dev;
else
endpoint_parent = &parent_port->dev;
@@ -227,9 +218,12 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
if (a == &dev_attr_trigger_poison_list.attr) {
struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds =
+ to_cxl_memdev_state(cxlmd->cxlds);
if (!test_bit(CXL_POISON_ENABLED_LIST,
- to_cxl_memdev(dev)->cxlds->poison.enabled_cmds))
+ mds->poison.enabled_cmds))
return 0;
}
return a->mode;
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 0872f2233ed0..1cb1494c28fe 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -13,6 +13,7 @@
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
+#include "pmu.h"
/**
* DOC: cxl pci
@@ -84,9 +85,92 @@ static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
status & CXLMDEV_DEV_FATAL ? " fatal" : "", \
status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
+struct cxl_dev_id {
+ struct cxl_dev_state *cxlds;
+};
+
+static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
+ irq_handler_t handler, irq_handler_t thread_fn)
+{
+ struct device *dev = cxlds->dev;
+ struct cxl_dev_id *dev_id;
+
+ /* dev_id must be globally unique and must contain the cxlds */
+ dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
+ if (!dev_id)
+ return -ENOMEM;
+ dev_id->cxlds = cxlds;
+
+ return devm_request_threaded_irq(dev, irq, handler, thread_fn,
+ IRQF_SHARED | IRQF_ONESHOT,
+ NULL, dev_id);
+}
+
+static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
+{
+ u64 reg;
+
+ reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+ return FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg) == 100;
+}
+
+static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
+{
+ u64 reg;
+ u16 opcode;
+ struct cxl_dev_id *dev_id = id;
+ struct cxl_dev_state *cxlds = dev_id->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ if (!cxl_mbox_background_complete(cxlds))
+ return IRQ_NONE;
+
+ reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+ opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+ if (opcode == CXL_MBOX_OP_SANITIZE) {
+ if (mds->security.sanitize_node)
+ sysfs_notify_dirent(mds->security.sanitize_node);
+
+ dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+ } else {
+ /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
+ rcuwait_wake_up(&mds->mbox_wait);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Sanitization operation polling mode.
+ */
+static void cxl_mbox_sanitize_work(struct work_struct *work)
+{
+ struct cxl_memdev_state *mds =
+ container_of(work, typeof(*mds), security.poll_dwork.work);
+ struct cxl_dev_state *cxlds = &mds->cxlds;
+
+ mutex_lock(&mds->mbox_mutex);
+ if (cxl_mbox_background_complete(cxlds)) {
+ mds->security.poll_tmo_secs = 0;
+ put_device(cxlds->dev);
+
+ if (mds->security.sanitize_node)
+ sysfs_notify_dirent(mds->security.sanitize_node);
+
+ dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+ } else {
+ int timeout = mds->security.poll_tmo_secs + 10;
+
+ mds->security.poll_tmo_secs = min(15 * 60, timeout);
+ queue_delayed_work(system_wq, &mds->security.poll_dwork,
+ timeout * HZ);
+ }
+ mutex_unlock(&mds->mbox_mutex);
+}
+
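
The polling interval grows by 10 seconds per pass (1 s, then 11 s, 21 s, ...) and is capped at 15 minutes. Completion is only signalled through sysfs_notify_dirent(), so userspace waits with poll(2) on the notified attribute; a hedged userspace sketch (the attribute path is an assumption, not defined in this hunk):

        #include <fcntl.h>
        #include <poll.h>
        #include <unistd.h>

        char buf[16];
        int fd = open("/sys/bus/cxl/devices/mem0/security/state", O_RDONLY);
        struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

        pread(fd, buf, sizeof(buf), 0);         /* arm the sysfs notification */
        poll(&pfd, 1, -1);                      /* wakes when sanitize completes */
        pread(fd, buf, sizeof(buf), 0);         /* read back the new state */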
/**
* __cxl_pci_mbox_send_cmd() - Execute a mailbox command
- * @cxlds: The device state to communicate with.
+ * @mds: The memory device driver data
* @mbox_cmd: Command to send to the memory device.
*
* Context: Any context. Expects mbox_mutex to be held.
@@ -106,16 +190,17 @@ static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
* not need to coordinate with each other. The driver only uses the primary
* mailbox.
*/
-static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
+static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
struct cxl_mbox_cmd *mbox_cmd)
{
+ struct cxl_dev_state *cxlds = &mds->cxlds;
void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
struct device *dev = cxlds->dev;
u64 cmd_reg, status_reg;
size_t out_len;
int rc;
- lockdep_assert_held(&cxlds->mbox_mutex);
+ lockdep_assert_held(&mds->mbox_mutex);
/*
* Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
@@ -144,6 +229,16 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
return -EBUSY;
}
+ /*
+ * With sanitize polling, hardware might be done and the poller still
+ * not be in sync. Ensure no new command comes in until the poller has
+ * caught up; keep the hardware semantics and only let the device health
+ * status query through.
+ */
+ if (mds->security.poll_tmo_secs > 0) {
+ if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO)
+ return -EBUSY;
+ }
+
cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
mbox_cmd->opcode);
if (mbox_cmd->size_in) {
@@ -177,12 +272,80 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
mbox_cmd->return_code =
FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
+ /*
+ * Handle the background command in a synchronous manner.
+ *
+ * All other mailbox commands will serialize/queue on the mbox_mutex,
+ * which we currently hold. Furthermore this also guarantees that
+ * cxl_mbox_background_complete() checks are safe amongst each other,
+ * in that no new bg operation can occur in between.
+ *
+ * Background operations are timesliced in accordance with the nature
+ * of the command. In the event of timeout, the mailbox state is
+ * indeterminate until the next successful command submission and the
+ * driver can get back in sync with the hardware state.
+ */
+ if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) {
+ u64 bg_status_reg;
+ int i, timeout;
+
+ /*
+ * Sanitization is a special case which monopolizes the device
+ * and cannot be timesliced. Handle asynchronously instead,
+ * and allow userspace to poll(2) for completion.
+ */
+ if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
+ if (mds->security.poll) {
+ /* hold the device throughout */
+ get_device(cxlds->dev);
+
+ /* give first timeout a second */
+ timeout = 1;
+ mds->security.poll_tmo_secs = timeout;
+ queue_delayed_work(system_wq,
+ &mds->security.poll_dwork,
+ timeout * HZ);
+ }
+
+ dev_dbg(dev, "Sanitization operation started\n");
+ goto success;
+ }
+
+ dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
+ mbox_cmd->opcode);
+
+ timeout = mbox_cmd->poll_interval_ms;
+ for (i = 0; i < mbox_cmd->poll_count; i++) {
+ if (rcuwait_wait_event_timeout(&mds->mbox_wait,
+ cxl_mbox_background_complete(cxlds),
+ TASK_UNINTERRUPTIBLE,
+ msecs_to_jiffies(timeout)) > 0)
+ break;
+ }
+
+ if (!cxl_mbox_background_complete(cxlds)) {
+ dev_err(dev, "timeout waiting for background (%d ms)\n",
+ timeout * mbox_cmd->poll_count);
+ return -ETIMEDOUT;
+ }
+
+ bg_status_reg = readq(cxlds->regs.mbox +
+ CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+ mbox_cmd->return_code =
+ FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK,
+ bg_status_reg);
+ dev_dbg(dev,
+ "Mailbox background operation (0x%04x) completed\n",
+ mbox_cmd->opcode);
+ }
+
if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
dev_dbg(dev, "Mailbox operation had an error: %s\n",
cxl_mbox_cmd_rc2str(mbox_cmd));
return 0; /* completed but caller must check return_code */
}
+success:
/* #7 */
cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
@@ -196,8 +359,9 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
* have requested less data than the hardware supplied even
* within spec.
*/
- size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);
+ size_t n;
+ n = min3(mbox_cmd->size_out, mds->payload_size, out_len);
memcpy_fromio(mbox_cmd->payload_out, payload, n);
mbox_cmd->size_out = n;
} else {
@@ -207,20 +371,23 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
return 0;
}
-static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
+ struct cxl_mbox_cmd *cmd)
{
int rc;
- mutex_lock_io(&cxlds->mbox_mutex);
- rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
- mutex_unlock(&cxlds->mbox_mutex);
+ mutex_lock_io(&mds->mbox_mutex);
+ rc = __cxl_pci_mbox_send_cmd(mds, cmd);
+ mutex_unlock(&mds->mbox_mutex);
return rc;
}
-static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
+static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
{
+ struct cxl_dev_state *cxlds = &mds->cxlds;
const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
+ struct device *dev = cxlds->dev;
unsigned long timeout;
u64 md_status;
@@ -234,8 +401,7 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
} while (!time_after(jiffies, timeout));
if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
- cxl_err(cxlds->dev, md_status,
- "timeout awaiting mailbox ready");
+ cxl_err(dev, md_status, "timeout awaiting mailbox ready");
return -ETIMEDOUT;
}
@@ -246,12 +412,12 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
* source for future doorbell busy events.
*/
if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
- cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle");
+ cxl_err(dev, md_status, "timeout awaiting mailbox idle");
return -ETIMEDOUT;
}
- cxlds->mbox_send = cxl_pci_mbox_send;
- cxlds->payload_size =
+ mds->mbox_send = cxl_pci_mbox_send;
+ mds->payload_size =
1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
/*
@@ -261,121 +427,104 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
* there's no point in going forward. If the size is too large, there's
* no harm in soft limiting it.
*/
- cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
- if (cxlds->payload_size < 256) {
- dev_err(cxlds->dev, "Mailbox is too small (%zub)",
- cxlds->payload_size);
+ mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M);
+ if (mds->payload_size < 256) {
+ dev_err(dev, "Mailbox is too small (%zub)",
+ mds->payload_size);
return -ENXIO;
}
- dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
- cxlds->payload_size);
+ dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
- return 0;
-}
+ rcuwait_init(&mds->mbox_wait);
-static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
-{
- struct device *dev = &pdev->dev;
+ if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
+ u32 ctrl;
+ int irq, msgnum;
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
- map->base = ioremap(map->resource, map->max_size);
- if (!map->base) {
- dev_err(dev, "failed to map registers\n");
- return -ENOMEM;
+ msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+ irq = pci_irq_vector(pdev, msgnum);
+ if (irq < 0)
+ goto mbox_poll;
+
+ if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
+ goto mbox_poll;
+
+ /* enable background command mbox irq support */
+ ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+ ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+ writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+
+ return 0;
}
- dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
+mbox_poll:
+ mds->security.poll = true;
+ INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
+
+ dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
return 0;
}
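
The capability register encodes the mailbox payload capacity as a power-of-two exponent, which the code above clamps to the [256 B, 1 MiB] window; a quick worked example of the clamp:

        cap field  8  ->  1 <<  8 = 256 B    (spec minimum, accepted)
        cap field 20  ->  1 << 20 = 1 MiB    (kept as-is)
        cap field 31  ->  1 << 31 = 2 GiB    (soft-limited down to SZ_1M)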
-static void cxl_unmap_regblock(struct pci_dev *pdev,
- struct cxl_register_map *map)
+/*
+ * Assume that any RCIEP that emits the CXL memory expander class code
+ * is an RCD
+ */
+static bool is_cxl_restricted(struct pci_dev *pdev)
{
- iounmap(map->base);
- map->base = NULL;
+ return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}
-static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
+static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
+ struct cxl_register_map *map)
{
- struct cxl_component_reg_map *comp_map;
- struct cxl_device_reg_map *dev_map;
- struct device *dev = &pdev->dev;
- void __iomem *base = map->base;
-
- switch (map->reg_type) {
- case CXL_REGLOC_RBI_COMPONENT:
- comp_map = &map->component_map;
- cxl_probe_component_regs(dev, base, comp_map);
- if (!comp_map->hdm_decoder.valid) {
- dev_err(dev, "HDM decoder registers not found\n");
- return -ENXIO;
- }
+ struct cxl_port *port;
+ struct cxl_dport *dport;
+ resource_size_t component_reg_phys;
- if (!comp_map->ras.valid)
- dev_dbg(dev, "RAS registers not found\n");
-
- dev_dbg(dev, "Set up component registers\n");
- break;
- case CXL_REGLOC_RBI_MEMDEV:
- dev_map = &map->device_map;
- cxl_probe_device_regs(dev, base, dev_map);
- if (!dev_map->status.valid || !dev_map->mbox.valid ||
- !dev_map->memdev.valid) {
- dev_err(dev, "registers not found: %s%s%s\n",
- !dev_map->status.valid ? "status " : "",
- !dev_map->mbox.valid ? "mbox " : "",
- !dev_map->memdev.valid ? "memdev " : "");
- return -ENXIO;
- }
+ *map = (struct cxl_register_map) {
+ .dev = &pdev->dev,
+ .resource = CXL_RESOURCE_NONE,
+ };
- dev_dbg(dev, "Probing device registers...\n");
- break;
- default:
- break;
- }
+ port = cxl_pci_find_port(pdev, &dport);
+ if (!port)
+ return -EPROBE_DEFER;
+
+ component_reg_phys = cxl_rcd_component_reg_phys(&pdev->dev, dport);
+
+ put_device(&port->dev);
+
+ if (component_reg_phys == CXL_RESOURCE_NONE)
+ return -ENXIO;
+
+ map->resource = component_reg_phys;
+ map->reg_type = CXL_REGLOC_RBI_COMPONENT;
+ map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
return 0;
}
-static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map)
+static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map)
{
int rc;
rc = cxl_find_regblock(pdev, type, map);
- if (rc)
- return rc;
- rc = cxl_map_regblock(pdev, map);
+ /*
+ * If the Register Locator DVSEC does not exist, check if it
+ * is an RCH and try to extract the Component Registers from
+ * an RCRB.
+ */
+ if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev))
+ rc = cxl_rcrb_get_comp_regs(pdev, map);
+
if (rc)
return rc;
- rc = cxl_probe_regs(pdev, map);
- cxl_unmap_regblock(pdev, map);
-
- return rc;
-}
-
-/*
- * Assume that any RCIEP that emits the CXL memory expander class code
- * is an RCD
- */
-static bool is_cxl_restricted(struct pci_dev *pdev)
-{
- return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
-}
-
-/*
- * CXL v3.0 6.2.3 Table 6-4
- * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
- * mode, otherwise it's 68B flits mode.
- */
-static bool cxl_pci_flit_256(struct pci_dev *pdev)
-{
- u16 lnksta2;
-
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2);
- return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
+ return cxl_setup_regs(map);
}
static int cxl_pci_ras_unmask(struct pci_dev *pdev)
@@ -404,9 +553,8 @@ static int cxl_pci_ras_unmask(struct pci_dev *pdev)
addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET;
orig_val = readl(addr);
- mask = CXL_RAS_UNCORRECTABLE_MASK_MASK;
- if (!cxl_pci_flit_256(pdev))
- mask &= ~CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
+ mask = CXL_RAS_UNCORRECTABLE_MASK_MASK |
+ CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
val = orig_val & ~mask;
writel(val, addr);
dev_dbg(&pdev->dev,
@@ -433,18 +581,18 @@ static void free_event_buf(void *buf)
/*
* There is a single buffer for reading event logs from the mailbox. All logs
- * share this buffer protected by the cxlds->event_log_lock.
+ * share this buffer protected by the mds->event_log_lock.
*/
-static int cxl_mem_alloc_event_buf(struct cxl_dev_state *cxlds)
+static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
{
struct cxl_get_event_payload *buf;
- buf = kvmalloc(cxlds->payload_size, GFP_KERNEL);
+ buf = kvmalloc(mds->payload_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- cxlds->event.buf = buf;
+ mds->event.buf = buf;
- return devm_add_action_or_reset(cxlds->dev, free_event_buf, buf);
+ return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
}
static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
@@ -469,14 +617,11 @@ static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
return 0;
}
-struct cxl_dev_id {
- struct cxl_dev_state *cxlds;
-};
-
static irqreturn_t cxl_event_thread(int irq, void *id)
{
struct cxl_dev_id *dev_id = id;
struct cxl_dev_state *cxlds = dev_id->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
u32 status;
do {
@@ -489,7 +634,7 @@ static irqreturn_t cxl_event_thread(int irq, void *id)
status &= CXLDEV_EVENT_STATUS_ALL;
if (!status)
break;
- cxl_mem_get_event_records(cxlds, status);
+ cxl_mem_get_event_records(mds, status);
cond_resched();
} while (status);
@@ -498,31 +643,21 @@ static irqreturn_t cxl_event_thread(int irq, void *id)
static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
{
- struct device *dev = cxlds->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct cxl_dev_id *dev_id;
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
int irq;
if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX)
return -ENXIO;
- /* dev_id must be globally unique and must contain the cxlds */
- dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
- if (!dev_id)
- return -ENOMEM;
- dev_id->cxlds = cxlds;
-
irq = pci_irq_vector(pdev,
FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
if (irq < 0)
return irq;
- return devm_request_threaded_irq(dev, irq, NULL, cxl_event_thread,
- IRQF_SHARED | IRQF_ONESHOT, NULL,
- dev_id);
+ return cxl_request_irq(cxlds, irq, NULL, cxl_event_thread);
}
-static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
+static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
struct cxl_event_interrupt_policy *policy)
{
struct cxl_mbox_cmd mbox_cmd = {
@@ -532,15 +667,15 @@ static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
};
int rc;
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0)
- dev_err(cxlds->dev, "Failed to get event interrupt policy : %d",
- rc);
+ dev_err(mds->cxlds.dev,
+ "Failed to get event interrupt policy : %d", rc);
return rc;
}
-static int cxl_event_config_msgnums(struct cxl_dev_state *cxlds,
+static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
struct cxl_event_interrupt_policy *policy)
{
struct cxl_mbox_cmd mbox_cmd;
@@ -559,23 +694,24 @@ static int cxl_event_config_msgnums(struct cxl_dev_state *cxlds,
.size_in = sizeof(*policy),
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0) {
- dev_err(cxlds->dev, "Failed to set event interrupt policy : %d",
+ dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d",
rc);
return rc;
}
/* Retrieve final interrupt settings */
- return cxl_event_get_int_policy(cxlds, policy);
+ return cxl_event_get_int_policy(mds, policy);
}
-static int cxl_event_irqsetup(struct cxl_dev_state *cxlds)
+static int cxl_event_irqsetup(struct cxl_memdev_state *mds)
{
+ struct cxl_dev_state *cxlds = &mds->cxlds;
struct cxl_event_interrupt_policy policy;
int rc;
- rc = cxl_event_config_msgnums(cxlds, &policy);
+ rc = cxl_event_config_msgnums(mds, &policy);
if (rc)
return rc;
@@ -614,7 +750,7 @@ static bool cxl_event_int_is_fw(u8 setting)
}
static int cxl_event_config(struct pci_host_bridge *host_bridge,
- struct cxl_dev_state *cxlds)
+ struct cxl_memdev_state *mds)
{
struct cxl_event_interrupt_policy policy;
int rc;
@@ -626,11 +762,11 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
if (!host_bridge->native_cxl_error)
return 0;
- rc = cxl_mem_alloc_event_buf(cxlds);
+ rc = cxl_mem_alloc_event_buf(mds);
if (rc)
return rc;
- rc = cxl_event_get_int_policy(cxlds, &policy);
+ rc = cxl_event_get_int_policy(mds, &policy);
if (rc)
return rc;
@@ -638,15 +774,16 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
cxl_event_int_is_fw(policy.warn_settings) ||
cxl_event_int_is_fw(policy.failure_settings) ||
cxl_event_int_is_fw(policy.fatal_settings)) {
- dev_err(cxlds->dev, "FW still in control of Event Logs despite _OSC settings\n");
+ dev_err(mds->cxlds.dev,
+ "FW still in control of Event Logs despite _OSC settings\n");
return -EBUSY;
}
- rc = cxl_event_irqsetup(cxlds);
+ rc = cxl_event_irqsetup(mds);
if (rc)
return rc;
- cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
+ cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
return 0;
}
@@ -654,10 +791,11 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
+ struct cxl_memdev_state *mds;
+ struct cxl_dev_state *cxlds;
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
- struct cxl_dev_state *cxlds;
- int rc;
+ int i, rc, pmu_count;
/*
* Double check the anonymous union trickery in struct cxl_regs
@@ -671,9 +809,10 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
pci_set_master(pdev);
- cxlds = cxl_dev_state_create(&pdev->dev);
- if (IS_ERR(cxlds))
- return PTR_ERR(cxlds);
+ mds = cxl_memdev_state_create(&pdev->dev);
+ if (IS_ERR(mds))
+ return PTR_ERR(mds);
+ cxlds = &mds->cxlds;
pci_set_drvdata(pdev, cxlds);
cxlds->rcd = is_cxl_restricted(pdev);
@@ -684,11 +823,11 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_warn(&pdev->dev,
"Device DVSEC not present, skip CXL.mem init\n");
- rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
+ rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
if (rc)
return rc;
- rc = cxl_map_device_regs(&pdev->dev, &cxlds->regs.device_regs, &map);
+ rc = cxl_map_device_regs(&map, &cxlds->regs.device_regs);
if (rc)
return rc;
@@ -697,14 +836,16 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* still be useful for management functions so don't return an error.
*/
cxlds->component_reg_phys = CXL_RESOURCE_NONE;
- rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+ rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
if (rc)
dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
+ else if (!map.component_map.ras.valid)
+ dev_dbg(&pdev->dev, "RAS registers not found\n");
cxlds->component_reg_phys = map.resource;
- rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
- &map, BIT(CXL_CM_CAP_CAP_ID_RAS));
+ rc = cxl_map_component_regs(&map, &cxlds->regs.component,
+ BIT(CXL_CM_CAP_CAP_ID_RAS));
if (rc)
dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
@@ -714,31 +855,31 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
else
dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
- rc = cxl_pci_setup_mailbox(cxlds);
+ rc = cxl_alloc_irq_vectors(pdev);
if (rc)
return rc;
- rc = cxl_enumerate_cmds(cxlds);
+ rc = cxl_pci_setup_mailbox(mds);
if (rc)
return rc;
- rc = cxl_set_timestamp(cxlds);
+ rc = cxl_enumerate_cmds(mds);
if (rc)
return rc;
- rc = cxl_poison_state_init(cxlds);
+ rc = cxl_set_timestamp(mds);
if (rc)
return rc;
- rc = cxl_dev_state_identify(cxlds);
+ rc = cxl_poison_state_init(mds);
if (rc)
return rc;
- rc = cxl_mem_create_range_info(cxlds);
+ rc = cxl_dev_state_identify(mds);
if (rc)
return rc;
- rc = cxl_alloc_irq_vectors(pdev);
+ rc = cxl_mem_create_range_info(mds);
if (rc)
return rc;
@@ -746,7 +887,34 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- rc = cxl_event_config(host_bridge, cxlds);
+ rc = cxl_memdev_setup_fw_upload(mds);
+ if (rc)
+ return rc;
+
+ pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
+ for (i = 0; i < pmu_count; i++) {
+ struct cxl_pmu_regs pmu_regs;
+
+ rc = cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_PMU, &map, i);
+ if (rc) {
+ dev_dbg(&pdev->dev, "Could not find PMU regblock\n");
+ break;
+ }
+
+ rc = cxl_map_pmu_regs(pdev, &pmu_regs, &map);
+ if (rc) {
+ dev_dbg(&pdev->dev, "Could not map PMU regs\n");
+ break;
+ }
+
+ rc = devm_cxl_pmu_add(cxlds->dev, &pmu_regs, cxlmd->id, i, CXL_PMU_MEMDEV);
+ if (rc) {
+ dev_dbg(&pdev->dev, "Could not add PMU instance\n");
+ break;
+ }
+ }
+
+ rc = cxl_event_config(host_bridge, mds);
if (rc)
return rc;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 71cfa1fdf902..7cb8994f8809 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -15,9 +15,9 @@ extern const struct nvdimm_security_ops *cxl_security_ops;
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
-static void clear_exclusive(void *cxlds)
+static void clear_exclusive(void *mds)
{
- clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
+ clear_exclusive_cxl_commands(mds, exclusive_cmds);
}
static void unregister_nvdimm(void *nvdimm)
@@ -65,13 +65,13 @@ static int cxl_nvdimm_probe(struct device *dev)
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
unsigned long flags = 0, cmd_mask = 0;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct nvdimm *nvdimm;
int rc;
- set_exclusive_cxl_commands(cxlds, exclusive_cmds);
- rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
+ set_exclusive_cxl_commands(mds, exclusive_cmds);
+ rc = devm_add_action_or_reset(dev, clear_exclusive, mds);
if (rc)
return rc;
@@ -100,22 +100,23 @@ static struct cxl_driver cxl_nvdimm_driver = {
},
};
-static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
+static int cxl_pmem_get_config_size(struct cxl_memdev_state *mds,
struct nd_cmd_get_config_size *cmd,
unsigned int buf_len)
{
if (sizeof(*cmd) > buf_len)
return -EINVAL;
- *cmd = (struct nd_cmd_get_config_size) {
- .config_size = cxlds->lsa_size,
- .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
+ *cmd = (struct nd_cmd_get_config_size){
+ .config_size = mds->lsa_size,
+ .max_xfer =
+ mds->payload_size - sizeof(struct cxl_mbox_set_lsa),
};
return 0;
}
-static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
+static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds,
struct nd_cmd_get_config_data_hdr *cmd,
unsigned int buf_len)
{
@@ -140,13 +141,13 @@ static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
.payload_out = cmd->out_buf,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
cmd->status = 0;
return rc;
}
-static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
+static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds,
struct nd_cmd_set_config_hdr *cmd,
unsigned int buf_len)
{
@@ -176,7 +177,7 @@ static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
.size_in = struct_size(set_lsa, data, cmd->in_length),
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
/*
* Set "firmware" status (4-packed bytes at the end of the input
@@ -194,18 +195,18 @@ static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
if (!test_bit(cmd, &cmd_mask))
return -ENOTTY;
switch (cmd) {
case ND_CMD_GET_CONFIG_SIZE:
- return cxl_pmem_get_config_size(cxlds, buf, buf_len);
+ return cxl_pmem_get_config_size(mds, buf, buf_len);
case ND_CMD_GET_CONFIG_DATA:
- return cxl_pmem_get_config_data(cxlds, buf, buf_len);
+ return cxl_pmem_get_config_data(mds, buf, buf_len);
case ND_CMD_SET_CONFIG_DATA:
- return cxl_pmem_set_config_data(cxlds, buf, buf_len);
+ return cxl_pmem_set_config_data(mds, buf, buf_len);
default:
return -ENOTTY;
}
diff --git a/drivers/cxl/pmu.h b/drivers/cxl/pmu.h
new file mode 100644
index 000000000000..b1e9bcd9f28c
--- /dev/null
+++ b/drivers/cxl/pmu.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2023 Huawei
+ * CXL Specification rev 3.0 Section 8.2.7 (CPMU Register Interface)
+ */
+#ifndef CXL_PMU_H
+#define CXL_PMU_H
+#include <linux/device.h>
+
+enum cxl_pmu_type {
+ CXL_PMU_MEMDEV,
+};
+
+#define CXL_PMU_REGMAP_SIZE 0xe00 /* Table 8-32 CXL 3.0 specification */
+struct cxl_pmu {
+ struct device dev;
+ void __iomem *base;
+ int assoc_id;
+ int index;
+ enum cxl_pmu_type type;
+};
+
+#define to_cxl_pmu(dev) container_of(dev, struct cxl_pmu, dev)
+struct cxl_pmu_regs;
+int devm_cxl_pmu_add(struct device *parent, struct cxl_pmu_regs *regs,
+ int assoc_id, int idx, enum cxl_pmu_type type);
+
+#endif
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index c23b6164e1c0..6240e05b9542 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -60,17 +60,13 @@ static int discover_region(struct device *dev, void *root)
static int cxl_switch_port_probe(struct cxl_port *port)
{
struct cxl_hdm *cxlhdm;
- int rc, nr_dports;
-
- nr_dports = devm_cxl_port_enumerate_dports(port);
- if (nr_dports < 0)
- return nr_dports;
+ int rc;
- cxlhdm = devm_cxl_setup_hdm(port, NULL);
- rc = devm_cxl_enable_hdm(port, cxlhdm);
- if (rc)
+ rc = devm_cxl_port_enumerate_dports(port);
+ if (rc < 0)
return rc;
+ cxlhdm = devm_cxl_setup_hdm(port, NULL);
if (!IS_ERR(cxlhdm))
return devm_cxl_enumerate_decoders(cxlhdm, NULL);
@@ -79,7 +75,7 @@ static int cxl_switch_port_probe(struct cxl_port *port)
return PTR_ERR(cxlhdm);
}
- if (nr_dports == 1) {
+ if (rc == 1) {
dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
return devm_cxl_add_passthrough_decoder(port);
}
@@ -91,7 +87,7 @@ static int cxl_switch_port_probe(struct cxl_port *port)
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
struct cxl_endpoint_dvsec_info info = { .port = port };
- struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_hdm *cxlhdm;
struct cxl_port *root;
@@ -102,8 +98,11 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
return rc;
cxlhdm = devm_cxl_setup_hdm(port, &info);
- if (IS_ERR(cxlhdm))
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) == -ENODEV)
+ dev_err(&port->dev, "HDM decoder registers not found\n");
return PTR_ERR(cxlhdm);
+ }
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c
index 4ad4bda2d18e..21856a3f408e 100644
--- a/drivers/cxl/security.c
+++ b/drivers/cxl/security.c
@@ -14,7 +14,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
unsigned long security_flags = 0;
struct cxl_get_security_output {
__le32 flags;
@@ -29,11 +29,14 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
.payload_out = &out,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0)
return 0;
sec_out = le32_to_cpu(out.flags);
+ /* cache security state */
+ mds->security.state = sec_out;
+
if (ptype == NVDIMM_MASTER) {
if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
@@ -67,7 +70,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_cmd mbox_cmd;
struct cxl_set_pass set_pass;
@@ -84,7 +87,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
.payload_in = &set_pass,
};
- return cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ return cxl_internal_send_cmd(mds, &mbox_cmd);
}
static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@ -93,7 +96,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_disable_pass dis_pass;
struct cxl_mbox_cmd mbox_cmd;
@@ -109,7 +112,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
.payload_in = &dis_pass,
};
- return cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ return cxl_internal_send_cmd(mds, &mbox_cmd);
}
static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@ -128,12 +131,12 @@ static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_cmd mbox_cmd = {
.opcode = CXL_MBOX_OP_FREEZE_SECURITY,
};
- return cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ return cxl_internal_send_cmd(mds, &mbox_cmd);
}
static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
@@ -141,7 +144,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
u8 pass[NVDIMM_PASSPHRASE_LEN];
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -153,7 +156,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
.payload_in = pass,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0)
return rc;
@@ -166,7 +169,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_cmd mbox_cmd;
struct cxl_pass_erase erase;
int rc;
@@ -182,7 +185,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
.payload_in = &erase,
};
- rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0)
return rc;
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 227800053309..0ee96e6fc426 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -446,18 +446,33 @@ static void unregister_dev_dax(void *dev)
put_device(dev);
}
+static void dax_region_free(struct kref *kref)
+{
+ struct dax_region *dax_region;
+
+ dax_region = container_of(kref, struct dax_region, kref);
+ kfree(dax_region);
+}
+
+static void dax_region_put(struct dax_region *dax_region)
+{
+ kref_put(&dax_region->kref, dax_region_free);
+}
+
/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
- struct dax_region *dax_region = dev_dax->region;
struct device *dev = &dev_dax->dev;
+ struct dax_region *dax_region;
int rc = dev_dax->id;
device_lock_assert(dev);
- if (is_static(dax_region) || dev_dax->id < 0)
+ if (!dev_dax->dyn_id || dev_dax->id < 0)
return -1;
+ dax_region = dev_dax->region;
ida_free(&dax_region->ida, dev_dax->id);
+ dax_region_put(dax_region);
dev_dax->id = -1;
return rc;
}
@@ -473,6 +488,20 @@ static int free_dev_dax_id(struct dev_dax *dev_dax)
return rc;
}
+static int alloc_dev_dax_id(struct dev_dax *dev_dax)
+{
+ struct dax_region *dax_region = dev_dax->region;
+ int id;
+
+ id = ida_alloc(&dax_region->ida, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ kref_get(&dax_region->kref);
+ dev_dax->dyn_id = true;
+ dev_dax->id = id;
+ return id;
+}
+
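
With the region reference now taken alongside the dynamic id, the teardown in __free_dev_dax_id() above becomes its exact mirror; a short sketch of the intended pairing (error handling elided):

        /* creation path, dynamic regions only */
        rc = alloc_dev_dax_id(dev_dax);   /* ida_alloc() + kref_get(&dax_region->kref) */

        /* release path, from dev_dax_release() via free_dev_dax_id() */
        __free_dev_dax_id(dev_dax);       /* ida_free() + dax_region_put() */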
static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
@@ -560,20 +589,6 @@ static const struct attribute_group *dax_region_attribute_groups[] = {
NULL,
};
-static void dax_region_free(struct kref *kref)
-{
- struct dax_region *dax_region;
-
- dax_region = container_of(kref, struct dax_region, kref);
- kfree(dax_region);
-}
-
-void dax_region_put(struct dax_region *dax_region)
-{
- kref_put(&dax_region->kref, dax_region_free);
-}
-EXPORT_SYMBOL_GPL(dax_region_put);
-
static void dax_region_unregister(void *region)
{
struct dax_region *dax_region = region;
@@ -625,7 +640,6 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
return NULL;
}
- kref_get(&dax_region->kref);
if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
return NULL;
return dax_region;
@@ -635,10 +649,12 @@ EXPORT_SYMBOL_GPL(alloc_dax_region);
static void dax_mapping_release(struct device *dev)
{
struct dax_mapping *mapping = to_dax_mapping(dev);
- struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+ struct device *parent = dev->parent;
+ struct dev_dax *dev_dax = to_dev_dax(parent);
ida_free(&dev_dax->ida, mapping->id);
kfree(mapping);
+ put_device(parent);
}
static void unregister_dax_mapping(void *data)
@@ -655,8 +671,7 @@ static void unregister_dax_mapping(void *data)
dev_dax->ranges[mapping->range_id].mapping = NULL;
mapping->range_id = -1;
- device_del(dev);
- put_device(dev);
+ device_unregister(dev);
}
static struct dev_dax_range *get_dax_range(struct device *dev)
@@ -778,6 +793,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
dev = &mapping->dev;
device_initialize(dev);
dev->parent = &dev_dax->dev;
+ get_device(dev->parent);
dev->type = &dax_mapping_type;
dev_set_name(dev, "mapping%d", mapping->id);
rc = device_add(dev);
@@ -1295,12 +1311,10 @@ static const struct attribute_group *dax_attribute_groups[] = {
static void dev_dax_release(struct device *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
- struct dax_region *dax_region = dev_dax->region;
struct dax_device *dax_dev = dev_dax->dax_dev;
put_dax(dax_dev);
free_dev_dax_id(dev_dax);
- dax_region_put(dax_region);
kfree(dev_dax->pgmap);
kfree(dev_dax);
}
@@ -1324,6 +1338,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
if (!dev_dax)
return ERR_PTR(-ENOMEM);
+ dev_dax->region = dax_region;
if (is_static(dax_region)) {
if (dev_WARN_ONCE(parent, data->id < 0,
"dynamic id specified to static region\n")) {
@@ -1339,13 +1354,11 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
goto err_id;
}
- rc = ida_alloc(&dax_region->ida, GFP_KERNEL);
+ rc = alloc_dev_dax_id(dev_dax);
if (rc < 0)
goto err_id;
- dev_dax->id = rc;
}
- dev_dax->region = dax_region;
dev = &dev_dax->dev;
device_initialize(dev);
dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
@@ -1386,7 +1399,6 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
dev_dax->target_node = dax_region->target_node;
dev_dax->align = dax_region->align;
ida_init(&dev_dax->ida);
- kref_get(&dax_region->kref);
inode = dax_inode(dax_dev);
dev->devt = inode->i_rdev;
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 8cd79ab34292..1ccd23360124 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -9,7 +9,6 @@ struct dev_dax;
struct resource;
struct dax_device;
struct dax_region;
-void dax_region_put(struct dax_region *dax_region);
/* dax bus specific ioresource flags */
#define IORESOURCE_DAX_STATIC BIT(0)
@@ -49,13 +48,6 @@ void dax_driver_unregister(struct dax_device_driver *dax_drv);
void kill_dev_dax(struct dev_dax *dev_dax);
bool static_dev_dax(struct dev_dax *dev_dax);
-/*
- * While run_dax() is potentially a generic operation that could be
- * defined in include/linux/dax.h we don't want to grow any users
- * outside of drivers/dax/
- */
-void run_dax(struct dax_device *dax_dev);
-
#define MODULE_ALIAS_DAX_DEVICE(type) \
MODULE_ALIAS("dax:t" __stringify(type) "*")
#define DAX_DEVICE_MODALIAS_FMT "dax:t%d"
diff --git a/drivers/dax/cxl.c b/drivers/dax/cxl.c
index ccdf8de85bd5..8bc9d04034d6 100644
--- a/drivers/dax/cxl.c
+++ b/drivers/dax/cxl.c
@@ -13,7 +13,6 @@ static int cxl_dax_region_probe(struct device *dev)
struct cxl_region *cxlr = cxlr_dax->cxlr;
struct dax_region *dax_region;
struct dev_dax_data data;
- struct dev_dax *dev_dax;
if (nid == NUMA_NO_NODE)
nid = memory_add_physaddr_to_nid(cxlr_dax->hpa_range.start);
@@ -28,13 +27,8 @@ static int cxl_dax_region_probe(struct device *dev)
.id = -1,
.size = range_len(&cxlr_dax->hpa_range),
};
- dev_dax = devm_create_dev_dax(&data);
- if (IS_ERR(dev_dax))
- return PTR_ERR(dev_dax);
- /* child dev_dax instances now own the lifetime of the dax_region */
- dax_region_put(dax_region);
- return 0;
+ return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
}
static struct cxl_driver cxl_dax_region_driver = {
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 1c974b7caae6..27cf2daaaa79 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -52,7 +52,8 @@ struct dax_mapping {
* @region - parent region
* @dax_dev - core dax functionality
* @target_node: effective numa node if dev_dax memory range is onlined
- * @id: ida allocated id
+ * @dyn_id: is this a dynamic or statically created instance
+ * @id: ida allocated id when the dax_region is not static
* @ida: mapping id allocator
* @dev - device core
* @pgmap - pgmap for memmap setup / lifetime (driver owned)
@@ -64,6 +65,7 @@ struct dev_dax {
struct dax_device *dax_dev;
unsigned int align;
int target_node;
+ bool dyn_id;
int id;
struct ida ida;
struct device dev;
@@ -76,6 +78,13 @@ struct dev_dax {
} *ranges;
};
+/*
+ * While run_dax() is potentially a generic operation that could be
+ * defined in include/linux/dax.h we don't want to grow any users
+ * outside of drivers/dax/
+ */
+void run_dax(struct dax_device *dax_dev);
+
static inline struct dev_dax *to_dev_dax(struct device *dev)
{
return container_of(dev, struct dev_dax, dev);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index af9930c03c9c..30665a3ff6ea 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -396,7 +396,7 @@ static void dev_dax_kill(void *dev_dax)
kill_dev_dax(dev_dax);
}
-int dev_dax_probe(struct dev_dax *dev_dax)
+static int dev_dax_probe(struct dev_dax *dev_dax)
{
struct dax_device *dax_dev = dev_dax->dax_dev;
struct device *dev = &dev_dax->dev;
@@ -471,7 +471,6 @@ int dev_dax_probe(struct dev_dax *dev_dax)
run_dax(dax_dev);
return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
-EXPORT_SYMBOL_GPL(dev_dax_probe);
static struct dax_device_driver device_dax_driver = {
.probe = dev_dax_probe,
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index e5fe8b39fb94..5d2ddef0f8f5 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -16,7 +16,6 @@ static int dax_hmem_probe(struct platform_device *pdev)
struct dax_region *dax_region;
struct memregion_info *mri;
struct dev_dax_data data;
- struct dev_dax *dev_dax;
/*
* @region_idle == true indicates that an administrative agent
@@ -38,13 +37,8 @@ static int dax_hmem_probe(struct platform_device *pdev)
.id = -1,
.size = region_idle ? 0 : range_len(&mri->range),
};
- dev_dax = devm_create_dev_dax(&data);
- if (IS_ERR(dev_dax))
- return PTR_ERR(dev_dax);
- /* child dev_dax instances now own the lifetime of the dax_region */
- dax_region_put(dax_region);
- return 0;
+ return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
}
static struct platform_driver dax_hmem_driver = {
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 7b36db6f1cbd..898ca9505754 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -99,7 +99,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
if (!data->res_name)
goto err_res_name;
- rc = memory_group_register_static(numa_node, total_len);
+ rc = memory_group_register_static(numa_node, PFN_UP(total_len));
if (rc < 0)
goto err_reg_mgid;
data->mgid = rc;
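
memory_group_register_static() expects a page count rather than a byte count, hence the PFN_UP() conversion. A quick worked example (assuming 4 KiB pages):

        total_len = 2 GiB  ->  PFN_UP(total_len) = 2 GiB / 4 KiB = 524288 pages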
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index f050ea78bb83..ae0cb113a5d3 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -13,7 +13,6 @@ static struct dev_dax *__dax_pmem_probe(struct device *dev)
int rc, id, region_id;
resource_size_t offset;
struct nd_pfn_sb *pfn_sb;
- struct dev_dax *dev_dax;
struct dev_dax_data data;
struct nd_namespace_io *nsio;
struct dax_region *dax_region;
@@ -65,12 +64,8 @@ static struct dev_dax *__dax_pmem_probe(struct device *dev)
.pgmap = &pgmap,
.size = range_len(&range),
};
- dev_dax = devm_create_dev_dax(&data);
- /* child dev_dax instances now own the lifetime of the dax_region */
- dax_region_put(dax_region);
-
- return dev_dax;
+ return devm_create_dev_dax(&data);
}
static int dax_pmem_probe(struct device *dev)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index c4c4728a36e4..0da9232ea175 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -203,6 +203,8 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
{
+ int ret;
+
if (!dax_alive(dax_dev))
return -ENXIO;
/*
@@ -213,7 +215,8 @@ int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
if (nr_pages != 1)
return -EIO;
- return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
+ ret = dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
+ return dax_mem2blk_err(ret);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
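dax_zero_page_range() now captures the callback's result so it can be passed through dax_mem2blk_err() before returning to block-layer callers. A hedged sketch of translating a backend error code at a layer boundary; the mapping below is illustrative only and does not reproduce the kernel's actual dax_mem2blk_err():

    #include <errno.h>
    #include <stdio.h>

    #define EHWPOISON_SKETCH 133  /* stand-in for the kernel's EHWPOISON */

    /* Illustrative translation: poison-style errors become -EIO. */
    static int mem2blk_err(int err)
    {
            return err == -EHWPOISON_SKETCH ? -EIO : err;
    }

    /* Pretend backend: fails with a memory-failure error for bad input. */
    static int backend_zero_range(long pgoff, unsigned long nr_pages)
    {
            (void)pgoff;
            return nr_pages == 1 ? 0 : -EHWPOISON_SKETCH;
    }

    static int zero_page_range(long pgoff, unsigned long nr_pages)
    {
            int ret = backend_zero_range(pgoff, nr_pages);

            return mem2blk_err(ret);  /* translate before returning upward */
    }

    int main(void)
    {
            printf("%d %d\n", zero_page_range(0, 1), zero_page_range(0, 2));
            return 0;
    }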
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f5f422f9b850..644c188d6a11 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -553,6 +553,7 @@ config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500
select DMA_ENGINE
+ select SRAM
help
Support for ST-Ericsson DMA40 controller
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index a812b9b00e6b..fc7cdad37161 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -963,7 +963,6 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->device_terminate_all = axi_dmac_terminate_all;
dma_dev->device_synchronize = axi_dmac_synchronize;
dma_dev->dev = &pdev->dev;
- dma_dev->chancnt = 1;
dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
dma_dev->directions = BIT(dmac->chan.direction);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 6937cc0c0b65..796b6caf0bab 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -1466,7 +1466,6 @@ static int dw_probe(struct platform_device *pdev)
dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
/* DMA capabilities */
- dw->dma.chancnt = hdata->nr_channels;
dw->dma.max_burst = hdata->axi_rw_burst_len;
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile
index 8d45c0d5689d..83ab58f87760 100644
--- a/drivers/dma/dw-edma/Makefile
+++ b/drivers/dma/dw-edma/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DW_EDMA) += dw-edma.o
-dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o
-dw-edma-objs := dw-edma-core.o \
- dw-edma-v0-core.o $(dw-edma-y)
+dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o \
+ dw-hdma-v0-debugfs.o
+dw-edma-objs := dw-edma-core.o \
+ dw-edma-v0-core.o \
+ dw-hdma-v0-core.o $(dw-edma-y)
obj-$(CONFIG_DW_EDMA_PCIE) += dw-edma-pcie.o
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 7d2b73ef0872..68236247059d 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -18,6 +18,7 @@
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
+#include "dw-hdma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
@@ -183,6 +184,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
+ struct dw_edma *dw = chan->dw;
struct dw_edma_chunk *child;
struct dw_edma_desc *desc;
struct virt_dma_desc *vd;
@@ -200,7 +202,7 @@ static int dw_edma_start_transfer(struct dw_edma_chan *chan)
if (!child)
return 0;
- dw_edma_v0_core_start(child, !desc->xfer_sz);
+ dw_edma_core_start(dw, child, !desc->xfer_sz);
desc->xfer_sz += child->ll_region.sz;
dw_edma_free_burst(child);
list_del(&child->list);
@@ -287,7 +289,7 @@ static int dw_edma_device_terminate_all(struct dma_chan *dchan)
chan->configured = false;
} else if (chan->status == EDMA_ST_IDLE) {
chan->configured = false;
- } else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
+ } else if (dw_edma_core_ch_status(chan) == DMA_COMPLETE) {
/*
* The channel is in a false BUSY state, probably didn't
* receive or lost an interrupt
@@ -599,8 +601,6 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
struct virt_dma_desc *vd;
unsigned long flags;
- dw_edma_v0_core_clear_done_int(chan);
-
spin_lock_irqsave(&chan->vc.lock, flags);
vd = vchan_next_desc(&chan->vc);
if (vd) {
@@ -641,8 +641,6 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
struct virt_dma_desc *vd;
unsigned long flags;
- dw_edma_v0_core_clear_abort_int(chan);
-
spin_lock_irqsave(&chan->vc.lock, flags);
vd = vchan_next_desc(&chan->vc);
if (vd) {
@@ -654,63 +652,32 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
chan->status = EDMA_ST_IDLE;
}
-static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
+static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
struct dw_edma_irq *dw_irq = data;
- struct dw_edma *dw = dw_irq->dw;
- unsigned long total, pos, val;
- unsigned long off;
- u32 mask;
-
- if (write) {
- total = dw->wr_ch_cnt;
- off = 0;
- mask = dw_irq->wr_mask;
- } else {
- total = dw->rd_ch_cnt;
- off = dw->wr_ch_cnt;
- mask = dw_irq->rd_mask;
- }
-
- val = dw_edma_v0_core_status_done_int(dw, write ?
- EDMA_DIR_WRITE :
- EDMA_DIR_READ);
- val &= mask;
- for_each_set_bit(pos, &val, total) {
- struct dw_edma_chan *chan = &dw->chan[pos + off];
-
- dw_edma_done_interrupt(chan);
- }
-
- val = dw_edma_v0_core_status_abort_int(dw, write ?
- EDMA_DIR_WRITE :
- EDMA_DIR_READ);
- val &= mask;
- for_each_set_bit(pos, &val, total) {
- struct dw_edma_chan *chan = &dw->chan[pos + off];
-
- dw_edma_abort_interrupt(chan);
- }
- return IRQ_HANDLED;
-}
-
-static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
-{
- return dw_edma_interrupt(irq, data, true);
+ return dw_edma_core_handle_int(dw_irq, EDMA_DIR_WRITE,
+ dw_edma_done_interrupt,
+ dw_edma_abort_interrupt);
}
static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
- return dw_edma_interrupt(irq, data, false);
+ struct dw_edma_irq *dw_irq = data;
+
+ return dw_edma_core_handle_int(dw_irq, EDMA_DIR_READ,
+ dw_edma_done_interrupt,
+ dw_edma_abort_interrupt);
}
static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
- dw_edma_interrupt(irq, data, true);
- dw_edma_interrupt(irq, data, false);
+ irqreturn_t ret = IRQ_NONE;
+
+ ret |= dw_edma_interrupt_write(irq, data);
+ ret |= dw_edma_interrupt_read(irq, data);
- return IRQ_HANDLED;
+ return ret;
}
static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
@@ -811,7 +778,7 @@ static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
vchan_init(&chan->vc, dma);
- dw_edma_v0_core_device_config(chan);
+ dw_edma_core_ch_config(chan);
}
/* Set DMA channel capabilities */
@@ -956,14 +923,19 @@ int dw_edma_probe(struct dw_edma_chip *chip)
dw->chip = chip;
+ if (dw->chip->mf == EDMA_MF_HDMA_NATIVE)
+ dw_hdma_v0_core_register(dw);
+ else
+ dw_edma_v0_core_register(dw);
+
raw_spin_lock_init(&dw->lock);
dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
- dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
+ dw_edma_core_ch_count(dw, EDMA_DIR_WRITE));
dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
- dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
+ dw_edma_core_ch_count(dw, EDMA_DIR_READ));
dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
@@ -982,7 +954,7 @@ int dw_edma_probe(struct dw_edma_chip *chip)
dev_name(chip->dev));
/* Disable eDMA, only to establish the ideal initial conditions */
- dw_edma_v0_core_off(dw);
+ dw_edma_core_off(dw);
/* Request IRQs */
err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
@@ -995,7 +967,7 @@ int dw_edma_probe(struct dw_edma_chip *chip)
goto err_irq_free;
/* Turn debugfs on */
- dw_edma_v0_core_debugfs_on(dw);
+ dw_edma_core_debugfs_on(dw);
chip->dw = dw;
@@ -1021,7 +993,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
return -ENODEV;
/* Disable eDMA */
- dw_edma_v0_core_off(dw);
+ dw_edma_core_off(dw);
/* Free irqs */
for (i = (dw->nr_irqs - 1); i >= 0; i--)
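With the write/read demux moved into the core ops, dw_edma_interrupt_common() now ORs the two sub-handler results instead of unconditionally returning IRQ_HANDLED, so a spurious interrupt can be reported as unhandled. A small sketch of that aggregation with locally defined IRQ_NONE/IRQ_HANDLED stand-ins:

    #include <stdio.h>

    enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

    static enum irqreturn handle_write(int pending)
    {
            return (pending & 1) ? IRQ_HANDLED : IRQ_NONE;
    }

    static enum irqreturn handle_read(int pending)
    {
            return (pending & 2) ? IRQ_HANDLED : IRQ_NONE;
    }

    static enum irqreturn common_handler(int pending)
    {
            enum irqreturn ret = IRQ_NONE;

            ret |= handle_write(pending);
            ret |= handle_read(pending);

            return ret;     /* IRQ_NONE lets the core detect a spurious interrupt */
    }

    int main(void)
    {
            printf("%d %d\n", common_handler(0), common_handler(2));
            return 0;
    }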
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 0ab2b6dba880..71894b9e0b15 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -111,6 +111,21 @@ struct dw_edma {
raw_spinlock_t lock; /* Only for legacy */
struct dw_edma_chip *chip;
+
+ const struct dw_edma_core_ops *core;
+};
+
+typedef void (*dw_edma_handler_t)(struct dw_edma_chan *);
+
+struct dw_edma_core_ops {
+ void (*off)(struct dw_edma *dw);
+ u16 (*ch_count)(struct dw_edma *dw, enum dw_edma_dir dir);
+ enum dma_status (*ch_status)(struct dw_edma_chan *chan);
+ irqreturn_t (*handle_int)(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
+ dw_edma_handler_t done, dw_edma_handler_t abort);
+ void (*start)(struct dw_edma_chunk *chunk, bool first);
+ void (*ch_config)(struct dw_edma_chan *chan);
+ void (*debugfs_on)(struct dw_edma *dw);
};
struct dw_edma_sg {
@@ -148,4 +163,47 @@ struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan)
return vc2dw_edma_chan(to_virt_chan(dchan));
}
+static inline
+void dw_edma_core_off(struct dw_edma *dw)
+{
+ dw->core->off(dw);
+}
+
+static inline
+u16 dw_edma_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
+{
+ return dw->core->ch_count(dw, dir);
+}
+
+static inline
+enum dma_status dw_edma_core_ch_status(struct dw_edma_chan *chan)
+{
+ return chan->dw->core->ch_status(chan);
+}
+
+static inline irqreturn_t
+dw_edma_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
+ dw_edma_handler_t done, dw_edma_handler_t abort)
+{
+ return dw_irq->dw->core->handle_int(dw_irq, dir, done, abort);
+}
+
+static inline
+void dw_edma_core_start(struct dw_edma *dw, struct dw_edma_chunk *chunk, bool first)
+{
+ dw->core->start(chunk, first);
+}
+
+static inline
+void dw_edma_core_ch_config(struct dw_edma_chan *chan)
+{
+ chan->dw->core->ch_config(chan);
+}
+
+static inline
+void dw_edma_core_debugfs_on(struct dw_edma *dw)
+{
+ dw->core->debugfs_on(dw);
+}
+
#endif /* _DW_EDMA_CORE_H */
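The new dw_edma_core_ops table plus the static inline wrappers above let the core dispatch to either the eDMA or the HDMA backend without compile-time knowledge of which one is active. A minimal sketch of the pattern; all names below are illustrative, not the driver's:

    #include <stdio.h>

    struct engine;

    struct engine_ops {
            void (*off)(struct engine *e);
            unsigned int (*ch_count)(struct engine *e);
    };

    struct engine {
            const struct engine_ops *core;
            unsigned int channels;
    };

    /* Thin wrappers keep call sites free of the dereference boilerplate. */
    static inline void engine_off(struct engine *e)
    {
            e->core->off(e);
    }

    static inline unsigned int engine_ch_count(struct engine *e)
    {
            return e->core->ch_count(e);
    }

    /* One backend; a second backend only needs its own ops table. */
    static void v0_off(struct engine *e)
    {
            (void)e;
            printf("v0: engine disabled\n");
    }

    static unsigned int v0_ch_count(struct engine *e)
    {
            return e->channels;
    }

    static const struct engine_ops v0_ops = {
            .off      = v0_off,
            .ch_count = v0_ch_count,
    };

    static void v0_register(struct engine *e)      /* mirrors *_core_register() */
    {
            e->core = &v0_ops;
    }

    int main(void)
    {
            struct engine e = { .channels = 8 };

            v0_register(&e);
            engine_off(&e);
            printf("%u channels\n", engine_ch_count(&e));
            return 0;
    }

A register() helper that merely stores a pointer to a const ops table keeps each backend self-contained and needs no runtime allocation for the dispatch.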
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 2b40f2b44f5e..1c6043751dc9 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -109,7 +109,7 @@ static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr)
return region.start;
}
-static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
+static const struct dw_edma_plat_ops dw_edma_pcie_plat_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
.pci_address = dw_edma_pcie_address,
};
@@ -225,7 +225,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->mf = vsec_data.mf;
chip->nr_irqs = nr_irqs;
- chip->ops = &dw_edma_pcie_core_ops;
+ chip->ops = &dw_edma_pcie_plat_ops;
chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 32f834a3848a..b38786f0ad79 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -7,7 +7,7 @@
*/
#include <linux/bitfield.h>
-
+#include <linux/irqreturn.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "dw-edma-core.h"
@@ -160,7 +160,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
/* eDMA management callbacks */
-void dw_edma_v0_core_off(struct dw_edma *dw)
+static void dw_edma_v0_core_off(struct dw_edma *dw)
{
SET_BOTH_32(dw, int_mask,
EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
@@ -169,7 +169,7 @@ void dw_edma_v0_core_off(struct dw_edma *dw)
SET_BOTH_32(dw, engine_en, 0);
}
-u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
+static u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
u32 num_ch;
@@ -186,7 +186,7 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
return (u16)num_ch;
}
-enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
+static enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
u32 tmp;
@@ -202,7 +202,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
return DMA_ERROR;
}
-void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
+static void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
@@ -210,7 +210,7 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}
-void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
+static void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
@@ -218,18 +218,64 @@ void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}
-u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
+static u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
return FIELD_GET(EDMA_V0_DONE_INT_MASK,
GET_RW_32(dw, dir, int_status));
}
-u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
+static u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
GET_RW_32(dw, dir, int_status));
}
+static irqreturn_t
+dw_edma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
+ dw_edma_handler_t done, dw_edma_handler_t abort)
+{
+ struct dw_edma *dw = dw_irq->dw;
+ unsigned long total, pos, val;
+ irqreturn_t ret = IRQ_NONE;
+ struct dw_edma_chan *chan;
+ unsigned long off;
+ u32 mask;
+
+ if (dir == EDMA_DIR_WRITE) {
+ total = dw->wr_ch_cnt;
+ off = 0;
+ mask = dw_irq->wr_mask;
+ } else {
+ total = dw->rd_ch_cnt;
+ off = dw->wr_ch_cnt;
+ mask = dw_irq->rd_mask;
+ }
+
+ val = dw_edma_v0_core_status_done_int(dw, dir);
+ val &= mask;
+ for_each_set_bit(pos, &val, total) {
+ chan = &dw->chan[pos + off];
+
+ dw_edma_v0_core_clear_done_int(chan);
+ done(chan);
+
+ ret = IRQ_HANDLED;
+ }
+
+ val = dw_edma_v0_core_status_abort_int(dw, dir);
+ val &= mask;
+ for_each_set_bit(pos, &val, total) {
+ chan = &dw->chan[pos + off];
+
+ dw_edma_v0_core_clear_abort_int(chan);
+ abort(chan);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
u32 control, u32 size, u64 sar, u64 dar)
{
@@ -300,7 +346,7 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
-void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->dw;
@@ -371,7 +417,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}
-int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
+static void dw_edma_v0_core_ch_config(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
u32 tmp = 0;
@@ -438,12 +484,25 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
break;
}
-
- return 0;
}
/* eDMA debugfs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
+static void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
dw_edma_v0_debugfs_on(dw);
}
+
+static const struct dw_edma_core_ops dw_edma_v0_core = {
+ .off = dw_edma_v0_core_off,
+ .ch_count = dw_edma_v0_core_ch_count,
+ .ch_status = dw_edma_v0_core_ch_status,
+ .handle_int = dw_edma_v0_core_handle_int,
+ .start = dw_edma_v0_core_start,
+ .ch_config = dw_edma_v0_core_ch_config,
+ .debugfs_on = dw_edma_v0_core_debugfs_on,
+};
+
+void dw_edma_v0_core_register(struct dw_edma *dw)
+{
+ dw->core = &dw_edma_v0_core;
+}
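dw_edma_v0_core_handle_int() walks the masked done and abort status words one set bit at a time, acknowledging each channel before invoking the done or abort callback. A sketch of that demux loop using plain bit tests in place of for_each_set_bit():

    #include <stdio.h>

    static void done_cb(unsigned int ch)
    {
            printf("done interrupt on channel %u\n", ch);
    }

    /* Service only the bits that are both pending and unmasked. */
    static int handle_status(unsigned long status, unsigned long mask,
                             unsigned int total, void (*cb)(unsigned int))
    {
            unsigned long val = status & mask;
            unsigned int pos;
            int handled = 0;

            for (pos = 0; pos < total; pos++) {
                    if (!(val & (1UL << pos)))
                            continue;
                    cb(pos);        /* real code clears the bit, then notifies */
                    handled = 1;
            }

            return handled;
    }

    int main(void)
    {
            /* channels 1 and 3 signalled "done", but channel 3 is masked out */
            return handle_status(0x0a, 0x02, 4, done_cb) ? 0 : 1;
    }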
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index ab96a1f48080..04a882222f99 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -11,17 +11,7 @@
#include <linux/dma/edma.h>
-/* eDMA management callbacks */
-void dw_edma_v0_core_off(struct dw_edma *chan);
-u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir);
-enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan);
-void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan);
-void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan);
-u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir);
-u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir);
-void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
-int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
-/* eDMA debug fs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
+/* eDMA core register */
+void dw_edma_v0_core_register(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
new file mode 100644
index 000000000000..00b735a0202a
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Cai Huoqing
+ * Synopsys DesignWare HDMA v0 core
+ */
+
+#include <linux/bitfield.h>
+#include <linux/irqreturn.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "dw-edma-core.h"
+#include "dw-hdma-v0-core.h"
+#include "dw-hdma-v0-regs.h"
+#include "dw-hdma-v0-debugfs.h"
+
+enum dw_hdma_control {
+ DW_HDMA_V0_CB = BIT(0),
+ DW_HDMA_V0_TCB = BIT(1),
+ DW_HDMA_V0_LLP = BIT(2),
+ DW_HDMA_V0_LIE = BIT(3),
+ DW_HDMA_V0_RIE = BIT(4),
+ DW_HDMA_V0_CCS = BIT(8),
+ DW_HDMA_V0_LLE = BIT(9),
+};
+
+static inline struct dw_hdma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
+{
+ return dw->chip->reg_base;
+}
+
+static inline struct dw_hdma_v0_ch_regs __iomem *
+__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
+{
+ if (dir == EDMA_DIR_WRITE)
+ return &(__dw_regs(dw)->ch[ch].wr);
+ else
+ return &(__dw_regs(dw)->ch[ch].rd);
+}
+
+#define SET_CH_32(dw, dir, ch, name, value) \
+ writel(value, &(__dw_ch_regs(dw, dir, ch)->name))
+
+#define GET_CH_32(dw, dir, ch, name) \
+ readl(&(__dw_ch_regs(dw, dir, ch)->name))
+
+#define SET_BOTH_CH_32(dw, ch, name, value) \
+ do { \
+ writel(value, &(__dw_ch_regs(dw, EDMA_DIR_WRITE, ch)->name)); \
+ writel(value, &(__dw_ch_regs(dw, EDMA_DIR_READ, ch)->name)); \
+ } while (0)
+
+/* HDMA management callbacks */
+static void dw_hdma_v0_core_off(struct dw_edma *dw)
+{
+ int id;
+
+ for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
+ SET_BOTH_CH_32(dw, id, int_setup,
+ HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
+ SET_BOTH_CH_32(dw, id, int_clear,
+ HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
+ SET_BOTH_CH_32(dw, id, ch_en, 0);
+ }
+}
+
+static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
+{
+ u32 num_ch = 0;
+ int id;
+
+ for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
+ if (GET_CH_32(dw, dir, id, ch_en) & BIT(0))
+ num_ch++;
+ }
+
+ if (num_ch > HDMA_V0_MAX_NR_CH)
+ num_ch = HDMA_V0_MAX_NR_CH;
+
+ return (u16)num_ch;
+}
+
+static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
+{
+ struct dw_edma *dw = chan->dw;
+ u32 tmp;
+
+ tmp = FIELD_GET(HDMA_V0_CH_STATUS_MASK,
+ GET_CH_32(dw, chan->dir, chan->id, ch_stat));
+
+ if (tmp == 1)
+ return DMA_IN_PROGRESS;
+ else if (tmp == 3)
+ return DMA_COMPLETE;
+ else
+ return DMA_ERROR;
+}
+
+static void dw_hdma_v0_core_clear_done_int(struct dw_edma_chan *chan)
+{
+ struct dw_edma *dw = chan->dw;
+
+ SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_STOP_INT_MASK);
+}
+
+static void dw_hdma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
+{
+ struct dw_edma *dw = chan->dw;
+
+ SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_ABORT_INT_MASK);
+}
+
+static u32 dw_hdma_v0_core_status_int(struct dw_edma_chan *chan)
+{
+ struct dw_edma *dw = chan->dw;
+
+ return GET_CH_32(dw, chan->dir, chan->id, int_stat);
+}
+
+static irqreturn_t
+dw_hdma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
+ dw_edma_handler_t done, dw_edma_handler_t abort)
+{
+ struct dw_edma *dw = dw_irq->dw;
+ unsigned long total, pos, val;
+ irqreturn_t ret = IRQ_NONE;
+ struct dw_edma_chan *chan;
+ unsigned long off, mask;
+
+ if (dir == EDMA_DIR_WRITE) {
+ total = dw->wr_ch_cnt;
+ off = 0;
+ mask = dw_irq->wr_mask;
+ } else {
+ total = dw->rd_ch_cnt;
+ off = dw->wr_ch_cnt;
+ mask = dw_irq->rd_mask;
+ }
+
+ for_each_set_bit(pos, &mask, total) {
+ chan = &dw->chan[pos + off];
+
+ val = dw_hdma_v0_core_status_int(chan);
+ if (FIELD_GET(HDMA_V0_STOP_INT_MASK, val)) {
+ dw_hdma_v0_core_clear_done_int(chan);
+ done(chan);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (FIELD_GET(HDMA_V0_ABORT_INT_MASK, val)) {
+ dw_hdma_v0_core_clear_abort_int(chan);
+ abort(chan);
+
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
+ u32 control, u32 size, u64 sar, u64 dar)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
+
+ lli->control = control;
+ lli->transfer_size = size;
+ lli->sar.reg = sar;
+ lli->dar.reg = dar;
+ } else {
+ struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &lli->control);
+ writel(size, &lli->transfer_size);
+ writeq(sar, &lli->sar.reg);
+ writeq(dar, &lli->dar.reg);
+ }
+}
+
+static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
+ int i, u32 control, u64 pointer)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
+
+ llp->control = control;
+ llp->llp.reg = pointer;
+ } else {
+ struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &llp->control);
+ writeq(pointer, &llp->llp.reg);
+ }
+}
+
+static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
+{
+ struct dw_edma_burst *child;
+ struct dw_edma_chan *chan = chunk->chan;
+ u32 control = 0, i = 0;
+ int j;
+
+ if (chunk->cb)
+ control = DW_HDMA_V0_CB;
+
+ j = chunk->bursts_alloc;
+ list_for_each_entry(child, &chunk->burst->list, list) {
+ j--;
+ if (!j) {
+ control |= DW_HDMA_V0_LIE;
+ if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ control |= DW_HDMA_V0_RIE;
+ }
+
+ dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
+ child->sar, child->dar);
+ }
+
+ control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
+ if (!chunk->cb)
+ control |= DW_HDMA_V0_CB;
+
+ dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
+}
+
+static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+{
+ struct dw_edma_chan *chan = chunk->chan;
+ struct dw_edma *dw = chan->dw;
+ u32 tmp;
+
+ dw_hdma_v0_core_write_chunk(chunk);
+
+ if (first) {
+ /* Enable engine */
+ SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
+ /* Interrupt enable&unmask - done, abort */
+ tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
+ HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
+ HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+ SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
+ /* Channel control */
+ SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
+ /* Linked list */
+ /* llp is not aligned on 64bit -> keep 32bit accesses */
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chunk->ll_region.paddr));
+ }
+ /* Set consumer cycle */
+ SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
+ HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+ /* Doorbell */
+ SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
+}
+
+static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan)
+{
+ struct dw_edma *dw = chan->dw;
+
+ /* MSI done addr - low, high */
+ SET_CH_32(dw, chan->dir, chan->id, msi_stop.lsb, chan->msi.address_lo);
+ SET_CH_32(dw, chan->dir, chan->id, msi_stop.msb, chan->msi.address_hi);
+ /* MSI abort addr - low, high */
+ SET_CH_32(dw, chan->dir, chan->id, msi_abort.lsb, chan->msi.address_lo);
+ SET_CH_32(dw, chan->dir, chan->id, msi_abort.msb, chan->msi.address_hi);
+ /* config MSI data */
+ SET_CH_32(dw, chan->dir, chan->id, msi_msgdata, chan->msi.data);
+}
+
+/* HDMA debugfs callbacks */
+static void dw_hdma_v0_core_debugfs_on(struct dw_edma *dw)
+{
+ dw_hdma_v0_debugfs_on(dw);
+}
+
+static const struct dw_edma_core_ops dw_hdma_v0_core = {
+ .off = dw_hdma_v0_core_off,
+ .ch_count = dw_hdma_v0_core_ch_count,
+ .ch_status = dw_hdma_v0_core_ch_status,
+ .handle_int = dw_hdma_v0_core_handle_int,
+ .start = dw_hdma_v0_core_start,
+ .ch_config = dw_hdma_v0_core_ch_config,
+ .debugfs_on = dw_hdma_v0_core_debugfs_on,
+};
+
+void dw_hdma_v0_core_register(struct dw_edma *dw)
+{
+ dw->core = &dw_hdma_v0_core;
+}
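dw_hdma_v0_write_ll_data() and dw_hdma_v0_write_ll_link() choose between plain structure stores, when the linked list lives in local memory, and MMIO accessors, when it sits behind a remote BAR. A sketch of that branch; mmio_write32()/mmio_write64() below are stand-ins for writel()/writeq(), not real kernel helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct lli {                    /* shape of one linked-list entry */
            uint32_t control;
            uint32_t transfer_size;
            uint64_t sar;
            uint64_t dar;
    };

    /* Stand-ins for writel()/writeq(); a real remote list is __iomem. */
    static void mmio_write32(void *addr, uint32_t val) { memcpy(addr, &val, sizeof(val)); }
    static void mmio_write64(void *addr, uint64_t val) { memcpy(addr, &val, sizeof(val)); }

    static void write_ll_data(struct lli *entry, int list_is_local,
                              uint32_t control, uint32_t size,
                              uint64_t sar, uint64_t dar)
    {
            if (list_is_local) {            /* DW_EDMA_CHIP_LOCAL case */
                    entry->control = control;
                    entry->transfer_size = size;
                    entry->sar = sar;
                    entry->dar = dar;
            } else {                        /* list behind a remote BAR */
                    mmio_write32(&entry->control, control);
                    mmio_write32(&entry->transfer_size, size);
                    mmio_write64(&entry->sar, sar);
                    mmio_write64(&entry->dar, dar);
            }
    }

    int main(void)
    {
            struct lli entry = { 0 };

            write_ll_data(&entry, 1, 0x1, 4096, 0x1000, 0x2000);
            printf("control=%x size=%u\n", (unsigned)entry.control,
                   (unsigned)entry.transfer_size);
            return 0;
    }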
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.h b/drivers/dma/dw-edma/dw-hdma-v0-core.h
new file mode 100644
index 000000000000..c373b4f0bd8a
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Cai Huoqing
+ * Synopsys DesignWare HDMA v0 core
+ *
+ * Author: Cai Huoqing <cai.huoqing@linux.dev>
+ */
+
+#ifndef _DW_HDMA_V0_CORE_H
+#define _DW_HDMA_V0_CORE_H
+
+#include <linux/dma/edma.h>
+
+/* HDMA core register */
+void dw_hdma_v0_core_register(struct dw_edma *dw);
+
+#endif /* _DW_HDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
new file mode 100644
index 000000000000..520c81978b08
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Cai Huoqing
+ * Synopsys DesignWare HDMA v0 debugfs
+ *
+ * Author: Cai Huoqing <cai.huoqing@linux.dev>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/bitfield.h>
+
+#include "dw-hdma-v0-debugfs.h"
+#include "dw-hdma-v0-regs.h"
+#include "dw-edma-core.h"
+
+#define REGS_ADDR(dw, name) \
+ ({ \
+ struct dw_hdma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \
+ \
+ (void __iomem *)&__regs->name; \
+ })
+
+#define REGS_CH_ADDR(dw, name, _dir, _ch) \
+ ({ \
+ struct dw_hdma_v0_ch_regs __iomem *__ch_regs; \
+ \
+ if (_dir == EDMA_DIR_READ) \
+ __ch_regs = REGS_ADDR(dw, ch[_ch].rd); \
+ else \
+ __ch_regs = REGS_ADDR(dw, ch[_ch].wr); \
+ \
+ (void __iomem *)&__ch_regs->name; \
+ })
+
+#define CTX_REGISTER(dw, name, dir, ch) \
+ {#name, REGS_CH_ADDR(dw, name, dir, ch)}
+
+#define WRITE_STR "write"
+#define READ_STR "read"
+#define CHANNEL_STR "channel"
+#define REGISTERS_STR "registers"
+
+struct dw_hdma_debugfs_entry {
+ const char *name;
+ void __iomem *reg;
+};
+
+static int dw_hdma_debugfs_u32_get(void *data, u64 *val)
+{
+ struct dw_hdma_debugfs_entry *entry = data;
+ void __iomem *reg = entry->reg;
+
+ *val = readl(reg);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_hdma_debugfs_u32_get, NULL, "0x%08llx\n");
+
+static void dw_hdma_debugfs_create_x32(struct dw_edma *dw,
+ const struct dw_hdma_debugfs_entry ini[],
+ int nr_entries, struct dentry *dent)
+{
+ struct dw_hdma_debugfs_entry *entries;
+ int i;
+
+ entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries),
+ GFP_KERNEL);
+ if (!entries)
+ return;
+
+ for (i = 0; i < nr_entries; i++) {
+ entries[i] = ini[i];
+
+ debugfs_create_file_unsafe(entries[i].name, 0444, dent,
+ &entries[i], &fops_x32);
+ }
+}
+
+static void dw_hdma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
+ u16 ch, struct dentry *dent)
+{
+ const struct dw_hdma_debugfs_entry debugfs_regs[] = {
+ CTX_REGISTER(dw, ch_en, dir, ch),
+ CTX_REGISTER(dw, doorbell, dir, ch),
+ CTX_REGISTER(dw, prefetch, dir, ch),
+ CTX_REGISTER(dw, handshake, dir, ch),
+ CTX_REGISTER(dw, llp.lsb, dir, ch),
+ CTX_REGISTER(dw, llp.msb, dir, ch),
+ CTX_REGISTER(dw, cycle_sync, dir, ch),
+ CTX_REGISTER(dw, transfer_size, dir, ch),
+ CTX_REGISTER(dw, sar.lsb, dir, ch),
+ CTX_REGISTER(dw, sar.msb, dir, ch),
+ CTX_REGISTER(dw, dar.lsb, dir, ch),
+ CTX_REGISTER(dw, dar.msb, dir, ch),
+ CTX_REGISTER(dw, watermark_en, dir, ch),
+ CTX_REGISTER(dw, control1, dir, ch),
+ CTX_REGISTER(dw, func_num, dir, ch),
+ CTX_REGISTER(dw, qos, dir, ch),
+ CTX_REGISTER(dw, ch_stat, dir, ch),
+ CTX_REGISTER(dw, int_stat, dir, ch),
+ CTX_REGISTER(dw, int_setup, dir, ch),
+ CTX_REGISTER(dw, int_clear, dir, ch),
+ CTX_REGISTER(dw, msi_stop.lsb, dir, ch),
+ CTX_REGISTER(dw, msi_stop.msb, dir, ch),
+ CTX_REGISTER(dw, msi_watermark.lsb, dir, ch),
+ CTX_REGISTER(dw, msi_watermark.msb, dir, ch),
+ CTX_REGISTER(dw, msi_abort.lsb, dir, ch),
+ CTX_REGISTER(dw, msi_abort.msb, dir, ch),
+ CTX_REGISTER(dw, msi_msgdata, dir, ch),
+ };
+ int nr_entries = ARRAY_SIZE(debugfs_regs);
+
+ dw_hdma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent);
+}
+
+static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
+{
+ struct dentry *regs_dent, *ch_dent;
+ char name[16];
+ int i;
+
+ regs_dent = debugfs_create_dir(WRITE_STR, dent);
+
+ for (i = 0; i < dw->wr_ch_cnt; i++) {
+ snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
+
+ ch_dent = debugfs_create_dir(name, regs_dent);
+
+ dw_hdma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent);
+ }
+}
+
+static void dw_hdma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent)
+{
+ struct dentry *regs_dent, *ch_dent;
+ char name[16];
+ int i;
+
+ regs_dent = debugfs_create_dir(READ_STR, dent);
+
+ for (i = 0; i < dw->rd_ch_cnt; i++) {
+ snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
+
+ ch_dent = debugfs_create_dir(name, regs_dent);
+
+ dw_hdma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent);
+ }
+}
+
+static void dw_hdma_debugfs_regs(struct dw_edma *dw)
+{
+ struct dentry *regs_dent;
+
+ regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root);
+
+ dw_hdma_debugfs_regs_wr(dw, regs_dent);
+ dw_hdma_debugfs_regs_rd(dw, regs_dent);
+}
+
+void dw_hdma_v0_debugfs_on(struct dw_edma *dw)
+{
+ if (!debugfs_initialized())
+ return;
+
+ debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf);
+ debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt);
+ debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt);
+
+ dw_hdma_debugfs_regs(dw);
+}
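The HDMA debugfs support builds a {name, register address} table per channel and creates one read-only node per entry, the same table-driven approach the eDMA debugfs code uses. A sketch of the idea; create_ro_file() is a hypothetical stand-in for debugfs_create_file_unsafe():

    #include <stdint.h>
    #include <stdio.h>

    struct reg_entry {
            const char *name;
            const uint32_t *reg;
    };

    /* Hypothetical stand-in for debugfs_create_file_unsafe(). */
    static void create_ro_file(const char *dir, const struct reg_entry *entry)
    {
            /* a real implementation would hand entry->reg to the read fop */
            printf("created %s/%s\n", dir, entry->name);
    }

    int main(void)
    {
            static uint32_t ch_en, doorbell, llp_lsb;   /* fake register backing */
            const struct reg_entry regs[] = {
                    { "ch_en",    &ch_en    },
                    { "doorbell", &doorbell },
                    { "llp.lsb",  &llp_lsb  },
            };
            unsigned int i;

            for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
                    create_ro_file("write/channel:0", &regs[i]);

            return 0;
    }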
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.h b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.h
new file mode 100644
index 000000000000..e6842c83777d
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Cai Huoqing
+ * Synopsys DesignWare HDMA v0 debugfs
+ *
+ * Author: Cai Huoqing <cai.huoqing@linux.dev>
+ */
+
+#ifndef _DW_HDMA_V0_DEBUG_FS_H
+#define _DW_HDMA_V0_DEBUG_FS_H
+
+#include <linux/dma/edma.h>
+
+#ifdef CONFIG_DEBUG_FS
+void dw_hdma_v0_debugfs_on(struct dw_edma *dw);
+#else
+static inline void dw_hdma_v0_debugfs_on(struct dw_edma *dw)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _DW_HDMA_V0_DEBUG_FS_H */
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-regs.h b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
new file mode 100644
index 000000000000..a974abdf8aaf
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Cai Huoqing
+ * Synopsys DesignWare HDMA v0 reg
+ *
+ * Author: Cai Huoqing <cai.huoqing@linux.dev>
+ */
+
+#ifndef _DW_HDMA_V0_REGS_H
+#define _DW_HDMA_V0_REGS_H
+
+#include <linux/dmaengine.h>
+
+#define HDMA_V0_MAX_NR_CH 8
+#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
+#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
+#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
+#define HDMA_V0_REMOTE_STOP_INT_EN BIT(3)
+#define HDMA_V0_ABORT_INT_MASK BIT(2)
+#define HDMA_V0_STOP_INT_MASK BIT(0)
+#define HDMA_V0_LINKLIST_EN BIT(0)
+#define HDMA_V0_CONSUMER_CYCLE_STAT BIT(1)
+#define HDMA_V0_CONSUMER_CYCLE_BIT BIT(0)
+#define HDMA_V0_DOORBELL_START BIT(0)
+#define HDMA_V0_CH_STATUS_MASK GENMASK(1, 0)
+
+struct dw_hdma_v0_ch_regs {
+ u32 ch_en; /* 0x0000 */
+ u32 doorbell; /* 0x0004 */
+ u32 prefetch; /* 0x0008 */
+ u32 handshake; /* 0x000c */
+ union {
+ u64 reg; /* 0x0010..0x0014 */
+ struct {
+ u32 lsb; /* 0x0010 */
+ u32 msb; /* 0x0014 */
+ };
+ } llp;
+ u32 cycle_sync; /* 0x0018 */
+ u32 transfer_size; /* 0x001c */
+ union {
+ u64 reg; /* 0x0020..0x0024 */
+ struct {
+ u32 lsb; /* 0x0020 */
+ u32 msb; /* 0x0024 */
+ };
+ } sar;
+ union {
+ u64 reg; /* 0x0028..0x002c */
+ struct {
+ u32 lsb; /* 0x0028 */
+ u32 msb; /* 0x002c */
+ };
+ } dar;
+ u32 watermark_en; /* 0x0030 */
+ u32 control1; /* 0x0034 */
+ u32 func_num; /* 0x0038 */
+ u32 qos; /* 0x003c */
+ u32 padding_1[16]; /* 0x0040..0x007c */
+ u32 ch_stat; /* 0x0080 */
+ u32 int_stat; /* 0x0084 */
+ u32 int_setup; /* 0x0088 */
+ u32 int_clear; /* 0x008c */
+ union {
+ u64 reg; /* 0x0090..0x0094 */
+ struct {
+ u32 lsb; /* 0x0090 */
+ u32 msb; /* 0x0094 */
+ };
+ } msi_stop;
+ union {
+ u64 reg; /* 0x0098..0x009c */
+ struct {
+ u32 lsb; /* 0x0098 */
+ u32 msb; /* 0x009c */
+ };
+ } msi_watermark;
+ union {
+ u64 reg; /* 0x00a0..0x00a4 */
+ struct {
+ u32 lsb; /* 0x00a0 */
+ u32 msb; /* 0x00a4 */
+ };
+ } msi_abort;
+ u32 msi_msgdata; /* 0x00a8 */
+ u32 padding_2[21]; /* 0x00ac..0x00fc */
+} __packed;
+
+struct dw_hdma_v0_ch {
+ struct dw_hdma_v0_ch_regs wr; /* 0x0000 */
+ struct dw_hdma_v0_ch_regs rd; /* 0x0100 */
+} __packed;
+
+struct dw_hdma_v0_regs {
+ struct dw_hdma_v0_ch ch[HDMA_V0_MAX_NR_CH]; /* 0x0000..0x0fa8 */
+} __packed;
+
+struct dw_hdma_v0_lli {
+ u32 control;
+ u32 transfer_size;
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } sar;
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } dar;
+} __packed;
+
+struct dw_hdma_v0_llp {
+ u32 control;
+ u32 reserved;
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } llp;
+} __packed;
+
+#endif /* _DW_HDMA_V0_REGS_H */
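Every 64-bit register in the map above is described as a union of a u64 and an lsb/msb pair, so callers can pick 32-bit or 64-bit accesses, as the llp alignment comment in the core code suggests. A sketch of the overlay, assuming a little-endian CPU whose halves match the device's register ordering:

    #include <stdint.h>
    #include <stdio.h>

    union reg64 {
            uint64_t reg;
            struct {
                    uint32_t lsb;   /* offset +0x0 */
                    uint32_t msb;   /* offset +0x4 */
            };
    };

    int main(void)
    {
            union reg64 llp = { 0 };

            /* "llp is not aligned on 64bit -> keep 32bit accesses" */
            llp.lsb = 0x89abcdefu;
            llp.msb = 0x01234567u;

            printf("llp.reg = 0x%016llx\n", (unsigned long long)llp.reg);
            return 0;
    }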
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 79d244011093..79d8957f9e60 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -584,11 +584,11 @@ desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
}
/**
- * __cleanup - reclaim used descriptors
+ * __ioat_cleanup - reclaim used descriptors
* @ioat_chan: channel (ring) to clean
* @phys_complete: zeroed (or not) completion address (from status)
*/
-static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
+static void __ioat_cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
struct ioat_ring_ent *desc;
@@ -675,7 +675,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
spin_lock_bh(&ioat_chan->cleanup_lock);
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
- __cleanup(ioat_chan, phys_complete);
+ __ioat_cleanup(ioat_chan, phys_complete);
if (is_ioat_halted(*ioat_chan->completion)) {
u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -712,7 +712,7 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
ioat_quiesce(ioat_chan, 0);
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
- __cleanup(ioat_chan, phys_complete);
+ __ioat_cleanup(ioat_chan, phys_complete);
__ioat_restart_chan(ioat_chan);
}
@@ -786,7 +786,7 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
/* cleanup so tail points to descriptor that caused the error */
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
- __cleanup(ioat_chan, phys_complete);
+ __ioat_cleanup(ioat_chan, phys_complete);
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
@@ -943,7 +943,7 @@ void ioat_timer_event(struct timer_list *t)
/* timer restarted in ioat_cleanup_preamble
* and IOAT_COMPLETION_ACK cleared
*/
- __cleanup(ioat_chan, phys_complete);
+ __ioat_cleanup(ioat_chan, phys_complete);
goto unlock_out;
}
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
index 12725fa1655f..34b6416c3287 100644
--- a/drivers/dma/plx_dma.c
+++ b/drivers/dma/plx_dma.c
@@ -517,7 +517,6 @@ static int plx_dma_create(struct pci_dev *pdev)
plxdev->bar = pcim_iomap_table(pdev)[0];
dma = &plxdev->dma_dev;
- dma->chancnt = 1;
INIT_LIST_HEAD(&dma->channels);
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index 3f926a653bd8..ace75d7b835a 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -45,6 +45,7 @@ config QCOM_HIDMA_MGMT
config QCOM_HIDMA
tristate "Qualcomm Technologies HIDMA Channel support"
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for the Qualcomm Technologies HIDMA controller.
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 1e47d27e1f81..4c3eb972039d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1272,7 +1272,15 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->powered_remotely = of_property_read_bool(pdev->dev.of_node,
"qcom,powered-remotely");
- if (bdev->controlled_remotely || bdev->powered_remotely) {
+ if (bdev->controlled_remotely || bdev->powered_remotely)
+ bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
+ else
+ bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
+
+ if (IS_ERR(bdev->bamclk))
+ return PTR_ERR(bdev->bamclk);
+
+ if (!bdev->bamclk) {
ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
&bdev->num_channels);
if (ret)
@@ -1284,14 +1292,6 @@ static int bam_dma_probe(struct platform_device *pdev)
dev_err(bdev->dev, "num-ees unspecified in dt\n");
}
- if (bdev->controlled_remotely || bdev->powered_remotely)
- bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
- else
- bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
-
- if (IS_ERR(bdev->bamclk))
- return PTR_ERR(bdev->bamclk);
-
ret = clk_prepare_enable(bdev->bamclk);
if (ret) {
dev_err(bdev->dev, "failed to prepare/enable clock\n");
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 04d1c33afc12..344525c3a32f 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -214,7 +214,6 @@ static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
spin_lock_init(&mchan->lock);
list_add_tail(&mchan->chan.device_node, &ddev->channels);
- dmadev->ddev.chancnt++;
return 0;
}
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 474d3ba8ec9f..2b639adb48ba 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1169,7 +1169,6 @@ static int sprd_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
sdev->total_chns = chn_count;
- sdev->dma_dev.chancnt = chn_count;
INIT_LIST_HEAD(&sdev->dma_dev.channels);
INIT_LIST_HEAD(&sdev->dma_dev.global_node);
sdev->dma_dev.dev = &pdev->dev;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index f093e08c23b1..825001bde42c 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -19,14 +19,43 @@
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
-#include <linux/platform_data/dma-ste-dma40.h>
#include "dmaengine.h"
+#include "ste_dma40.h"
#include "ste_dma40_ll.h"
+/**
+ * struct stedma40_platform_data - Configuration struct for the dma device.
+ *
+ * @dev_tx: mapping between destination event line and io address
+ * @dev_rx: mapping between source event line and io address
+ * @disabled_channels: A vector, ending with -1, that marks physical channels
+ * that are, for various reasons, not available to the driver.
+ * @soft_lli_chans: A vector that marks the physical channels that will use
+ * LLI by SW, which avoids a HW bug that exists in some versions of the
+ * controller. SoftLLI introduces relink overhead that could impact
+ * performance for certain use cases.
+ * @num_of_soft_lli_chans: The number of channels that need to be configured
+ * to use SoftLLI.
+ * @use_esram_lcla: flag for mapping the lcla into esram region
+ * @num_of_memcpy_chans: The number of channels reserved for memcpy.
+ * @num_of_phy_chans: The number of physical channels implemented in HW.
+ * 0 means reading the number of channels from DMA HW but this is only valid
+ * for 'multiple of 4' channels, like 8.
+ */
+struct stedma40_platform_data {
+ int disabled_channels[STEDMA40_MAX_PHYS];
+ int *soft_lli_chans;
+ int num_of_soft_lli_chans;
+ bool use_esram_lcla;
+ int num_of_memcpy_chans;
+ int num_of_phy_chans;
+};
+
#define D40_NAME "dma40"
#define D40_PHY_CHAN -1
@@ -107,7 +136,7 @@ static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
};
/**
- * enum 40_command - The different commands and/or statuses.
+ * enum d40_command - The different commands and/or statuses.
*
* @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
* @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN.
@@ -525,8 +554,6 @@ struct d40_gen_dmac {
* @virtbase: The virtual base address of the DMA's register.
* @rev: silicon revision detected.
* @clk: Pointer to the DMA clock structure.
- * @phy_start: Physical memory start of the DMA registers.
- * @phy_size: Size of the DMA register map.
* @irq: The IRQ number.
* @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
* transfers).
@@ -570,8 +597,6 @@ struct d40_base {
void __iomem *virtbase;
u8 rev:4;
struct clk *clk;
- phys_addr_t phy_start;
- resource_size_t phy_size;
int irq;
int num_memcpy_chans;
int num_phy_chans;
@@ -2268,7 +2293,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
return NULL;
}
-bool stedma40_filter(struct dma_chan *chan, void *data)
+static bool stedma40_filter(struct dma_chan *chan, void *data)
{
struct stedma40_chan_cfg *info = data;
struct d40_chan *d40c =
@@ -2287,7 +2312,6 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
return err == 0;
}
-EXPORT_SYMBOL(stedma40_filter);
static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
@@ -3100,64 +3124,57 @@ static int __init d40_phy_res_init(struct d40_base *base)
return num_phy_chans_avail;
}
-static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
+/* Called from the registered devm action */
+static void d40_drop_kmem_cache_action(void *d)
+{
+ struct kmem_cache *desc_slab = d;
+
+ kmem_cache_destroy(desc_slab);
+}
+
+static int __init d40_hw_detect_init(struct platform_device *pdev,
+ struct d40_base **retbase)
{
struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
struct clk *clk;
void __iomem *virtbase;
- struct resource *res;
struct d40_base *base;
int num_log_chans;
int num_phy_chans;
int num_memcpy_chans;
- int clk_ret = -EINVAL;
int i;
u32 pid;
u32 cid;
u8 rev;
+ int ret;
- clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- d40_err(&pdev->dev, "No matching clock found\n");
- goto check_prepare_enabled;
- }
-
- clk_ret = clk_prepare_enable(clk);
- if (clk_ret) {
- d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
- goto disable_unprepare;
- }
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
/* Get IO for DMAC base address */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
- if (!res)
- goto disable_unprepare;
-
- if (request_mem_region(res->start, resource_size(res),
- D40_NAME " I/O base") == NULL)
- goto release_region;
-
- virtbase = ioremap(res->start, resource_size(res));
- if (!virtbase)
- goto release_region;
+ virtbase = devm_platform_ioremap_resource_byname(pdev, "base");
+ if (IS_ERR(virtbase))
+ return PTR_ERR(virtbase);
/* This is just a regular AMBA PrimeCell ID actually */
for (pid = 0, i = 0; i < 4; i++)
- pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
+ pid |= (readl(virtbase + SZ_4K - 0x20 + 4 * i)
& 255) << (i * 8);
for (cid = 0, i = 0; i < 4; i++)
- cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
+ cid |= (readl(virtbase + SZ_4K - 0x10 + 4 * i)
& 255) << (i * 8);
if (cid != AMBA_CID) {
- d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
- goto unmap_io;
+ d40_err(dev, "Unknown hardware! No PrimeCell ID\n");
+ return -EINVAL;
}
if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
- d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
+ d40_err(dev, "Unknown designer! Got %x wanted %x\n",
AMBA_MANF_BITS(pid),
AMBA_VENDOR_ST);
- goto unmap_io;
+ return -EINVAL;
}
/*
* HW revision:
@@ -3170,8 +3187,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
*/
rev = AMBA_REV_BITS(pid);
if (rev < 2) {
- d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
- goto unmap_io;
+ d40_err(dev, "hardware revision: %d is not supported", rev);
+ return -EINVAL;
}
/* The number of physical channels on this HW */
@@ -3188,27 +3205,26 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
- dev_info(&pdev->dev,
- "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
- rev, &res->start, num_phy_chans, num_log_chans);
+ dev_info(dev,
+ "hardware rev: %d with %d physical and %d logical channels\n",
+ rev, num_phy_chans, num_log_chans);
- base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
- (num_phy_chans + num_log_chans + num_memcpy_chans) *
- sizeof(struct d40_chan), GFP_KERNEL);
+ base = devm_kzalloc(dev,
+ ALIGN(sizeof(struct d40_base), 4) +
+ (num_phy_chans + num_log_chans + num_memcpy_chans) *
+ sizeof(struct d40_chan), GFP_KERNEL);
- if (base == NULL)
- goto unmap_io;
+ if (!base)
+ return -ENOMEM;
base->rev = rev;
base->clk = clk;
base->num_memcpy_chans = num_memcpy_chans;
base->num_phy_chans = num_phy_chans;
base->num_log_chans = num_log_chans;
- base->phy_start = res->start;
- base->phy_size = resource_size(res);
base->virtbase = virtbase;
base->plat_data = plat_data;
- base->dev = &pdev->dev;
+ base->dev = dev;
base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
base->log_chans = &base->phy_chans[num_phy_chans];
@@ -3242,76 +3258,57 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
}
- base->phy_res = kcalloc(num_phy_chans,
- sizeof(*base->phy_res),
- GFP_KERNEL);
+ base->phy_res = devm_kcalloc(dev, num_phy_chans,
+ sizeof(*base->phy_res),
+ GFP_KERNEL);
if (!base->phy_res)
- goto free_base;
+ return -ENOMEM;
- base->lookup_phy_chans = kcalloc(num_phy_chans,
- sizeof(*base->lookup_phy_chans),
- GFP_KERNEL);
+ base->lookup_phy_chans = devm_kcalloc(dev, num_phy_chans,
+ sizeof(*base->lookup_phy_chans),
+ GFP_KERNEL);
if (!base->lookup_phy_chans)
- goto free_phy_res;
+ return -ENOMEM;
- base->lookup_log_chans = kcalloc(num_log_chans,
- sizeof(*base->lookup_log_chans),
- GFP_KERNEL);
+ base->lookup_log_chans = devm_kcalloc(dev, num_log_chans,
+ sizeof(*base->lookup_log_chans),
+ GFP_KERNEL);
if (!base->lookup_log_chans)
- goto free_phy_chans;
+ return -ENOMEM;
- base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
+ base->reg_val_backup_chan = devm_kmalloc_array(dev, base->num_phy_chans,
sizeof(d40_backup_regs_chan),
GFP_KERNEL);
if (!base->reg_val_backup_chan)
- goto free_log_chans;
+ return -ENOMEM;
- base->lcla_pool.alloc_map = kcalloc(num_phy_chans
+ base->lcla_pool.alloc_map = devm_kcalloc(dev, num_phy_chans
* D40_LCLA_LINK_PER_EVENT_GRP,
sizeof(*base->lcla_pool.alloc_map),
GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
- goto free_backup_chan;
+ return -ENOMEM;
- base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
+ base->regs_interrupt = devm_kmalloc_array(dev, base->gen_dmac.il_size,
sizeof(*base->regs_interrupt),
GFP_KERNEL);
if (!base->regs_interrupt)
- goto free_map;
+ return -ENOMEM;
base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
- if (base->desc_slab == NULL)
- goto free_regs;
-
-
- return base;
- free_regs:
- kfree(base->regs_interrupt);
- free_map:
- kfree(base->lcla_pool.alloc_map);
- free_backup_chan:
- kfree(base->reg_val_backup_chan);
- free_log_chans:
- kfree(base->lookup_log_chans);
- free_phy_chans:
- kfree(base->lookup_phy_chans);
- free_phy_res:
- kfree(base->phy_res);
- free_base:
- kfree(base);
- unmap_io:
- iounmap(virtbase);
- release_region:
- release_mem_region(res->start, resource_size(res));
- check_prepare_enabled:
- if (!clk_ret)
- disable_unprepare:
- clk_disable_unprepare(clk);
- if (!IS_ERR(clk))
- clk_put(clk);
- return NULL;
+ if (!base->desc_slab)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, d40_drop_kmem_cache_action,
+ base->desc_slab);
+ if (ret)
+ return ret;
+
+ *retbase = base;
+
+ return 0;
}
static void __init d40_hw_init(struct d40_base *base)
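d40_hw_detect_init() now hands the descriptor slab to devm_add_action_or_reset(), so the cache is destroyed automatically on probe failure or driver unbind instead of through the old goto ladder. A sketch of the devm-action pattern with stand-in helpers (add_action_or_reset() below only models the kernel helper's behaviour):

    #include <stdio.h>
    #include <stdlib.h>

    /* Release action, mirroring d40_drop_kmem_cache_action(). */
    static void drop_cache_action(void *cache)
    {
            printf("destroying descriptor cache\n");
            free(cache);
    }

    /*
     * Stand-in for devm_add_action_or_reset(): on registration failure the
     * action runs immediately, so the caller never has to free by hand.
     */
    static int add_action_or_reset(void (*action)(void *), void *data, int fail)
    {
            if (fail) {
                    action(data);
                    return -12;     /* -ENOMEM */
            }
            /* A real implementation would queue the action for teardown. */
            return 0;
    }

    static int hw_detect_init(int fail_registration)
    {
            void *desc_slab = malloc(64);   /* stands in for kmem_cache_create() */

            if (!desc_slab)
                    return -12;

            return add_action_or_reset(drop_cache_action, desc_slab,
                                       fail_registration);
    }

    int main(void)
    {
            return hw_detect_init(1) == -12 ? 0 : 1;
    }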
@@ -3451,14 +3448,14 @@ static int __init d40_lcla_allocate(struct d40_base *base)
return ret;
}
-static int __init d40_of_probe(struct platform_device *pdev,
+static int __init d40_of_probe(struct device *dev,
struct device_node *np)
{
struct stedma40_platform_data *pdata;
int num_phy = 0, num_memcpy = 0, num_disabled = 0;
const __be32 *list;
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -3471,7 +3468,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
num_memcpy /= sizeof(*list);
if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
- d40_err(&pdev->dev,
+ d40_err(dev,
"Invalid number of memcpy channels specified (%d)\n",
num_memcpy);
return -EINVAL;
@@ -3486,7 +3483,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
num_disabled /= sizeof(*list);
if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
- d40_err(&pdev->dev,
+ d40_err(dev,
"Invalid number of disabled channels specified (%d)\n",
num_disabled);
return -EINVAL;
@@ -3497,35 +3494,30 @@ static int __init d40_of_probe(struct platform_device *pdev,
num_disabled);
pdata->disabled_channels[num_disabled] = -1;
- pdev->dev.platform_data = pdata;
+ dev->platform_data = pdata;
return 0;
}
static int __init d40_probe(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
- int ret = -ENOENT;
+ struct device_node *np_lcpa;
struct d40_base *base;
struct resource *res;
+ struct resource res_lcpa;
int num_reserved_chans;
u32 val;
+ int ret;
- if (!plat_data) {
- if (np) {
- if (d40_of_probe(pdev, np)) {
- ret = -ENOMEM;
- goto report_failure;
- }
- } else {
- d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
- goto report_failure;
- }
+ if (d40_of_probe(dev, np)) {
+ ret = -ENOMEM;
+ goto report_failure;
}
- base = d40_hw_detect_init(pdev);
- if (!base)
+ ret = d40_hw_detect_init(pdev, &base);
+ if (ret)
goto report_failure;
num_reserved_chans = d40_phy_res_init(base);
@@ -3535,37 +3527,38 @@ static int __init d40_probe(struct platform_device *pdev)
spin_lock_init(&base->interrupt_lock);
spin_lock_init(&base->execmd_lock);
- /* Get IO for logical channel parameter address */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
- if (!res) {
- ret = -ENOENT;
- d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
- goto destroy_cache;
+ /* Get IO for logical channel parameter address (LCPA) */
+ np_lcpa = of_parse_phandle(np, "sram", 0);
+ if (!np_lcpa) {
+ dev_err(dev, "no LCPA SRAM node\n");
+ ret = -EINVAL;
+ goto report_failure;
}
- base->lcpa_size = resource_size(res);
- base->phy_lcpa = res->start;
-
- if (request_mem_region(res->start, resource_size(res),
- D40_NAME " I/O lcpa") == NULL) {
- ret = -EBUSY;
- d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
- goto destroy_cache;
+ /* This is not a device, so read the address directly from the node */
+ ret = of_address_to_resource(np_lcpa, 0, &res_lcpa);
+ if (ret) {
+ dev_err(dev, "no LCPA SRAM resource\n");
+ goto report_failure;
}
+ base->lcpa_size = resource_size(&res_lcpa);
+ base->phy_lcpa = res_lcpa.start;
+ dev_info(dev, "found LCPA SRAM at %pad, size %pa\n",
+ &base->phy_lcpa, &base->lcpa_size);
/* We make use of ESRAM memory for this. */
val = readl(base->virtbase + D40_DREG_LCPA);
- if (res->start != val && val != 0) {
- dev_warn(&pdev->dev,
- "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
- __func__, val, &res->start);
+ if (base->phy_lcpa != val && val != 0) {
+ dev_warn(dev,
+ "[%s] Mismatch LCPA dma 0x%x, def %08x\n",
+ __func__, val, (u32)base->phy_lcpa);
} else
- writel(res->start, base->virtbase + D40_DREG_LCPA);
+ writel(base->phy_lcpa, base->virtbase + D40_DREG_LCPA);
- base->lcpa_base = ioremap(res->start, resource_size(res));
+ base->lcpa_base = devm_ioremap(dev, base->phy_lcpa, base->lcpa_size);
if (!base->lcpa_base) {
ret = -ENOMEM;
- d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
- goto destroy_cache;
+ d40_err(dev, "Failed to ioremap LCPA region\n");
+ goto report_failure;
}
/* If lcla has to be located in ESRAM we don't need to allocate */
if (base->plat_data->use_esram_lcla) {
@@ -3573,23 +3566,23 @@ static int __init d40_probe(struct platform_device *pdev)
"lcla_esram");
if (!res) {
ret = -ENOENT;
- d40_err(&pdev->dev,
+ d40_err(dev,
"No \"lcla_esram\" memory resource\n");
- goto destroy_cache;
+ goto report_failure;
}
- base->lcla_pool.base = ioremap(res->start,
- resource_size(res));
+ base->lcla_pool.base = devm_ioremap(dev, res->start,
+ resource_size(res));
if (!base->lcla_pool.base) {
ret = -ENOMEM;
- d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
- goto destroy_cache;
+ d40_err(dev, "Failed to ioremap LCLA region\n");
+ goto report_failure;
}
writel(res->start, base->virtbase + D40_DREG_LCLA);
} else {
ret = d40_lcla_allocate(base);
if (ret) {
- d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+ d40_err(dev, "Failed to allocate LCLA area\n");
goto destroy_cache;
}
}
@@ -3600,7 +3593,7 @@ static int __init d40_probe(struct platform_device *pdev)
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
if (ret) {
- d40_err(&pdev->dev, "No IRQ defined\n");
+ d40_err(dev, "No IRQ defined\n");
goto destroy_cache;
}
@@ -3608,7 +3601,7 @@ static int __init d40_probe(struct platform_device *pdev)
base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
if (IS_ERR(base->lcpa_regulator)) {
- d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+ d40_err(dev, "Failed to get lcpa_regulator\n");
ret = PTR_ERR(base->lcpa_regulator);
base->lcpa_regulator = NULL;
goto destroy_cache;
@@ -3616,7 +3609,7 @@ static int __init d40_probe(struct platform_device *pdev)
ret = regulator_enable(base->lcpa_regulator);
if (ret) {
- d40_err(&pdev->dev,
+ d40_err(dev,
"Failed to enable lcpa_regulator\n");
regulator_put(base->lcpa_regulator);
base->lcpa_regulator = NULL;
@@ -3639,31 +3632,23 @@ static int __init d40_probe(struct platform_device *pdev)
ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
if (ret) {
- d40_err(&pdev->dev, "Failed to set dma max seg size\n");
+ d40_err(dev, "Failed to set dma max seg size\n");
goto destroy_cache;
}
d40_hw_init(base);
- if (np) {
- ret = of_dma_controller_register(np, d40_xlate, NULL);
- if (ret)
- dev_err(&pdev->dev,
- "could not register of_dma_controller\n");
+ ret = of_dma_controller_register(np, d40_xlate, NULL);
+ if (ret) {
+ dev_err(dev,
+ "could not register of_dma_controller\n");
+ goto destroy_cache;
}
dev_info(base->dev, "initialized\n");
return 0;
- destroy_cache:
- kmem_cache_destroy(base->desc_slab);
- if (base->virtbase)
- iounmap(base->virtbase);
-
- if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
- iounmap(base->lcla_pool.base);
- base->lcla_pool.base = NULL;
- }
+ destroy_cache:
if (base->lcla_pool.dma_addr)
dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
SZ_1K * base->num_phy_chans,
@@ -3675,32 +3660,13 @@ static int __init d40_probe(struct platform_device *pdev)
kfree(base->lcla_pool.base_unaligned);
- if (base->lcpa_base)
- iounmap(base->lcpa_base);
-
- if (base->phy_lcpa)
- release_mem_region(base->phy_lcpa,
- base->lcpa_size);
- if (base->phy_start)
- release_mem_region(base->phy_start,
- base->phy_size);
- if (base->clk) {
- clk_disable_unprepare(base->clk);
- clk_put(base->clk);
- }
-
if (base->lcpa_regulator) {
regulator_disable(base->lcpa_regulator);
regulator_put(base->lcpa_regulator);
}
- kfree(base->lcla_pool.alloc_map);
- kfree(base->lookup_log_chans);
- kfree(base->lookup_phy_chans);
- kfree(base->phy_res);
- kfree(base);
report_failure:
- d40_err(&pdev->dev, "probe failed\n");
+ d40_err(dev, "probe failed\n");
return ret;
}
diff --git a/include/linux/platform_data/dma-ste-dma40.h b/drivers/dma/ste_dma40.h
index 10641633facc..c697bfe16a01 100644
--- a/include/linux/platform_data/dma-ste-dma40.h
+++ b/drivers/dma/ste_dma40.h
@@ -1,19 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
- */
-
#ifndef STE_DMA40_H
#define STE_DMA40_H
-#include <linux/dmaengine.h>
-#include <linux/scatterlist.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-
/*
 * Maximum size for a single dma descriptor
* Size is limited to 16 bits.
@@ -118,92 +107,4 @@ struct stedma40_chan_cfg {
int phy_channel;
};
-/**
- * struct stedma40_platform_data - Configuration struct for the dma device.
- *
- * @dev_tx: mapping between destination event line and io address
- * @dev_rx: mapping between source event line and io address
- * @disabled_channels: A vector, ending with -1, that marks physical channels
- * that are for different reasons not available for the driver.
- * @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
- * which avoids HW bug that exists in some versions of the controller.
- * SoftLLI introduces relink overhead that could impact performace for
- * certain use cases.
- * @num_of_soft_lli_chans: The number of channels that needs to be configured
- * to use SoftLLI.
- * @use_esram_lcla: flag for mapping the lcla into esram region
- * @num_of_memcpy_chans: The number of channels reserved for memcpy.
- * @num_of_phy_chans: The number of physical channels implemented in HW.
- * 0 means reading the number of channels from DMA HW but this is only valid
- * for 'multiple of 4' channels, like 8.
- */
-struct stedma40_platform_data {
- int disabled_channels[STEDMA40_MAX_PHYS];
- int *soft_lli_chans;
- int num_of_soft_lli_chans;
- bool use_esram_lcla;
- int num_of_memcpy_chans;
- int num_of_phy_chans;
-};
-
-#ifdef CONFIG_STE_DMA40
-
-/**
- * stedma40_filter() - Provides stedma40_chan_cfg to the
- * ste_dma40 dma driver via the dmaengine framework.
- * does some checking of what's provided.
- *
- * Never directly called by client. It used by dmaengine.
- * @chan: dmaengine handle.
- * @data: Must be of type: struct stedma40_chan_cfg and is
- * the configuration of the framework.
- *
- *
- */
-
-bool stedma40_filter(struct dma_chan *chan, void *data);
-
-/**
- * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
- * (=device)
- *
- * @chan: dmaengine handle
- * @addr: source or destination physicall address.
- * @size: bytes to transfer
- * @direction: direction of transfer
- * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
- */
-
-static inline struct
-dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
- dma_addr_t addr,
- unsigned int size,
- enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct scatterlist sg;
- sg_init_table(&sg, 1);
- sg.dma_address = addr;
- sg.length = size;
-
- return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
-}
-
-#else
-static inline bool stedma40_filter(struct dma_chan *chan, void *data)
-{
- return false;
-}
-
-static inline struct
-dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
- dma_addr_t addr,
- unsigned int size,
- enum dma_transfer_direction direction,
- unsigned long flags)
-{
- return NULL;
-}
-#endif
-
-#endif
+#endif /* STE_DMA40_H */
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index b5287c661eb7..4c489b126cb2 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -6,8 +6,9 @@
*/
#include <linux/kernel.h>
-#include <linux/platform_data/dma-ste-dma40.h>
+#include <linux/dmaengine.h>
+#include "ste_dma40.h"
#include "ste_dma40_ll.h"
static u8 d40_width_to_bits(enum dma_slave_buswidth width)
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index a488c2250623..1d5430fc5724 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -99,6 +99,8 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x461d),
PSIL_PDMA_XY_PKT(0x461e),
PSIL_PDMA_XY_PKT(0x461f),
+ /* MAIN_CPSW2G */
+ PSIL_ETHERNET(0x4640),
/* PDMA_USART_G0 - UART0-1 */
PSIL_PDMA_XY_PKT(0x4700),
PSIL_PDMA_XY_PKT(0x4701),
@@ -161,6 +163,15 @@ static struct psil_ep j721s2_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
+ /* MAIN_CPSW2G */
+ PSIL_ETHERNET(0xc640),
+ PSIL_ETHERNET(0xc641),
+ PSIL_ETHERNET(0xc642),
+ PSIL_ETHERNET(0xc643),
+ PSIL_ETHERNET(0xc644),
+ PSIL_ETHERNET(0xc645),
+ PSIL_ETHERNET(0xc646),
+ PSIL_ETHERNET(0xc647),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index b8329a23728d..eb4dc5fffe64 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4308,6 +4308,15 @@ static struct udma_soc_data am62a_dmss_csi_soc_data = {
},
};
+static struct udma_soc_data j721s2_bcdma_csi_soc_data = {
+ .oes = {
+ .bcdma_tchan_data = 0x800,
+ .bcdma_tchan_ring = 0xa00,
+ .bcdma_rchan_data = 0xe00,
+ .bcdma_rchan_ring = 0x1000,
+ },
+};
+
static struct udma_match_data am62a_bcdma_csirx_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x3100,
@@ -4346,6 +4355,18 @@ static struct udma_match_data am64_pktdma_data = {
},
};
+static struct udma_match_data j721s2_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x2000,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@@ -4373,6 +4394,10 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,am62a-dmss-bcdma-csirx",
.data = &am62a_bcdma_csirx_data,
},
+ {
+ .compatible = "ti,j721s2-dmss-bcdma-csi",
+ .data = &j721s2_bcdma_csi_data,
+ },
{ /* Sentinel */ },
};
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 290186e44e6b..0ef1971d22bb 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -185,6 +185,7 @@ config EXTCON_USBC_TUSB320
tristate "TI TUSB320 USB-C extcon support"
depends on I2C && TYPEC
select REGMAP_I2C
+ depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
help
Say Y here to enable support for USB Type C cable detection extcon
support using a TUSB320.
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 180be768c215..a703a8315634 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -393,7 +393,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
adev = acpi_dev_get_first_match_dev("INT3496", NULL, -1);
if (adev) {
info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
- put_device(&adev->dev);
+ acpi_dev_put(adev);
if (IS_ERR(info->id_extcon))
return PTR_ERR(info->id_extcon);
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index e8b2671eb29b..e458ce0c45ab 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -369,7 +369,7 @@ static struct i2c_driver fsa9480_i2c_driver = {
.pm = &fsa9480_pm_ops,
.of_match_table = fsa9480_of_match,
},
- .probe_new = fsa9480_probe,
+ .probe = fsa9480_probe,
.id_table = fsa9480_id,
};
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 32f8b541770b..d339b8680445 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -18,7 +18,6 @@
#include <linux/mfd/palmas.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/workqueue.h>
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index 017a07197f38..4616da7e5430 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -348,7 +348,7 @@ static struct i2c_driver ptn5150_i2c_driver = {
.name = "ptn5150",
.of_match_table = ptn5150_dt_match,
},
- .probe_new = ptn5150_i2c_probe,
+ .probe = ptn5150_i2c_probe,
.id_table = ptn5150_i2c_id,
};
module_i2c_driver(ptn5150_i2c_driver);
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index eb02cb962b5e..f72e90ceca53 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -123,7 +123,7 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
if (ret)
return ret;
- info->id_irq = platform_get_irq_byname(pdev, "usb_id");
+ info->id_irq = platform_get_irq_byname_optional(pdev, "usb_id");
if (info->id_irq > 0) {
ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
qcom_usb_irq_handler,
@@ -136,7 +136,7 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
}
}
- info->vbus_irq = platform_get_irq_byname(pdev, "usb_vbus");
+ info->vbus_irq = platform_get_irq_byname_optional(pdev, "usb_vbus");
if (info->vbus_irq > 0) {
ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
qcom_usb_irq_handler,
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index afc9b405d103..19bb49f13fb0 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -695,7 +695,7 @@ static struct i2c_driver rt8973a_muic_i2c_driver = {
.pm = &rt8973a_muic_pm_ops,
.of_match_table = rt8973a_dt_match,
},
- .probe_new = rt8973a_muic_i2c_probe,
+ .probe = rt8973a_muic_i2c_probe,
.remove = rt8973a_muic_i2c_remove,
.id_table = rt8973a_i2c_id,
};
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 8401e8b27788..c8c4b9ef72aa 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -840,7 +840,7 @@ static struct i2c_driver sm5502_muic_i2c_driver = {
.pm = &sm5502_muic_pm_ops,
.of_match_table = sm5502_dt_match,
},
- .probe_new = sm5022_muic_i2c_probe,
+ .probe = sm5022_muic_i2c_probe,
.id_table = sm5502_i2c_id,
};
diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
index b408ce989c22..4d08c2123e59 100644
--- a/drivers/extcon/extcon-usbc-tusb320.c
+++ b/drivers/extcon/extcon-usbc-tusb320.c
@@ -15,6 +15,8 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/role.h>
#define TUSB320_REG8 0x8
#define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6)
@@ -26,16 +28,16 @@
#define TUSB320_REG8_CURRENT_MODE_DETECT_MED 0x1
#define TUSB320_REG8_CURRENT_MODE_DETECT_ACC 0x2
#define TUSB320_REG8_CURRENT_MODE_DETECT_HI 0x3
-#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 2)
+#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 1)
#define TUSB320_REG8_ACCESSORY_CONNECTED_NONE 0x0
#define TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO 0x4
-#define TUSB320_REG8_ACCESSORY_CONNECTED_ACC 0x5
-#define TUSB320_REG8_ACCESSORY_CONNECTED_DEBUG 0x6
+#define TUSB320_REG8_ACCESSORY_CONNECTED_ACHRG 0x5
+#define TUSB320_REG8_ACCESSORY_CONNECTED_DBGDFP 0x6
+#define TUSB320_REG8_ACCESSORY_CONNECTED_DBGUFP 0x7
#define TUSB320_REG8_ACTIVE_CABLE_DETECTION BIT(0)
#define TUSB320_REG9 0x9
-#define TUSB320_REG9_ATTACHED_STATE_SHIFT 6
-#define TUSB320_REG9_ATTACHED_STATE_MASK 0x3
+#define TUSB320_REG9_ATTACHED_STATE GENMASK(7, 6)
#define TUSB320_REG9_CABLE_DIRECTION BIT(5)
#define TUSB320_REG9_INTERRUPT_STATUS BIT(4)
@@ -78,6 +80,8 @@ struct tusb320_priv {
struct typec_capability cap;
enum typec_port_type port_type;
enum typec_pwr_opmode pwr_opmode;
+ struct fwnode_handle *connector_fwnode;
+ struct usb_role_switch *role_sw;
};
static const char * const tusb_attached_states[] = {
@@ -249,8 +253,7 @@ static void tusb320_extcon_irq_handler(struct tusb320_priv *priv, u8 reg)
{
int state, polarity;
- state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
- TUSB320_REG9_ATTACHED_STATE_MASK;
+ state = FIELD_GET(TUSB320_REG9_ATTACHED_STATE, reg);
polarity = !!(reg & TUSB320_REG9_CABLE_DIRECTION);
dev_dbg(priv->dev, "attached state: %s, polarity: %d\n",
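The attached-state decoding above now uses FIELD_GET()/GENMASK() from <linux/bitfield.h>, which extracts a register field without a hand-written shift. A minimal sketch of the equivalence, with an illustrative mask and value (the names here are hypothetical, not part of the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_ATTACHED_STATE	GENMASK(7, 6)	/* same shape as TUSB320_REG9_ATTACHED_STATE */

static u8 example_attached_state(u8 reg9)
{
	/* FIELD_GET(GENMASK(7, 6), x) evaluates to (x & GENMASK(7, 6)) >> 6,
	 * so the mask and the shift can no longer drift apart; reg9 = 0x80
	 * (bit 7 set, bit 6 clear) yields 0x2 here.
	 */
	return FIELD_GET(EXAMPLE_ATTACHED_STATE, reg9);
}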
@@ -276,32 +279,86 @@ static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9)
{
struct typec_port *port = priv->port;
struct device *dev = priv->dev;
- u8 mode, role, state;
+ int typec_mode;
+ enum usb_role usb_role;
+ enum typec_role pwr_role;
+ enum typec_data_role data_role;
+ u8 state, mode, accessory;
int ret, reg8;
bool ori;
+ ret = regmap_read(priv->regmap, TUSB320_REG8, &reg8);
+ if (ret) {
+ dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret);
+ return;
+ }
+
ori = reg9 & TUSB320_REG9_CABLE_DIRECTION;
typec_set_orientation(port, ori ? TYPEC_ORIENTATION_REVERSE :
TYPEC_ORIENTATION_NORMAL);
- state = (reg9 >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
- TUSB320_REG9_ATTACHED_STATE_MASK;
- if (state == TUSB320_ATTACHED_STATE_DFP)
- role = TYPEC_SOURCE;
- else
- role = TYPEC_SINK;
+ state = FIELD_GET(TUSB320_REG9_ATTACHED_STATE, reg9);
+ accessory = FIELD_GET(TUSB320_REG8_ACCESSORY_CONNECTED, reg8);
+
+ switch (state) {
+ case TUSB320_ATTACHED_STATE_DFP:
+ typec_mode = TYPEC_MODE_USB2;
+ usb_role = USB_ROLE_HOST;
+ pwr_role = TYPEC_SOURCE;
+ data_role = TYPEC_HOST;
+ break;
+ case TUSB320_ATTACHED_STATE_UFP:
+ typec_mode = TYPEC_MODE_USB2;
+ usb_role = USB_ROLE_DEVICE;
+ pwr_role = TYPEC_SINK;
+ data_role = TYPEC_DEVICE;
+ break;
+ case TUSB320_ATTACHED_STATE_ACC:
+ /*
+ * Accessory detected. For debug accessories, just make some
+ * qualified guesses as to the role for lack of a better option.
+ */
+ if (accessory == TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO ||
+ accessory == TUSB320_REG8_ACCESSORY_CONNECTED_ACHRG) {
+ typec_mode = TYPEC_MODE_AUDIO;
+ usb_role = USB_ROLE_NONE;
+ pwr_role = TYPEC_SINK;
+ data_role = TYPEC_DEVICE;
+ break;
+ } else if (accessory ==
+ TUSB320_REG8_ACCESSORY_CONNECTED_DBGDFP) {
+ typec_mode = TYPEC_MODE_DEBUG;
+ pwr_role = TYPEC_SOURCE;
+ usb_role = USB_ROLE_HOST;
+ data_role = TYPEC_HOST;
+ break;
+ } else if (accessory ==
+ TUSB320_REG8_ACCESSORY_CONNECTED_DBGUFP) {
+ typec_mode = TYPEC_MODE_DEBUG;
+ pwr_role = TYPEC_SINK;
+ usb_role = USB_ROLE_DEVICE;
+ data_role = TYPEC_DEVICE;
+ break;
+ }
- typec_set_vconn_role(port, role);
- typec_set_pwr_role(port, role);
- typec_set_data_role(port, role == TYPEC_SOURCE ?
- TYPEC_HOST : TYPEC_DEVICE);
+ dev_warn(priv->dev, "unexpected ACCESSORY_CONNECTED state %d\n",
+ accessory);
- ret = regmap_read(priv->regmap, TUSB320_REG8, &reg8);
- if (ret) {
- dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret);
- return;
+ fallthrough;
+ default:
+ typec_mode = TYPEC_MODE_USB2;
+ usb_role = USB_ROLE_NONE;
+ pwr_role = TYPEC_SINK;
+ data_role = TYPEC_DEVICE;
+ break;
}
+ typec_set_vconn_role(port, pwr_role);
+ typec_set_pwr_role(port, pwr_role);
+ typec_set_data_role(port, data_role);
+ typec_set_mode(port, typec_mode);
+ usb_role_switch_set_role(priv->role_sw, usb_role);
+
mode = FIELD_GET(TUSB320_REG8_CURRENT_MODE_DETECT, reg8);
if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_DEF)
typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
@@ -391,27 +448,25 @@ static int tusb320_typec_probe(struct i2c_client *client,
/* Type-C connector found. */
ret = typec_get_fw_cap(&priv->cap, connector);
if (ret)
- return ret;
+ goto err_put;
priv->port_type = priv->cap.type;
/* This goes into register 0x8 field CURRENT_MODE_ADVERTISE */
ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str);
if (ret)
- return ret;
+ goto err_put;
ret = typec_find_pwr_opmode(cap_str);
if (ret < 0)
- return ret;
- if (ret == TYPEC_PWR_MODE_PD)
- return -EINVAL;
+ goto err_put;
priv->pwr_opmode = ret;
/* Initialize the hardware with the devicetree settings. */
ret = tusb320_set_adv_pwr_mode(priv);
if (ret)
- return ret;
+ goto err_put;
priv->cap.revision = USB_TYPEC_REV_1_1;
priv->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO;
@@ -422,10 +477,36 @@ static int tusb320_typec_probe(struct i2c_client *client,
priv->cap.fwnode = connector;
priv->port = typec_register_port(&client->dev, &priv->cap);
- if (IS_ERR(priv->port))
- return PTR_ERR(priv->port);
+ if (IS_ERR(priv->port)) {
+ ret = PTR_ERR(priv->port);
+ goto err_put;
+ }
+
+ /* Find any optional USB role switch that needs reporting to */
+ priv->role_sw = fwnode_usb_role_switch_get(connector);
+ if (IS_ERR(priv->role_sw)) {
+ ret = PTR_ERR(priv->role_sw);
+ goto err_unreg;
+ }
+
+ priv->connector_fwnode = connector;
return 0;
+
+err_unreg:
+ typec_unregister_port(priv->port);
+
+err_put:
+ fwnode_handle_put(connector);
+
+ return ret;
+}
+
+static void tusb320_typec_remove(struct tusb320_priv *priv)
+{
+ usb_role_switch_put(priv->role_sw);
+ typec_unregister_port(priv->port);
+ fwnode_handle_put(priv->connector_fwnode);
}
static int tusb320_probe(struct i2c_client *client)
@@ -438,7 +519,9 @@ static int tusb320_probe(struct i2c_client *client)
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+
priv->dev = &client->dev;
+ i2c_set_clientdata(client, priv);
priv->regmap = devm_regmap_init_i2c(client, &tusb320_regmap_config);
if (IS_ERR(priv->regmap))
@@ -489,10 +572,19 @@ static int tusb320_probe(struct i2c_client *client)
tusb320_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, priv);
+ if (ret)
+ tusb320_typec_remove(priv);
return ret;
}
+static void tusb320_remove(struct i2c_client *client)
+{
+ struct tusb320_priv *priv = i2c_get_clientdata(client);
+
+ tusb320_typec_remove(priv);
+}
+
static const struct of_device_id tusb320_extcon_dt_match[] = {
{ .compatible = "ti,tusb320", .data = &tusb320_ops, },
{ .compatible = "ti,tusb320l", .data = &tusb320l_ops, },
@@ -501,7 +593,8 @@ static const struct of_device_id tusb320_extcon_dt_match[] = {
MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);
static struct i2c_driver tusb320_extcon_driver = {
- .probe_new = tusb320_probe,
+ .probe = tusb320_probe,
+ .remove = tusb320_remove,
.driver = {
.name = "extcon-tusb320",
.of_match_table = tusb320_extcon_dt_match,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index d43ba8e7260d..6f7a60d2ed91 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/idr.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs.h>
@@ -206,6 +207,14 @@ static const struct __extcon_info {
* @attr_name: "name" sysfs entry
* @attr_state: "state" sysfs entry
* @attrs: the array pointing to attr_name and attr_state for attr_g
+ * @usb_propval: the array of USB connector properties
+ * @chg_propval: the array of charger connector properties
+ * @jack_propval: the array of jack connector properties
+ * @disp_propval: the array of display connector properties
+ * @usb_bits: the bit array of the USB connector property capabilities
+ * @chg_bits: the bit array of the charger connector property capabilities
+ * @jack_bits: the bit array of the jack connector property capabilities
+ * @disp_bits: the bit array of the display connector property capabilities
*/
struct extcon_cable {
struct extcon_dev *edev;
@@ -222,20 +231,21 @@ struct extcon_cable {
union extcon_property_value jack_propval[EXTCON_PROP_JACK_CNT];
union extcon_property_value disp_propval[EXTCON_PROP_DISP_CNT];
- unsigned long usb_bits[BITS_TO_LONGS(EXTCON_PROP_USB_CNT)];
- unsigned long chg_bits[BITS_TO_LONGS(EXTCON_PROP_CHG_CNT)];
- unsigned long jack_bits[BITS_TO_LONGS(EXTCON_PROP_JACK_CNT)];
- unsigned long disp_bits[BITS_TO_LONGS(EXTCON_PROP_DISP_CNT)];
+ DECLARE_BITMAP(usb_bits, EXTCON_PROP_USB_CNT);
+ DECLARE_BITMAP(chg_bits, EXTCON_PROP_CHG_CNT);
+ DECLARE_BITMAP(jack_bits, EXTCON_PROP_JACK_CNT);
+ DECLARE_BITMAP(disp_bits, EXTCON_PROP_DISP_CNT);
};
static struct class *extcon_class;
+static DEFINE_IDA(extcon_dev_ids);
static LIST_HEAD(extcon_dev_list);
static DEFINE_MUTEX(extcon_dev_list_lock);
static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
{
- int i = 0;
+ int i;
if (!edev->mutually_exclusive)
return 0;
@@ -362,12 +372,12 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
struct extcon_dev *edev = dev_get_drvdata(dev);
if (edev->max_supported == 0)
- return sprintf(buf, "%u\n", edev->state);
+ return sysfs_emit(buf, "%u\n", edev->state);
for (i = 0; i < edev->max_supported; i++) {
- count += sprintf(buf + count, "%s=%d\n",
- extcon_info[edev->supported_cable[i]].name,
- !!(edev->state & BIT(i)));
+ count += sysfs_emit_at(buf, count, "%s=%d\n",
+ extcon_info[edev->supported_cable[i]].name,
+ !!(edev->state & BIT(i)));
}
return count;
@@ -379,7 +389,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
{
struct extcon_dev *edev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", edev->name);
+ return sysfs_emit(buf, "%s\n", edev->name);
}
static DEVICE_ATTR_RO(name);
@@ -390,8 +400,8 @@ static ssize_t cable_name_show(struct device *dev,
attr_name);
int i = cable->cable_index;
- return sprintf(buf, "%s\n",
- extcon_info[cable->edev->supported_cable[i]].name);
+ return sysfs_emit(buf, "%s\n",
+ extcon_info[cable->edev->supported_cable[i]].name);
}
static ssize_t cable_state_show(struct device *dev,
@@ -402,8 +412,8 @@ static ssize_t cable_state_show(struct device *dev,
int i = cable->cable_index;
- return sprintf(buf, "%d\n",
- extcon_get_state(cable->edev, cable->edev->supported_cable[i]));
+ return sysfs_emit(buf, "%d\n",
+ extcon_get_state(cable->edev, cable->edev->supported_cable[i]));
}
/**
@@ -1012,12 +1022,13 @@ ATTRIBUTE_GROUPS(extcon);
static int create_extcon_class(void)
{
- if (!extcon_class) {
- extcon_class = class_create("extcon");
- if (IS_ERR(extcon_class))
- return PTR_ERR(extcon_class);
- extcon_class->dev_groups = extcon_groups;
- }
+ if (extcon_class)
+ return 0;
+
+ extcon_class = class_create("extcon");
+ if (IS_ERR(extcon_class))
+ return PTR_ERR(extcon_class);
+ extcon_class->dev_groups = extcon_groups;
return 0;
}
@@ -1070,6 +1081,156 @@ void extcon_dev_free(struct extcon_dev *edev)
EXPORT_SYMBOL_GPL(extcon_dev_free);
/**
+ * extcon_alloc_cables() - alloc the cables for extcon device
+ * @edev: extcon device which has cables
+ *
+ * Returns 0 if success or error number if fail.
+ */
+static int extcon_alloc_cables(struct extcon_dev *edev)
+{
+ int index;
+ char *str;
+ struct extcon_cable *cable;
+
+ if (!edev)
+ return -EINVAL;
+
+ if (!edev->max_supported)
+ return 0;
+
+ edev->cables = kcalloc(edev->max_supported, sizeof(*edev->cables),
+ GFP_KERNEL);
+ if (!edev->cables)
+ return -ENOMEM;
+
+ for (index = 0; index < edev->max_supported; index++) {
+ cable = &edev->cables[index];
+
+ str = kasprintf(GFP_KERNEL, "cable.%d", index);
+ if (!str) {
+ for (index--; index >= 0; index--) {
+ cable = &edev->cables[index];
+ kfree(cable->attr_g.name);
+ }
+
+ kfree(edev->cables);
+ return -ENOMEM;
+ }
+
+ cable->edev = edev;
+ cable->cable_index = index;
+ cable->attrs[0] = &cable->attr_name.attr;
+ cable->attrs[1] = &cable->attr_state.attr;
+ cable->attrs[2] = NULL;
+ cable->attr_g.name = str;
+ cable->attr_g.attrs = cable->attrs;
+
+ sysfs_attr_init(&cable->attr_name.attr);
+ cable->attr_name.attr.name = "name";
+ cable->attr_name.attr.mode = 0444;
+ cable->attr_name.show = cable_name_show;
+
+ sysfs_attr_init(&cable->attr_state.attr);
+ cable->attr_state.attr.name = "state";
+ cable->attr_state.attr.mode = 0444;
+ cable->attr_state.show = cable_state_show;
+ }
+
+ return 0;
+}
+
+/**
+ * extcon_alloc_muex() - alloc the mutual exclusive for extcon device
+ * @edev: extcon device
+ *
+ * Returns 0 if success or error number if fail.
+ */
+static int extcon_alloc_muex(struct extcon_dev *edev)
+{
+ char *name;
+ int index;
+
+ if (!edev)
+ return -EINVAL;
+
+ if (!(edev->max_supported && edev->mutually_exclusive))
+ return 0;
+
+ /* Count the size of mutually_exclusive array */
+ for (index = 0; edev->mutually_exclusive[index]; index++)
+ ;
+
+ edev->attrs_muex = kcalloc(index + 1, sizeof(*edev->attrs_muex),
+ GFP_KERNEL);
+ if (!edev->attrs_muex)
+ return -ENOMEM;
+
+ edev->d_attrs_muex = kcalloc(index, sizeof(*edev->d_attrs_muex),
+ GFP_KERNEL);
+ if (!edev->d_attrs_muex) {
+ kfree(edev->attrs_muex);
+ return -ENOMEM;
+ }
+
+ for (index = 0; edev->mutually_exclusive[index]; index++) {
+ name = kasprintf(GFP_KERNEL, "0x%x",
+ edev->mutually_exclusive[index]);
+ if (!name) {
+ for (index--; index >= 0; index--)
+ kfree(edev->d_attrs_muex[index].attr.name);
+
+ kfree(edev->d_attrs_muex);
+ kfree(edev->attrs_muex);
+ return -ENOMEM;
+ }
+ sysfs_attr_init(&edev->d_attrs_muex[index].attr);
+ edev->d_attrs_muex[index].attr.name = name;
+ edev->d_attrs_muex[index].attr.mode = 0000;
+ edev->attrs_muex[index] = &edev->d_attrs_muex[index].attr;
+ }
+ edev->attr_g_muex.name = muex_name;
+ edev->attr_g_muex.attrs = edev->attrs_muex;
+
+ return 0;
+}
+
+/**
+ * extcon_alloc_groups() - alloc the groups for extcon device
+ * @edev: extcon device
+ *
+ * Returns 0 if success or error number if fail.
+ */
+static int extcon_alloc_groups(struct extcon_dev *edev)
+{
+ int index;
+
+ if (!edev)
+ return -EINVAL;
+
+ if (!edev->max_supported)
+ return 0;
+
+ edev->extcon_dev_type.groups = kcalloc(edev->max_supported + 2,
+ sizeof(*edev->extcon_dev_type.groups),
+ GFP_KERNEL);
+ if (!edev->extcon_dev_type.groups)
+ return -ENOMEM;
+
+ edev->extcon_dev_type.name = dev_name(&edev->dev);
+ edev->extcon_dev_type.release = dummy_sysfs_dev_release;
+
+ for (index = 0; index < edev->max_supported; index++)
+ edev->extcon_dev_type.groups[index] = &edev->cables[index].attr_g;
+
+ if (edev->mutually_exclusive)
+ edev->extcon_dev_type.groups[index] = &edev->attr_g_muex;
+
+ edev->dev.type = &edev->extcon_dev_type;
+
+ return 0;
+}
+
+/**
 * extcon_dev_register() - Register a new extcon device
* @edev: the extcon device to be registered
*
@@ -1085,19 +1246,16 @@ EXPORT_SYMBOL_GPL(extcon_dev_free);
*/
int extcon_dev_register(struct extcon_dev *edev)
{
- int ret, index = 0;
- static atomic_t edev_no = ATOMIC_INIT(-1);
+ int ret, index;
- if (!extcon_class) {
- ret = create_extcon_class();
- if (ret < 0)
- return ret;
- }
+ ret = create_extcon_class();
+ if (ret < 0)
+ return ret;
if (!edev || !edev->supported_cable)
return -EINVAL;
- for (; edev->supported_cable[index] != EXTCON_NONE; index++);
+ for (index = 0; edev->supported_cable[index] != EXTCON_NONE; index++);
edev->max_supported = index;
if (index > SUPPORTED_CABLE_MAX) {
@@ -1115,124 +1273,26 @@ int extcon_dev_register(struct extcon_dev *edev)
"extcon device name is null\n");
return -EINVAL;
}
- dev_set_name(&edev->dev, "extcon%lu",
- (unsigned long)atomic_inc_return(&edev_no));
-
- if (edev->max_supported) {
- char *str;
- struct extcon_cable *cable;
-
- edev->cables = kcalloc(edev->max_supported,
- sizeof(struct extcon_cable),
- GFP_KERNEL);
- if (!edev->cables) {
- ret = -ENOMEM;
- goto err_sysfs_alloc;
- }
- for (index = 0; index < edev->max_supported; index++) {
- cable = &edev->cables[index];
-
- str = kasprintf(GFP_KERNEL, "cable.%d", index);
- if (!str) {
- for (index--; index >= 0; index--) {
- cable = &edev->cables[index];
- kfree(cable->attr_g.name);
- }
- ret = -ENOMEM;
-
- goto err_alloc_cables;
- }
-
- cable->edev = edev;
- cable->cable_index = index;
- cable->attrs[0] = &cable->attr_name.attr;
- cable->attrs[1] = &cable->attr_state.attr;
- cable->attrs[2] = NULL;
- cable->attr_g.name = str;
- cable->attr_g.attrs = cable->attrs;
-
- sysfs_attr_init(&cable->attr_name.attr);
- cable->attr_name.attr.name = "name";
- cable->attr_name.attr.mode = 0444;
- cable->attr_name.show = cable_name_show;
-
- sysfs_attr_init(&cable->attr_state.attr);
- cable->attr_state.attr.name = "state";
- cable->attr_state.attr.mode = 0444;
- cable->attr_state.show = cable_state_show;
- }
- }
- if (edev->max_supported && edev->mutually_exclusive) {
- char *name;
-
- /* Count the size of mutually_exclusive array */
- for (index = 0; edev->mutually_exclusive[index]; index++)
- ;
-
- edev->attrs_muex = kcalloc(index + 1,
- sizeof(struct attribute *),
- GFP_KERNEL);
- if (!edev->attrs_muex) {
- ret = -ENOMEM;
- goto err_muex;
- }
-
- edev->d_attrs_muex = kcalloc(index,
- sizeof(struct device_attribute),
- GFP_KERNEL);
- if (!edev->d_attrs_muex) {
- ret = -ENOMEM;
- kfree(edev->attrs_muex);
- goto err_muex;
- }
-
- for (index = 0; edev->mutually_exclusive[index]; index++) {
- name = kasprintf(GFP_KERNEL, "0x%x",
- edev->mutually_exclusive[index]);
- if (!name) {
- for (index--; index >= 0; index--) {
- kfree(edev->d_attrs_muex[index].attr.
- name);
- }
- kfree(edev->d_attrs_muex);
- kfree(edev->attrs_muex);
- ret = -ENOMEM;
- goto err_muex;
- }
- sysfs_attr_init(&edev->d_attrs_muex[index].attr);
- edev->d_attrs_muex[index].attr.name = name;
- edev->d_attrs_muex[index].attr.mode = 0000;
- edev->attrs_muex[index] = &edev->d_attrs_muex[index]
- .attr;
- }
- edev->attr_g_muex.name = muex_name;
- edev->attr_g_muex.attrs = edev->attrs_muex;
+ ret = ida_alloc(&extcon_dev_ids, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- }
+ edev->id = ret;
- if (edev->max_supported) {
- edev->extcon_dev_type.groups =
- kcalloc(edev->max_supported + 2,
- sizeof(struct attribute_group *),
- GFP_KERNEL);
- if (!edev->extcon_dev_type.groups) {
- ret = -ENOMEM;
- goto err_alloc_groups;
- }
+ dev_set_name(&edev->dev, "extcon%d", edev->id);
- edev->extcon_dev_type.name = dev_name(&edev->dev);
- edev->extcon_dev_type.release = dummy_sysfs_dev_release;
+ ret = extcon_alloc_cables(edev);
+ if (ret < 0)
+ goto err_alloc_cables;
- for (index = 0; index < edev->max_supported; index++)
- edev->extcon_dev_type.groups[index] =
- &edev->cables[index].attr_g;
- if (edev->mutually_exclusive)
- edev->extcon_dev_type.groups[index] =
- &edev->attr_g_muex;
+ ret = extcon_alloc_muex(edev);
+ if (ret < 0)
+ goto err_alloc_muex;
- edev->dev.type = &edev->extcon_dev_type;
- }
+ ret = extcon_alloc_groups(edev);
+ if (ret < 0)
+ goto err_alloc_groups;
spin_lock_init(&edev->lock);
if (edev->max_supported) {
@@ -1277,13 +1337,14 @@ err_alloc_groups:
kfree(edev->d_attrs_muex);
kfree(edev->attrs_muex);
}
-err_muex:
+err_alloc_muex:
for (index = 0; index < edev->max_supported; index++)
kfree(edev->cables[index].attr_g.name);
-err_alloc_cables:
if (edev->max_supported)
kfree(edev->cables);
-err_sysfs_alloc:
+err_alloc_cables:
+ ida_free(&extcon_dev_ids, edev->id);
+
return ret;
}
EXPORT_SYMBOL_GPL(extcon_dev_register);
@@ -1306,12 +1367,13 @@ void extcon_dev_unregister(struct extcon_dev *edev)
list_del(&edev->entry);
mutex_unlock(&extcon_dev_list_lock);
- if (IS_ERR_OR_NULL(get_device(&edev->dev))) {
- dev_err(&edev->dev, "Failed to unregister extcon_dev (%s)\n",
- dev_name(&edev->dev));
+ if (!get_device(&edev->dev)) {
+ dev_err(&edev->dev, "Failed to unregister extcon_dev\n");
return;
}
+ ida_free(&extcon_dev_ids, edev->id);
+
device_unregister(&edev->dev);
if (edev->mutually_exclusive && edev->max_supported) {
@@ -1349,7 +1411,7 @@ struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
mutex_lock(&extcon_dev_list_lock);
list_for_each_entry(edev, &extcon_dev_list, entry)
- if (edev->dev.parent && edev->dev.parent->of_node == node)
+ if (edev->dev.parent && device_match_of_node(edev->dev.parent, node))
goto out;
edev = ERR_PTR(-EPROBE_DEFER);
out:
@@ -1367,21 +1429,17 @@ out:
*/
struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
{
- struct device_node *node;
+ struct device_node *node, *np = dev_of_node(dev);
struct extcon_dev *edev;
- if (!dev)
- return ERR_PTR(-EINVAL);
-
- if (!dev->of_node) {
+ if (!np) {
dev_dbg(dev, "device does not have a device node entry\n");
return ERR_PTR(-EINVAL);
}
- node = of_parse_phandle(dev->of_node, "extcon", index);
+ node = of_parse_phandle(np, "extcon", index);
if (!node) {
- dev_dbg(dev, "failed to get phandle in %pOF node\n",
- dev->of_node);
+ dev_dbg(dev, "failed to get phandle in %pOF node\n", np);
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/extcon/extcon.h b/drivers/extcon/extcon.h
index 93b5e0306966..946182687786 100644
--- a/drivers/extcon/extcon.h
+++ b/drivers/extcon/extcon.h
@@ -13,13 +13,14 @@
* are disabled.
* @mutually_exclusive: Array of mutually exclusive set of cables that cannot
* be attached simultaneously. The array should be
- * ending with NULL or be NULL (no mutually exclusive
- * cables). For example, if it is { 0x7, 0x30, 0}, then,
+ * ending with 0 or be NULL (no mutually exclusive cables).
+ * For example, if it is {0x7, 0x30, 0}, then,
* {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
 *			be attached simultaneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
* @dev: Device of this extcon.
+ * @id: Unique device ID of this extcon.
* @state: Attach/detach state of this extcon. Do not provide at
* register-time.
* @nh_all: Notifier for the state change events for all supported
@@ -27,7 +28,7 @@
* @nh: Notifier for the state change events from this extcon
* @entry: To support list of extcon devices so that users can
* search for extcon devices based on the extcon name.
- * @lock:
+ * @lock: Protects device state and serialises device registration
* @max_supported: Internal value to store the number of cables.
* @extcon_dev_type: Device_type struct to provide attribute_groups
* customized for each extcon device.
@@ -46,6 +47,7 @@ struct extcon_dev {
/* Internal data. Please do not set. */
struct device dev;
+ unsigned int id;
struct raw_notifier_head nh_all;
struct raw_notifier_head *nh;
struct list_head entry;
diff --git a/drivers/firewire/.kunitconfig b/drivers/firewire/.kunitconfig
new file mode 100644
index 000000000000..1599e069395f
--- /dev/null
+++ b/drivers/firewire/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_PCI=y
+CONFIG_FIREWIRE=y
+CONFIG_FIREWIRE_KUNIT_UAPI_TEST=y
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index ec00a6f70da8..0a6596b027db 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -18,6 +18,22 @@ config FIREWIRE
To compile this driver as a module, say M here: the module will be
called firewire-core.
+config FIREWIRE_KUNIT_UAPI_TEST
+ tristate "KUnit tests for layout of structure in UAPI" if !KUNIT_ALL_TESTS
+ depends on FIREWIRE && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+	  This builds the KUnit tests that check whether structures exposed to
+	  user space have the expected layout.
+
+ KUnit tests run during boot and output the results to the debug
+ log in TAP format (https://testanything.org/). Only useful for
+	  kernel devs running the KUnit test harness; not for inclusion
+ into a production build.
+
+ For more information on KUnit and unit tests in general, refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
depends on PCI && FIREWIRE && MMU
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index e58c8c794778..b24b2879ac34 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -15,3 +15,6 @@ obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
+
+firewire-uapi-test-objs += uapi-test.o
+obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += firewire-uapi-test.o
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 2c16ee8fd842..6274b86eb943 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -43,6 +43,7 @@
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
+#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6
struct client {
u32 version;
@@ -169,7 +170,10 @@ struct outbound_transaction_event {
struct event event;
struct client *client;
struct outbound_transaction_resource r;
- struct fw_cdev_event_response response;
+ union {
+ struct fw_cdev_event_response without_tstamp;
+ struct fw_cdev_event_response2 with_tstamp;
+ } rsp;
};
struct inbound_transaction_event {
@@ -177,6 +181,7 @@ struct inbound_transaction_event {
union {
struct fw_cdev_event_request request;
struct fw_cdev_event_request2 request2;
+ struct fw_cdev_event_request3 with_tstamp;
} req;
};
@@ -199,12 +204,18 @@ struct outbound_phy_packet_event {
struct event event;
struct client *client;
struct fw_packet p;
- struct fw_cdev_event_phy_packet phy_packet;
+ union {
+ struct fw_cdev_event_phy_packet without_tstamp;
+ struct fw_cdev_event_phy_packet2 with_tstamp;
+ } phy_packet;
};
struct inbound_phy_packet_event {
struct event event;
- struct fw_cdev_event_phy_packet phy_packet;
+ union {
+ struct fw_cdev_event_phy_packet without_tstamp;
+ struct fw_cdev_event_phy_packet2 with_tstamp;
+ } phy_packet;
};
#ifdef CONFIG_COMPAT
@@ -534,41 +545,64 @@ static void release_transaction(struct client *client,
{
}
-static void complete_transaction(struct fw_card *card, int rcode,
- void *payload, size_t length, void *data)
+static void complete_transaction(struct fw_card *card, int rcode, u32 request_tstamp,
+ u32 response_tstamp, void *payload, size_t length, void *data)
{
struct outbound_transaction_event *e = data;
- struct fw_cdev_event_response *rsp = &e->response;
struct client *client = e->client;
unsigned long flags;
- if (length < rsp->length)
- rsp->length = length;
- if (rcode == RCODE_COMPLETE)
- memcpy(rsp->data, payload, rsp->length);
-
spin_lock_irqsave(&client->lock, flags);
idr_remove(&client->resource_idr, e->r.resource.handle);
if (client->in_shutdown)
wake_up(&client->tx_flush_wait);
spin_unlock_irqrestore(&client->lock, flags);
- rsp->type = FW_CDEV_EVENT_RESPONSE;
- rsp->rcode = rcode;
+ switch (e->rsp.without_tstamp.type) {
+ case FW_CDEV_EVENT_RESPONSE:
+ {
+ struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;
+
+ if (length < rsp->length)
+ rsp->length = length;
+ if (rcode == RCODE_COMPLETE)
+ memcpy(rsp->data, payload, rsp->length);
+
+ rsp->rcode = rcode;
+
+ // In the case that sizeof(*rsp) doesn't align with the position of the
+ // data, and the read is short, preserve an extra copy of the data
+ // to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
+ // for short reads and some apps depended on it, this is both safe
+ // and prudent for compatibility.
+ if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
+ queue_event(client, &e->event, rsp, sizeof(*rsp), rsp->data, rsp->length);
+ else
+ queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
- /*
- * In the case that sizeof(*rsp) doesn't align with the position of the
- * data, and the read is short, preserve an extra copy of the data
- * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
- * for short reads and some apps depended on it, this is both safe
- * and prudent for compatibility.
- */
- if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
- queue_event(client, &e->event, rsp, sizeof(*rsp),
- rsp->data, rsp->length);
- else
- queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
- NULL, 0);
+ break;
+ }
+ case FW_CDEV_EVENT_RESPONSE2:
+ {
+ struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;
+
+ if (length < rsp->length)
+ rsp->length = length;
+ if (rcode == RCODE_COMPLETE)
+ memcpy(rsp->data, payload, rsp->length);
+
+ rsp->rcode = rcode;
+ rsp->request_tstamp = request_tstamp;
+ rsp->response_tstamp = response_tstamp;
+
+ queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
+
+		break;
+	}
+	default:
+		WARN_ON(1);
+		break;
+	}
/* Drop the idr's reference */
client_put(client);
@@ -579,6 +613,7 @@ static int init_request(struct client *client,
int destination_id, int speed)
{
struct outbound_transaction_event *e;
+ void *payload;
int ret;
if (request->tcode != TCODE_STREAM_DATA &&
@@ -592,14 +627,25 @@ static int init_request(struct client *client,
e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
-
e->client = client;
- e->response.length = request->length;
- e->response.closure = request->closure;
- if (request->data &&
- copy_from_user(e->response.data,
- u64_to_uptr(request->data), request->length)) {
+ if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
+ struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;
+
+ rsp->type = FW_CDEV_EVENT_RESPONSE;
+ rsp->length = request->length;
+ rsp->closure = request->closure;
+ payload = rsp->data;
+ } else {
+ struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;
+
+ rsp->type = FW_CDEV_EVENT_RESPONSE2;
+ rsp->length = request->length;
+ rsp->closure = request->closure;
+ payload = rsp->data;
+ }
+
+ if (request->data && copy_from_user(payload, u64_to_uptr(request->data), request->length)) {
ret = -EFAULT;
goto failed;
}
@@ -609,10 +655,9 @@ static int init_request(struct client *client,
if (ret < 0)
goto failed;
- fw_send_request(client->device->card, &e->r.transaction,
- request->tcode, destination_id, request->generation,
- speed, request->offset, e->response.data,
- request->length, complete_transaction, e);
+ fw_send_request_with_tstamp(client->device->card, &e->r.transaction, request->tcode,
+ destination_id, request->generation, speed, request->offset,
+ payload, request->length, complete_transaction, e);
return 0;
failed:
@@ -708,7 +753,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
req->handle = r->resource.handle;
req->closure = handler->closure;
event_size0 = sizeof(*req);
- } else {
+ } else if (handler->client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
struct fw_cdev_event_request2 *req = &e->req.request2;
req->type = FW_CDEV_EVENT_REQUEST2;
@@ -722,6 +767,21 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
req->handle = r->resource.handle;
req->closure = handler->closure;
event_size0 = sizeof(*req);
+ } else {
+ struct fw_cdev_event_request3 *req = &e->req.with_tstamp;
+
+ req->type = FW_CDEV_EVENT_REQUEST3;
+ req->tcode = tcode;
+ req->offset = offset;
+ req->source_node_id = source;
+ req->destination_node_id = destination;
+ req->card = card->index;
+ req->generation = generation;
+ req->length = length;
+ req->handle = r->resource.handle;
+ req->closure = handler->closure;
+ req->tstamp = fw_request_get_timestamp(request);
+ event_size0 = sizeof(*req);
}
queue_event(handler->client, &e->event,
@@ -1495,26 +1555,61 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
{
struct outbound_phy_packet_event *e =
container_of(packet, struct outbound_phy_packet_event, p);
- struct client *e_client;
+ struct client *e_client = e->client;
+ u32 rcode;
switch (status) {
- /* expected: */
- case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break;
- /* should never happen with PHY packets: */
- case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break;
+ // expected:
+ case ACK_COMPLETE:
+ rcode = RCODE_COMPLETE;
+ break;
+ // should never happen with PHY packets:
+ case ACK_PENDING:
+ rcode = RCODE_COMPLETE;
+ break;
case ACK_BUSY_X:
case ACK_BUSY_A:
- case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break;
- case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break;
- case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break;
- /* stale generation; cancelled; on certain controllers: no ack */
- default: e->phy_packet.rcode = status; break;
+ case ACK_BUSY_B:
+ rcode = RCODE_BUSY;
+ break;
+ case ACK_DATA_ERROR:
+ rcode = RCODE_DATA_ERROR;
+ break;
+ case ACK_TYPE_ERROR:
+ rcode = RCODE_TYPE_ERROR;
+ break;
+ // stale generation; cancelled; on certain controllers: no ack
+ default:
+ rcode = status;
+ break;
+ }
+
+ switch (e->phy_packet.without_tstamp.type) {
+ case FW_CDEV_EVENT_PHY_PACKET_SENT:
+ {
+ struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
+
+ pp->rcode = rcode;
+ pp->data[0] = packet->timestamp;
+ queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
+ NULL, 0);
+ break;
+ }
+ case FW_CDEV_EVENT_PHY_PACKET_SENT2:
+ {
+ struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
+
+ pp->rcode = rcode;
+ pp->tstamp = packet->timestamp;
+ queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
+ NULL, 0);
+ break;
+ }
+ default:
+ WARN_ON(1);
+ break;
}
- e->phy_packet.data[0] = packet->timestamp;
- e_client = e->client;
- queue_event(e->client, &e->event, &e->phy_packet,
- sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
client_put(e_client);
}
@@ -1528,7 +1623,7 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
if (!client->device->is_local)
return -ENOSYS;
- e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
+ e = kzalloc(sizeof(*e) + sizeof(a->data), GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
@@ -1541,10 +1636,24 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
e->p.header[2] = a->data[1];
e->p.header_length = 12;
e->p.callback = outbound_phy_packet_callback;
- e->phy_packet.closure = a->closure;
- e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
- if (is_ping_packet(a->data))
- e->phy_packet.length = 4;
+
+ if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
+ struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
+
+ pp->closure = a->closure;
+ pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT;
+ if (is_ping_packet(a->data))
+ pp->length = 4;
+ } else {
+ struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
+
+ pp->closure = a->closure;
+ pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT2;
+ // Keep the data field so that application can match the response event to the
+ // request.
+ pp->length = sizeof(a->data);
+ memcpy(pp->data, a->data, sizeof(a->data));
+ }
card->driver->send_request(card, &e->p);
@@ -1583,14 +1692,29 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
if (e == NULL)
break;
- e->phy_packet.closure = client->phy_receiver_closure;
- e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
- e->phy_packet.rcode = RCODE_COMPLETE;
- e->phy_packet.length = 8;
- e->phy_packet.data[0] = p->header[1];
- e->phy_packet.data[1] = p->header[2];
- queue_event(client, &e->event,
- &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
+ if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
+ struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
+
+ pp->closure = client->phy_receiver_closure;
+ pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
+ pp->rcode = RCODE_COMPLETE;
+ pp->length = 8;
+ pp->data[0] = p->header[1];
+ pp->data[1] = p->header[2];
+ queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
+ } else {
+ struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
+
+ pp = &e->phy_packet.with_tstamp;
+ pp->closure = client->phy_receiver_closure;
+ pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED2;
+ pp->rcode = RCODE_COMPLETE;
+ pp->length = 8;
+ pp->tstamp = p->timestamp;
+ pp->data[0] = p->header[1];
+ pp->data[1] = p->header[2];
+ queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
+ }
}
spin_unlock_irqrestore(&card->lock, flags);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index aa597cda0d88..a3104e35412c 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -1211,7 +1211,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* without actually having a link.
*/
create:
- device = kzalloc(sizeof(*device), GFP_ATOMIC);
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
if (device == NULL)
break;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index f40c81534381..88466b663482 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -101,7 +101,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
struct fw_node *node;
- node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
+ node = kzalloc(struct_size(node, ports, port_count), GFP_KERNEL);
if (node == NULL)
return NULL;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index a9f70c96323e..130b95aca629 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -70,8 +70,8 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
return 1;
}
-static int close_transaction(struct fw_transaction *transaction,
- struct fw_card *card, int rcode)
+static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
+ u32 response_tstamp)
{
struct fw_transaction *t = NULL, *iter;
unsigned long flags;
@@ -92,7 +92,12 @@ static int close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags);
if (t) {
- t->callback(card, rcode, NULL, 0, t->callback_data);
+ if (!t->with_tstamp) {
+ t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
+ } else {
+ t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp,
+ NULL, 0, t->callback_data);
+ }
return 0;
}
@@ -107,6 +112,8 @@ static int close_transaction(struct fw_transaction *transaction,
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction)
{
+ u32 tstamp;
+
/*
* Cancel the packet transmission if it's still queued. That
* will call the packet transmission callback which cancels
@@ -121,7 +128,17 @@ int fw_cancel_transaction(struct fw_card *card,
* if the transaction is still pending and remove it in that case.
*/
- return close_transaction(transaction, card, RCODE_CANCELLED);
+ if (transaction->packet.ack == 0) {
+ // The timestamp is reused since it was just read now.
+ tstamp = transaction->packet.timestamp;
+ } else {
+ u32 curr_cycle_time = 0;
+
+ (void)fw_card_read_cycle_time(card, &curr_cycle_time);
+ tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);
+ }
+
+ return close_transaction(transaction, card, RCODE_CANCELLED, tstamp);
}
EXPORT_SYMBOL(fw_cancel_transaction);
@@ -140,7 +157,12 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
card->tlabel_mask &= ~(1ULL << t->tlabel);
spin_unlock_irqrestore(&card->lock, flags);
- t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
+ if (!t->with_tstamp) {
+ t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
+ } else {
+ t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp,
+ t->split_timeout_cycle, NULL, 0, t->callback_data);
+ }
}
static void start_split_transaction_timeout(struct fw_transaction *t,
@@ -162,6 +184,8 @@ static void start_split_transaction_timeout(struct fw_transaction *t,
spin_unlock_irqrestore(&card->lock, flags);
}
+static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
+
static void transmit_complete_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
@@ -170,28 +194,32 @@ static void transmit_complete_callback(struct fw_packet *packet,
switch (status) {
case ACK_COMPLETE:
- close_transaction(t, card, RCODE_COMPLETE);
+ close_transaction(t, card, RCODE_COMPLETE, packet->timestamp);
break;
case ACK_PENDING:
+ {
+ t->split_timeout_cycle =
+ compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
start_split_transaction_timeout(t, card);
break;
+ }
case ACK_BUSY_X:
case ACK_BUSY_A:
case ACK_BUSY_B:
- close_transaction(t, card, RCODE_BUSY);
+ close_transaction(t, card, RCODE_BUSY, packet->timestamp);
break;
case ACK_DATA_ERROR:
- close_transaction(t, card, RCODE_DATA_ERROR);
+ close_transaction(t, card, RCODE_DATA_ERROR, packet->timestamp);
break;
case ACK_TYPE_ERROR:
- close_transaction(t, card, RCODE_TYPE_ERROR);
+ close_transaction(t, card, RCODE_TYPE_ERROR, packet->timestamp);
break;
default:
/*
* In this case the ack is really a juju specific
* rcode, so just forward that to the callback.
*/
- close_transaction(t, card, status);
+ close_transaction(t, card, status, packet->timestamp);
break;
}
}
@@ -288,7 +316,8 @@ static int allocate_tlabel(struct fw_card *card)
}
/**
- * fw_send_request() - submit a request packet for transmission
+ * __fw_send_request() - submit a request packet for transmission and generate a callback for the
+ *			  response subaction, with or without time stamp.
* @card: interface to send the request at
* @t: transaction instance to which the request belongs
* @tcode: transaction code
@@ -298,7 +327,9 @@ static int allocate_tlabel(struct fw_card *card)
* @offset: 48bit wide offset into destination's address space
* @payload: data payload for the request subaction
* @length: length of the payload, in bytes
- * @callback: function to be called when the transaction is completed
+ * @callback: union of two callback functions, for the response subaction with or without
+ *	       time stamp.
+ * @with_tstamp: Whether to receive the time stamp for the response subaction.
* @callback_data: data to be passed to the transaction completion callback
*
* Submit a request packet into the asynchronous request transmission queue.
@@ -335,10 +366,10 @@ static int allocate_tlabel(struct fw_card *card)
* transaction completion and hence execution of @callback may happen even
* before fw_send_request() returns.
*/
-void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
- int destination_id, int generation, int speed,
- unsigned long long offset, void *payload, size_t length,
- fw_transaction_callback_t callback, void *callback_data)
+void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, union fw_transaction_callback callback,
+ bool with_tstamp, void *callback_data)
{
unsigned long flags;
int tlabel;
@@ -353,7 +384,19 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
tlabel = allocate_tlabel(card);
if (tlabel < 0) {
spin_unlock_irqrestore(&card->lock, flags);
- callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
+ if (!with_tstamp) {
+ callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
+ } else {
+ // Timestamping on behalf of hardware.
+ u32 curr_cycle_time = 0;
+ u32 tstamp;
+
+ (void)fw_card_read_cycle_time(card, &curr_cycle_time);
+ tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);
+
+ callback.with_tstamp(card, RCODE_SEND_ERROR, tstamp, tstamp, NULL, 0,
+ callback_data);
+ }
return;
}
@@ -361,13 +404,12 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
t->tlabel = tlabel;
t->card = card;
t->is_split_transaction = false;
- timer_setup(&t->split_timeout_timer,
- split_transaction_timeout_callback, 0);
+ timer_setup(&t->split_timeout_timer, split_transaction_timeout_callback, 0);
t->callback = callback;
+ t->with_tstamp = with_tstamp;
t->callback_data = callback_data;
- fw_fill_request(&t->packet, tcode, t->tlabel,
- destination_id, card->node_id, generation,
+ fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
speed, offset, payload, length);
t->packet.callback = transmit_complete_callback;
@@ -377,7 +419,7 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
card->driver->send_request(card, &t->packet);
}
-EXPORT_SYMBOL(fw_send_request);
+EXPORT_SYMBOL_GPL(__fw_send_request);
struct transaction_callback_data {
struct completion done;
@@ -1047,7 +1089,12 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
*/
card->driver->cancel_packet(card, &t->packet);
- t->callback(card, rcode, data, data_length, t->callback_data);
+ if (!t->with_tstamp) {
+ t->callback.without_tstamp(card, rcode, data, data_length, t->callback_data);
+ } else {
+ t->callback.with_tstamp(card, rcode, t->packet.timestamp, p->timestamp, data,
+ data_length, t->callback_data);
+ }
}
EXPORT_SYMBOL(fw_core_handle_response);
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index eafa4eaae737..2a05f411328f 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -247,6 +247,13 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
void fw_request_get(struct fw_request *request);
void fw_request_put(struct fw_request *request);
+// Convert the value of IEEE 1394 CYCLE_TIME register to the format of timeStamp field in
+// descriptors of 1394 OHCI.
+static inline u32 cycle_time_to_ohci_tstamp(u32 tstamp)
+{
+ return (tstamp & 0x0ffff000) >> 12;
+}
+
#define FW_PHY_CONFIG_NO_NODE_ID -1
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
void fw_send_phy_config(struct fw_card *card,
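cycle_time_to_ohci_tstamp() above assumes the IEEE 1394 CYCLE_TIME layout (cycleSeconds in bits 31:25, cycleCount in bits 24:12, cycleOffset in bits 11:0): the mask-and-shift keeps the low three bits of cycleSeconds plus the full cycleCount, which is the 16-bit timeStamp format used in OHCI descriptors. A stand-alone user-space sketch with an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as cycle_time_to_ohci_tstamp(); the register value below is arbitrary. */
static uint32_t to_ohci_tstamp(uint32_t cycle_time)
{
	return (cycle_time & 0x0ffff000) >> 12;
}

int main(void)
{
	/* cycleSeconds = 0x45, cycleCount = 1234, cycleOffset = 567 */
	uint32_t cycle_time = (0x45u << 25) | (1234u << 12) | 567u;

	/* Prints 0xa4d2: (0x45 & 0x7) lands in bits 15:13, 1234 in bits 12:0,
	 * and the cycleOffset is dropped.
	 */
	printf("timeStamp = 0x%04x\n", (unsigned int)to_ohci_tstamp(cycle_time));
	return 0;
}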
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 538bd677c254..7a4d1a478e33 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -479,7 +479,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
struct sk_buff *skb, u16 source_node_id,
bool is_broadcast, u16 ether_type)
{
- int status;
+ int status, len;
switch (ether_type) {
case ETH_P_ARP:
@@ -533,13 +533,15 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
}
skb->protocol = protocol;
}
+
+ len = skb->len;
status = netif_rx(skb);
if (status == NET_RX_DROP) {
net->stats.rx_errors++;
net->stats.rx_dropped++;
} else {
net->stats.rx_packets++;
- net->stats.rx_bytes += skb->len;
+ net->stats.rx_bytes += len;
}
return 0;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 17c9d825188b..7e88fd489741 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -677,6 +677,9 @@ static void ar_context_release(struct ar_context *ctx)
struct device *dev = ctx->ohci->card.device;
unsigned int i;
+ if (!ctx->buffer)
+ return;
+
vunmap(ctx->buffer);
for (i = 0; i < AR_BUFFERS; i++) {
@@ -1105,8 +1108,7 @@ static int context_add_buffer(struct context *ctx)
if (ctx->total_allocation >= 16*1024*1024)
return -ENOMEM;
- desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
- &bus_addr, GFP_ATOMIC);
+ desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
if (!desc)
return -ENOMEM;
@@ -1165,10 +1167,10 @@ static void context_release(struct context *ctx)
struct fw_card *card = &ctx->ohci->card;
struct descriptor_buffer *desc, *tmp;
- list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
- dma_free_coherent(card->device, PAGE_SIZE, desc,
- desc->buffer_bus -
- ((void *)&desc->buffer - (void *)desc));
+ list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) {
+ dmam_free_coherent(card->device, PAGE_SIZE, desc,
+ desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
+ }
}
/* Must be called with ohci->lock held */
@@ -1623,6 +1625,8 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
}
}
+static u32 get_cycle_time(struct fw_ohci *ohci);
+
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
unsigned long flags;
@@ -1633,6 +1637,10 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
ctx->ohci->generation == packet->generation) {
spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+
+ // Timestamping on behalf of the hardware.
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+
handle_local_request(ctx, packet);
return;
}
@@ -1640,9 +1648,12 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
ret = at_context_queue_packet(ctx, packet);
spin_unlock_irqrestore(&ctx->ohci->lock, flags);
- if (ret < 0)
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ if (ret < 0) {
+ // Timestamping on behalf of the hardware.
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+ packet->callback(packet, &ctx->ohci->card, packet->ack);
+ }
}
static void detect_dead_context(struct fw_ohci *ohci,
@@ -2044,8 +2055,7 @@ static void bus_reset_work(struct work_struct *work)
spin_unlock_irq(&ohci->lock);
if (free_rom)
- dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- free_rom, free_rom_bus);
+ dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
log_selfids(ohci, generation, self_id_count);
@@ -2377,10 +2387,8 @@ static int ohci_enable(struct fw_card *card,
*/
if (config_rom) {
- ohci->next_config_rom =
- dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- &ohci->next_config_rom_bus,
- GFP_KERNEL);
+ ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ &ohci->next_config_rom_bus, GFP_KERNEL);
if (ohci->next_config_rom == NULL)
return -ENOMEM;
@@ -2472,9 +2480,8 @@ static int ohci_set_config_rom(struct fw_card *card,
* ohci->next_config_rom to NULL (see bus_reset_work).
*/
- next_config_rom =
- dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- &next_config_rom_bus, GFP_KERNEL);
+ next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ &next_config_rom_bus, GFP_KERNEL);
if (next_config_rom == NULL)
return -ENOMEM;
@@ -2507,9 +2514,10 @@ static int ohci_set_config_rom(struct fw_card *card,
spin_unlock_irq(&ohci->lock);
/* If we didn't use the DMA allocation, delete it. */
- if (next_config_rom != NULL)
- dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- next_config_rom, next_config_rom_bus);
+ if (next_config_rom != NULL) {
+ dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
+ next_config_rom_bus);
+ }
/*
* Now initiate a bus reset to have the changes take
@@ -2557,6 +2565,10 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
+
+ // Timestamping on behalf of the hardware.
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
+
packet->callback(packet, &ohci->card, packet->ack);
ret = 0;
out:
@@ -3544,6 +3556,19 @@ static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */
+static void release_ohci(struct device *dev, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
+
+ pmac_ohci_off(pdev);
+
+ ar_context_release(&ohci->ar_response_ctx);
+ ar_context_release(&ohci->ar_request_ctx);
+
+ dev_notice(dev, "removed fw-ohci device\n");
+}
+
static int pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
@@ -3558,25 +3583,22 @@ static int pci_probe(struct pci_dev *dev,
return -ENOSYS;
}
- ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
- if (ohci == NULL) {
- err = -ENOMEM;
- goto fail;
- }
-
+ ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
+ if (ohci == NULL)
+ return -ENOMEM;
fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
-
+ pci_set_drvdata(dev, ohci);
pmac_ohci_on(dev);
+ devres_add(&dev->dev, ohci);
- err = pci_enable_device(dev);
+ err = pcim_enable_device(dev);
if (err) {
dev_err(&dev->dev, "failed to enable OHCI hardware\n");
- goto fail_free;
+ return err;
}
pci_set_master(dev);
pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
- pci_set_drvdata(dev, ohci);
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
@@ -3586,22 +3608,15 @@ static int pci_probe(struct pci_dev *dev,
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
ohci_err(ohci, "invalid MMIO resource\n");
- err = -ENXIO;
- goto fail_disable;
+ return -ENXIO;
}
- err = pci_request_region(dev, 0, ohci_driver_name);
+ err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name);
if (err) {
- ohci_err(ohci, "MMIO resource unavailable\n");
- goto fail_disable;
- }
-
- ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
- if (ohci->registers == NULL) {
- ohci_err(ohci, "failed to remap registers\n");
- err = -ENXIO;
- goto fail_iomem;
+ ohci_err(ohci, "failed to request and map MMIO resource\n");
+ return -ENXIO;
}
+ ohci->registers = pcim_iomap_table(dev)[0];
for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
if ((ohci_quirks[i].vendor == dev->vendor) &&
@@ -3622,34 +3637,30 @@ static int pci_probe(struct pci_dev *dev,
*/
BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
- ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
- PAGE_SIZE,
- &ohci->misc_buffer_bus,
- GFP_KERNEL);
- if (!ohci->misc_buffer) {
- err = -ENOMEM;
- goto fail_iounmap;
- }
+ ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
+ GFP_KERNEL);
+ if (!ohci->misc_buffer)
+ return -ENOMEM;
err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
OHCI1394_AsReqRcvContextControlSet);
if (err < 0)
- goto fail_misc_buf;
+ return err;
err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
OHCI1394_AsRspRcvContextControlSet);
if (err < 0)
- goto fail_arreq_ctx;
+ return err;
err = context_init(&ohci->at_request_ctx, ohci,
OHCI1394_AsReqTrContextControlSet, handle_at_packet);
if (err < 0)
- goto fail_arrsp_ctx;
+ return err;
err = context_init(&ohci->at_response_ctx, ohci,
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
if (err < 0)
- goto fail_atreq_ctx;
+ return err;
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
@@ -3658,7 +3669,9 @@ static int pci_probe(struct pci_dev *dev,
ohci->ir_context_mask = ohci->ir_context_support;
ohci->n_ir = hweight32(ohci->ir_context_mask);
size = sizeof(struct iso_context) * ohci->n_ir;
- ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
+ ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ if (!ohci->ir_context_list)
+ return -ENOMEM;
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
@@ -3671,12 +3684,9 @@ static int pci_probe(struct pci_dev *dev,
ohci->it_context_mask = ohci->it_context_support;
ohci->n_it = hweight32(ohci->it_context_mask);
size = sizeof(struct iso_context) * ohci->n_it;
- ohci->it_context_list = kzalloc(size, GFP_KERNEL);
-
- if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
- err = -ENOMEM;
- goto fail_contexts;
- }
+ ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ if (!ohci->it_context_list)
+ return -ENOMEM;
ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
@@ -3689,17 +3699,16 @@ static int pci_probe(struct pci_dev *dev,
if (!(ohci->quirks & QUIRK_NO_MSI))
pci_enable_msi(dev);
- if (request_irq(dev->irq, irq_handler,
- pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
- ohci_driver_name, ohci)) {
+ err = devm_request_irq(&dev->dev, dev->irq, irq_handler,
+ pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name, ohci);
+ if (err < 0) {
ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
- err = -EIO;
goto fail_msi;
}
err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
if (err)
- goto fail_irq;
+ goto fail_msi;
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
ohci_notice(ohci,
@@ -3712,33 +3721,9 @@ static int pci_probe(struct pci_dev *dev,
return 0;
- fail_irq:
- free_irq(dev->irq, ohci);
fail_msi:
pci_disable_msi(dev);
- fail_contexts:
- kfree(ohci->ir_context_list);
- kfree(ohci->it_context_list);
- context_release(&ohci->at_response_ctx);
- fail_atreq_ctx:
- context_release(&ohci->at_request_ctx);
- fail_arrsp_ctx:
- ar_context_release(&ohci->ar_response_ctx);
- fail_arreq_ctx:
- ar_context_release(&ohci->ar_request_ctx);
- fail_misc_buf:
- dma_free_coherent(ohci->card.device, PAGE_SIZE,
- ohci->misc_buffer, ohci->misc_buffer_bus);
- fail_iounmap:
- pci_iounmap(dev, ohci->registers);
- fail_iomem:
- pci_release_region(dev, 0);
- fail_disable:
- pci_disable_device(dev);
- fail_free:
- kfree(ohci);
- pmac_ohci_off(dev);
- fail:
+
return err;
}
@@ -3763,30 +3748,10 @@ static void pci_remove(struct pci_dev *dev)
*/
software_reset(ohci);
- free_irq(dev->irq, ohci);
-
- if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
- dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- ohci->next_config_rom, ohci->next_config_rom_bus);
- if (ohci->config_rom)
- dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- ohci->config_rom, ohci->config_rom_bus);
- ar_context_release(&ohci->ar_request_ctx);
- ar_context_release(&ohci->ar_response_ctx);
- dma_free_coherent(ohci->card.device, PAGE_SIZE,
- ohci->misc_buffer, ohci->misc_buffer_bus);
- context_release(&ohci->at_request_ctx);
- context_release(&ohci->at_response_ctx);
- kfree(ohci->it_context_list);
- kfree(ohci->ir_context_list);
+
pci_disable_msi(dev);
- pci_iounmap(dev, ohci->registers);
- pci_release_region(dev, 0);
- pci_disable_device(dev);
- kfree(ohci);
- pmac_ohci_off(dev);
- dev_notice(&dev->dev, "removed fw-ohci device\n");
+ dev_notice(&dev->dev, "removing fw-ohci device\n");
}
#ifdef CONFIG_PM
diff --git a/drivers/firewire/uapi-test.c b/drivers/firewire/uapi-test.c
new file mode 100644
index 000000000000..2fcbede4fab1
--- /dev/null
+++ b/drivers/firewire/uapi-test.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// uapi-test.c - KUnit tests to check the layout of structures exposed to user space by the
+// FireWire subsystem.
+//
+// Copyright (c) 2023 Takashi Sakamoto
+
+#include <kunit/test.h>
+#include <linux/firewire-cdev.h>
+
+// Known layout issue introduced in the v2.6.27 kernel.
+static void structure_layout_event_response(struct kunit *test)
+{
+#if defined(CONFIG_X86_32)
+ // 4-byte alignment for aggregate types that include 8-byte storage types.
+ KUNIT_EXPECT_EQ(test, 20, sizeof(struct fw_cdev_event_response));
+#else
+ // 8-byte alignment for aggregate types that include 8-byte storage types.
+ KUNIT_EXPECT_EQ(test, 24, sizeof(struct fw_cdev_event_response));
+#endif
+
+ KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_response, closure));
+ KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_response, type));
+ KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_response, rcode));
+ KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_response, length));
+ KUNIT_EXPECT_EQ(test, 20, offsetof(struct fw_cdev_event_response, data));
+}
+
+// Added at v6.5.
+static void structure_layout_event_request3(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 56, sizeof(struct fw_cdev_event_request3));
+
+ KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_request3, closure));
+ KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_request3, type));
+ KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_request3, tcode));
+ KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_request3, offset));
+ KUNIT_EXPECT_EQ(test, 24, offsetof(struct fw_cdev_event_request3, source_node_id));
+ KUNIT_EXPECT_EQ(test, 28, offsetof(struct fw_cdev_event_request3, destination_node_id));
+ KUNIT_EXPECT_EQ(test, 32, offsetof(struct fw_cdev_event_request3, card));
+ KUNIT_EXPECT_EQ(test, 36, offsetof(struct fw_cdev_event_request3, generation));
+ KUNIT_EXPECT_EQ(test, 40, offsetof(struct fw_cdev_event_request3, handle));
+ KUNIT_EXPECT_EQ(test, 44, offsetof(struct fw_cdev_event_request3, length));
+ KUNIT_EXPECT_EQ(test, 48, offsetof(struct fw_cdev_event_request3, tstamp));
+ KUNIT_EXPECT_EQ(test, 56, offsetof(struct fw_cdev_event_request3, data));
+}
+
+// Added at v6.5.
+static void structure_layout_event_response2(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 32, sizeof(struct fw_cdev_event_response2));
+
+ KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_response2, closure));
+ KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_response2, type));
+ KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_response2, rcode));
+ KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_response2, length));
+ KUNIT_EXPECT_EQ(test, 20, offsetof(struct fw_cdev_event_response2, request_tstamp));
+ KUNIT_EXPECT_EQ(test, 24, offsetof(struct fw_cdev_event_response2, response_tstamp));
+ KUNIT_EXPECT_EQ(test, 32, offsetof(struct fw_cdev_event_response2, data));
+}
+
+// Added at v6.5.
+static void structure_layout_event_phy_packet2(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 24, sizeof(struct fw_cdev_event_phy_packet2));
+
+ KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_phy_packet2, closure));
+ KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_phy_packet2, type));
+ KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_phy_packet2, rcode));
+ KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_phy_packet2, length));
+ KUNIT_EXPECT_EQ(test, 20, offsetof(struct fw_cdev_event_phy_packet2, tstamp));
+ KUNIT_EXPECT_EQ(test, 24, offsetof(struct fw_cdev_event_phy_packet2, data));
+}
+
+static struct kunit_case structure_layout_test_cases[] = {
+ KUNIT_CASE(structure_layout_event_response),
+ KUNIT_CASE(structure_layout_event_request3),
+ KUNIT_CASE(structure_layout_event_response2),
+ KUNIT_CASE(structure_layout_event_phy_packet2),
+ {}
+};
+
+static struct kunit_suite structure_layout_test_suite = {
+ .name = "firewire-uapi-structure-layout",
+ .test_cases = structure_layout_test_cases,
+};
+kunit_test_suite(structure_layout_test_suite);
+
+MODULE_LICENSE("GPL");
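The first test case above pins down a structure whose size differs between x86-32 and other ABIs. A hedged user-space sketch of the same effect, using a structure of the same shape rather than the uapi header itself:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Same shape as fw_cdev_event_response: one 64-bit member followed by three
 * 32-bit members and a flexible array. Most 64-bit ABIs align the struct to
 * 8 bytes and pad it to 24; the i386 ABI aligns 64-bit members to 4 bytes
 * inside aggregates, so the size stays at 20 while offsetof(data) is 20 in
 * both cases, which is exactly what the KUnit case checks.
 */
struct response_like {
	uint64_t closure;
	uint32_t type;
	uint32_t rcode;
	uint32_t length;
	uint32_t data[];
};

int main(void)
{
	printf("sizeof=%zu offsetof(data)=%zu\n",
	       sizeof(struct response_like),
	       offsetof(struct response_like, data));
	return 0;
}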
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 03708ab64e56..8d91997036e4 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -310,6 +310,7 @@ static const struct kobj_type dmi_system_event_log_ktype = {
.default_groups = dmi_sysfs_sel_groups,
};
+#ifdef CONFIG_HAS_IOPORT
typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
loff_t offset);
@@ -374,6 +375,7 @@ static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry,
return wrote;
}
+#endif
static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
const struct dmi_system_event_log *sel,
@@ -409,11 +411,13 @@ static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
memcpy(&sel, dh, sizeof(sel));
switch (sel.access_method) {
+#ifdef CONFIG_HAS_IOPORT
case DMI_SEL_ACCESS_METHOD_IO8:
case DMI_SEL_ACCESS_METHOD_IO2x8:
case DMI_SEL_ACCESS_METHOD_IO16:
return dmi_sel_raw_read_io(entry, &sel, state->buf,
state->pos, state->count);
+#endif
case DMI_SEL_ACCESS_METHOD_PHYS32:
return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
state->pos, state->count);
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 80f4e2d14e04..2d674126160f 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -755,7 +755,7 @@ svc_create_memory_pool(struct platform_device *pdev,
end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
paddr = begin;
size = end - begin;
- va = memremap(paddr, size, MEMREMAP_WC);
+ va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
if (!va) {
dev_err(dev, "fail to remap shared memory\n");
return ERR_PTR(-EINVAL);
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 99606b34975e..8528850af889 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2014-2018 Xilinx, Inc.
*
- * Michal Simek <michal.simek@xilinx.com>
+ * Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
* Rajan Vaja <rajanv@xilinx.com>
diff --git a/drivers/firmware/xilinx/zynqmp-debug.h b/drivers/firmware/xilinx/zynqmp-debug.h
index 9929f8b433f5..e1515a93e9e9 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.h
+++ b/drivers/firmware/xilinx/zynqmp-debug.h
@@ -4,7 +4,7 @@
*
* Copyright (C) 2014-2018 Xilinx
*
- * Michal Simek <michal.simek@xilinx.com>
+ * Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
* Rajan Vaja <rajanv@xilinx.com>
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 9e585b5646df..f8c4eb2b43f8 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2014-2022 Xilinx, Inc.
*
- * Michal Simek <michal.simek@xilinx.com>
+ * Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
* Rajan Vaja <rajanv@xilinx.com>
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 77ea04d4edbe..bcb5d34b3b82 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -265,7 +265,7 @@ static const struct hwmon_ops thermal_hwmon_ops = {
.read = thermal_hwmon_read,
};
-static const struct hwmon_channel_info *thermal_hwmon_info[] = {
+static const struct hwmon_channel_info * const thermal_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
HWMON_T_MAX | HWMON_T_MAX_ALARM |
HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
@@ -465,7 +465,7 @@ static const struct hwmon_ops power_hwmon_ops = {
.write = power_hwmon_write,
};
-static const struct hwmon_channel_info *power_hwmon_info[] = {
+static const struct hwmon_channel_info * const power_hwmon_info[] = {
HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
HWMON_P_MAX | HWMON_P_MAX_ALARM |
HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index d7e2f9f461bc..31af2e08c825 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -376,12 +376,11 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
u32 doorbell_reg, progress, status;
int ret, err;
- ret = regmap_update_bits(sec->m10bmc->regmap,
- csr_map->base + csr_map->doorbell,
- DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
- DRBL_RSU_REQUEST |
- FIELD_PREP(DRBL_HOST_STATUS,
- HOST_STATUS_IDLE));
+ ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
+ DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
+ DRBL_RSU_REQUEST |
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_IDLE));
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@@ -450,11 +449,10 @@ static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
u32 doorbell_reg, status;
int ret;
- ret = regmap_update_bits(sec->m10bmc->regmap,
- csr_map->base + csr_map->doorbell,
- DRBL_HOST_STATUS,
- FIELD_PREP(DRBL_HOST_STATUS,
- HOST_STATUS_WRITE_DONE));
+ ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
+ DRBL_HOST_STATUS,
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_WRITE_DONE));
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@@ -517,11 +515,10 @@ static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
if (rsu_prog(doorbell) != RSU_PROG_READY)
return FW_UPLOAD_ERR_BUSY;
- ret = regmap_update_bits(sec->m10bmc->regmap,
- csr_map->base + csr_map->doorbell,
- DRBL_HOST_STATUS,
- FIELD_PREP(DRBL_HOST_STATUS,
- HOST_STATUS_ABORT_RSU));
+ ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
+ DRBL_HOST_STATUS,
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_ABORT_RSU));
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@@ -547,21 +544,28 @@ static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
if (ret != FW_UPLOAD_ERR_NONE)
goto unlock_flash;
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PREPARE);
+
ret = rsu_update_init(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- goto unlock_flash;
+ goto fw_state_exit;
ret = rsu_prog_ready(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- goto unlock_flash;
+ goto fw_state_exit;
if (sec->cancel_request) {
ret = rsu_cancel(sec);
- goto unlock_flash;
+ goto fw_state_exit;
}
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_WRITE);
+
return FW_UPLOAD_ERR_NONE;
+fw_state_exit:
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);
+
unlock_flash:
if (sec->m10bmc->flash_bulk_ops)
sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
@@ -610,6 +614,8 @@ static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
if (sec->cancel_request)
return rsu_cancel(sec);
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PROGRAM);
+
result = rsu_send_data(sec);
if (result != FW_UPLOAD_ERR_NONE)
return result;
@@ -653,6 +659,8 @@ static void m10bmc_sec_cleanup(struct fw_upload *fwl)
(void)rsu_cancel(sec);
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);
+
if (sec->m10bmc->flash_bulk_ops)
sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
}
@@ -764,3 +772,4 @@ module_platform_driver(intel_m10bmc_sec_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index ae0da361e6c6..f8214cae9b6e 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -493,15 +493,15 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
if (err)
return err;
- /* Release 'PR' control back to the ICAP */
- zynq_fpga_write(priv, CTRL_OFFSET,
- zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK);
-
err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
intr_status & IXR_PCFG_DONE_MASK,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
+ /* Release 'PR' control back to the ICAP */
+ zynq_fpga_write(priv, CTRL_OFFSET,
+ zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK);
+
clk_disable(priv->clk);
if (err)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a84bd4a0c421..2f9c14aca73c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -286,6 +286,9 @@ extern int amdgpu_user_partt_mode;
#define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
+/* Extra delay (in ms) to eliminate the influence of momentary temperature fluctuations */
+#define AMDGPU_SWCTF_EXTRA_DELAY 50
+
struct amdgpu_xcp_mgr;
struct amdgpu_device;
struct amdgpu_irq_src;
@@ -1277,9 +1280,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
-#define for_each_inst(i, inst_mask) \
- for (i = ffs(inst_mask) - 1; inst_mask; \
- inst_mask &= ~(1U << i), i = ffs(inst_mask) - 1)
+#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
+#define for_each_inst(i, inst_mask) \
+ for (i = ffs(inst_mask); i-- != 0; \
+ i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
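A hedged user-space sketch of the reworked iterator, substituting ffs() from <strings.h> for the kernel helper; unlike the old definition it visits each set bit without clobbering inst_mask.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define BITS_PER_LONG (8 * sizeof(long))
#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
#define for_each_inst(i, inst_mask) \
	for (i = ffs(inst_mask); i-- != 0; \
	     i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))

int main(void)
{
	unsigned int inst_mask = 0x0b;	/* instances 0, 1 and 3 */
	int i;

	/* Prints 0, 1 and 3; inst_mask itself is left untouched by the loop. */
	for_each_inst(i, inst_mask)
		printf("%d\n", i);
	printf("mask still 0x%x\n", inst_mask);
	return 0;
}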
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9ba4817a9148..f4e3c133a16c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1791,6 +1791,15 @@ const struct attribute_group amdgpu_vbios_version_attr_group = {
.attrs = amdgpu_vbios_version_attrs
};
+int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.atom_context)
+ return devm_device_add_group(adev->dev,
+ &amdgpu_vbios_version_attr_group);
+
+ return 0;
+}
+
/**
* amdgpu_atombios_fini - free the driver info and callbacks for atombios
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 4153d520e2a3..b639a80ee3fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -217,5 +217,6 @@ int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
void amdgpu_atombios_fini(struct amdgpu_device *adev);
int amdgpu_atombios_init(struct amdgpu_device *adev);
+int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index ef4b9a41f20a..0b7f4c4d58e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -327,10 +327,13 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = igp_info->v11.umachannelnumber;
if (!mem_channel_number)
mem_channel_number = 1;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
mem_type = igp_info->v11.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
break;
@@ -345,10 +348,13 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = igp_info->v21.umachannelnumber;
if (!mem_channel_number)
mem_channel_number = 1;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
mem_type = igp_info->v21.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
break;
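A worked instance of the width calculation above; the channel count is hypothetical, and only the 32-bit-per-channel figure for LPDDR5 versus 64-bit otherwise comes from the change itself.

#include <stdio.h>

int main(void)
{
	unsigned int mem_channel_number = 8;	/* hypothetical channel count */
	int is_lpddr5 = 1;			/* memorytype == LpDdr5MemType */
	unsigned int mem_channel_width = is_lpddr5 ? 32 : 64;

	/* Prints 256; the old code would have reported 8 * 64 = 512 here. */
	printf("vram_width = %u\n", mem_channel_number * mem_channel_width);
	return 0;
}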
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d9503882ea97..040f4cb6ab2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -136,9 +136,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- /* One for TTM and two for the CS job */
- p->uf_entry.tv.num_shared = 3;
-
drm_gem_object_put(gobj);
size = amdgpu_bo_size(bo);
@@ -912,15 +909,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
mutex_lock(&p->bo_list->bo_list_mutex);
- /* One for TTM and one for the CS job */
+ /* One for TTM and one for each CS job */
amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->tv.num_shared = 2;
+ e->tv.num_shared = 1 + p->gang_size;
+ p->uf_entry.tv.num_shared = 1 + p->gang_size;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
+ /* Two for VM updates, one for TTM and one for each CS job */
+ p->vm_pd.tv.num_shared = 3 + p->gang_size;
+
if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
list_add(&p->uf_entry.tv.head, &p->validated);
@@ -1653,15 +1654,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r > 0 && fence->error)
+ r = fence->error;
+
dma_fence_put(fence);
if (r < 0)
return r;
if (r == 0)
break;
-
- if (fence->error)
- return fence->error;
}
memset(wait, 0, sizeof(*wait));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e25f085ee886..a92c6189b4b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2552,7 +2552,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = true;
/* right after GMC hw init, we create CSA */
- if (amdgpu_mcbp) {
+ if (adev->gfx.mcbp) {
r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
@@ -3673,6 +3673,23 @@ static const struct attribute *amdgpu_dev_attributes[] = {
NULL
};
+static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
+{
+ if (amdgpu_mcbp == 1)
+ adev->gfx.mcbp = true;
+
+ if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
+ adev->gfx.num_gfx_rings)
+ adev->gfx.mcbp = true;
+
+ if (amdgpu_sriov_vf(adev))
+ adev->gfx.mcbp = true;
+
+ if (adev->gfx.mcbp)
+ DRM_INFO("MCBP is enabled\n");
+}
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -3824,9 +3841,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- if (amdgpu_mcbp)
- DRM_INFO("MCBP is enabled\n");
-
/*
* Reset domain needs to be present early, before XGMI hive discovered
* (if any) and intitialized to use reset sem and in_gpu reset flag
@@ -3852,6 +3866,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ amdgpu_device_set_mcbp(adev);
+
/* Get rid of things like offb */
r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
if (r)
@@ -4018,6 +4034,11 @@ fence_driver_init:
/* Get a log2 for easy divisions. */
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
+ r = amdgpu_atombios_sysfs_init(adev);
+ if (r)
+ drm_err(&adev->ddev,
+ "registering atombios sysfs failed (%d).\n", r);
+
r = amdgpu_pm_sysfs_init(adev);
if (r)
DRM_ERROR("registering pm sysfs failed (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 3b711babd4e2..0593ef8fe0a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -180,7 +180,7 @@ uint amdgpu_dc_feature_mask = 2;
uint amdgpu_dc_debug_mask;
uint amdgpu_dc_visual_confirm;
int amdgpu_async_gfx_ring = 1;
-int amdgpu_mcbp;
+int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
int amdgpu_mes_kiq;
@@ -634,10 +634,10 @@ module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);
/**
* DOC: mcbp (int)
- * It is used to enable mid command buffer preemption. (0 = disabled (default), 1 = enabled)
+ * It is used to enable mid command buffer preemption. (0 = disabled, 1 = enabled, -1 = auto (default))
*/
MODULE_PARM_DESC(mcbp,
- "Enable Mid-command buffer preemption (0 = disabled (default), 1 = enabled)");
+ "Enable Mid-command buffer preemption (0 = disabled, 1 = enabled), -1 = auto (default)");
module_param_named(mcbp, amdgpu_mcbp, int, 0444);
/**
@@ -2899,12 +2899,10 @@ static struct pci_error_handlers amdgpu_pci_err_handler = {
extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
-extern const struct attribute_group amdgpu_vbios_version_attr_group;
static const struct attribute_group *amdgpu_sysfs_groups[] = {
&amdgpu_vram_mgr_attr_group,
&amdgpu_gtt_mgr_attr_group,
- &amdgpu_vbios_version_attr_group,
NULL,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index ce0f7a8ad4b8..a4ff515ce896 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -434,6 +434,7 @@ struct amdgpu_gfx {
uint16_t xcc_mask;
uint32_t num_xcc_per_xcp;
struct mutex partition_mutex;
+ bool mcbp; /* mid command buffer preemption */
};
struct amdgpu_gfx_ras_reg_entry {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 3add4b4f0667..2ff2897fd1db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -255,7 +255,8 @@ int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (adev->jpeg.harvest_config & (1 << i))
+ if (adev->jpeg.harvest_config & (1 << i) ||
+ !adev->jpeg.inst[i].ras_poison_irq.funcs)
continue;
r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e3531aa3c8bd..cca5a495611f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -805,7 +805,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
- if (amdgpu_mcbp)
+ if (adev->gfx.mcbp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
if (amdgpu_is_tmz(adev))
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
@@ -1247,7 +1247,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto error_vm;
}
- if (amdgpu_mcbp) {
+ if (adev->gfx.mcbp) {
uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e15c27e05564..6d676bdd1505 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -839,6 +839,7 @@ static bool psp_skip_tmr(struct psp_context *psp)
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
return true;
default:
@@ -2039,6 +2040,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ /* don't try again */
+ psp->securedisplay_context.context.bin_desc.size_bytes = 0;
}
return 0;
@@ -3703,7 +3706,6 @@ static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
int amdgpu_psp_sysfs_init(struct amdgpu_device *adev)
{
int ret = 0;
- struct psp_context *psp = &adev->psp;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -3712,10 +3714,6 @@ int amdgpu_psp_sysfs_init(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
- if (!psp->adev) {
- psp->adev = adev;
- psp_v13_0_set_psp_funcs(psp);
- }
ret = sysfs_create_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr);
if (ret)
dev_err(adev->dev, "Failed to create device file psp_vbflash");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 12010c988c8b..123bcf5c2bb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -116,7 +116,6 @@ static const struct file_operations amdgpu_rap_debugfs_ops = {
void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
if (!adev->psp.rap_context.context.initialized)
@@ -124,5 +123,4 @@ void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
adev, &amdgpu_rap_debugfs_ops);
-#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 4769a18304d7..8aaa427f8c0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2065,6 +2065,14 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
reset_context.method = AMD_RESET_METHOD_MODE2;
}
+
+ /* Fatal error occurs in poison mode, mode1 reset is used to
+ * recover gpu.
+ */
+ if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
+ ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ }
}
amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
@@ -2955,9 +2963,12 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
return;
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
dev_info(adev->dev, "uncorrectable hardware error"
"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
+ ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
amdgpu_ras_reset_gpu(adev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 46bf1889a9d7..ffb49b2d533a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -340,6 +340,7 @@ enum amdgpu_ras_ret {
#define AMDGPU_RAS_ERR_ADDRESS_VALID (1 << 2)
#define AMDGPU_RAS_GPU_RESET_MODE2_RESET (0x1 << 0)
+#define AMDGPU_RAS_GPU_RESET_MODE1_RESET (0x1 << 1)
struct amdgpu_ras_err_status_reg_entry {
uint32_t hwip;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index 73516abef662..b779ee4bbaa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -423,6 +423,9 @@ void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mu
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
unsigned offset;
+ if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
+ return;
+
offset = ring->wptr & ring->buf_mask;
amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 78ec3420ef85..dacf281d2b21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -72,7 +72,7 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
int r;
/* don't enable OS preemption on SDMA under SRIOV */
- if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
+ if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
return 0;
if (ring->is_mes_queue) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index acbef1a24b9c..ae455aab5d29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -1198,7 +1198,8 @@ int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
+ if (adev->vcn.harvest_config & (1 << i) ||
+ !adev->vcn.inst[i].ras_poison_irq.funcs)
continue;
r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 25b4d7f0bd35..41aa853a07d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -66,9 +66,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
adev->cg_flags = 0;
adev->pg_flags = 0;
- /* enable mcbp for sriov */
- amdgpu_mcbp = 1;
-
/* Reduce kcq number to 2 to reduce latency */
if (amdgpu_num_kcq == -1)
amdgpu_num_kcq = 2;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 143d11afe0e5..291977b93b1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1771,18 +1771,30 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
/* Insert partial mapping before the range */
if (!list_empty(&before->list)) {
+ struct amdgpu_bo *bo = before->bo_va->base.bo;
+
amdgpu_vm_it_insert(before, &vm->va);
if (before->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
+
+ if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+ !before->bo_va->base.moved)
+ amdgpu_vm_bo_moved(&before->bo_va->base);
} else {
kfree(before);
}
/* Insert partial mapping after the range */
if (!list_empty(&after->list)) {
+ struct amdgpu_bo *bo = after->bo_va->base.bo;
+
amdgpu_vm_it_insert(after, &vm->va);
if (after->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
+
+ if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+ !after->bo_va->base.moved)
+ amdgpu_vm_bo_moved(&after->bo_va->base);
} else {
kfree(after);
}
@@ -2233,16 +2245,16 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r)
return r;
- /* Sanity checks */
- if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
- r = -EINVAL;
- goto unreserve_bo;
- }
-
/* Check if PD needs to be reinitialized and do it before
* changing any other state, in case it fails.
*/
if (pte_support_ats != vm->pte_support_ats) {
+ /* Sanity checks */
+ if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
+ r = -EINVAL;
+ goto unreserve_bo;
+ }
+
vm->pte_support_ats = pte_support_ats;
r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index d733fa6e7477..d175e862f222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -132,6 +132,9 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
for (i = 0; i < MAX_XCP; ++i)
xcp_mgr->xcp[i].valid = false;
+ /* This is needed for figuring out the memory id of the xcp */
+ xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;
+
for (i = 0; i < num_xcps; ++i) {
for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
@@ -157,7 +160,6 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
xcp_mgr->num_xcps = num_xcps;
amdgpu_xcp_update_partition_sched_list(adev);
- xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;
return 0;
}
@@ -232,7 +234,10 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
ddev = adev_to_drm(adev);
- for (i = 0; i < MAX_XCP; i++) {
+ /* xcp #0 shares drm device setting with adev */
+ adev->xcp_mgr->xcp->ddev = ddev;
+
+ for (i = 1; i < MAX_XCP; i++) {
ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
if (ret)
return ret;
@@ -322,7 +327,7 @@ int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
if (!adev->xcp_mgr)
return 0;
- for (i = 0; i < MAX_XCP; i++) {
+ for (i = 1; i < MAX_XCP; i++) {
ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
if (ret)
return ret;
@@ -339,7 +344,7 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
if (!adev->xcp_mgr)
return;
- for (i = 0; i < MAX_XCP; i++) {
+ for (i = 1; i < MAX_XCP; i++) {
p_ddev = adev->xcp_mgr->xcp[i].ddev;
drm_dev_unplug(p_ddev);
p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index be984f8c71c7..44af8022b89f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -8307,7 +8307,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED)
@@ -8482,7 +8482,7 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
{
uint32_t dw2 = 0;
- if (amdgpu_mcbp)
+ if (ring->adev->gfx.mcbp)
gfx_v10_0_ring_emit_ce_meta(ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 690e121d9dda..3a7af59e83ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -5311,7 +5311,7 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index c1ee54d4c3d3..9e3b835bdbb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -623,12 +623,28 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
int num_xccs_per_xcp)
{
- int ret;
-
- ret = psp_spatial_partition(&adev->psp, NUM_XCC(adev->gfx.xcc_mask) /
- num_xccs_per_xcp);
- if (ret)
- return ret;
+ int ret, i, num_xcc;
+ u32 tmp = 0;
+
+ if (adev->psp.funcs) {
+ ret = psp_spatial_partition(&adev->psp,
+ NUM_XCC(adev->gfx.xcc_mask) /
+ num_xccs_per_xcp);
+ if (ret)
+ return ret;
+ } else {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ for (i = 0; i < num_xcc; i++) {
+ tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
+ num_xccs_per_xcp);
+ tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
+ i % num_xccs_per_xcp);
+ WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
+ tmp);
+ }
+ ret = 0;
+ }
adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
@@ -1762,6 +1778,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
+ if (amdgpu_sriov_vf(adev) && adev->in_suspend)
+ amdgpu_ring_clear_ring(ring);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
@@ -1960,6 +1978,16 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
if (amdgpu_gfx_disable_kcq(adev, xcc_id))
DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
+ if (amdgpu_sriov_vf(adev)) {
+ /* must disable polling for SRIOV when hw is finished, otherwise
+ * the CPC engine may keep fetching the WB address, which is already
+ * invalid after sw has finished, and trigger a DMAR read error on
+ * the hypervisor side.
+ */
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
+ return;
+ }
+
/* Use deinitialize sequence from CAIL when unbinding device
* from driver, otherwise KIQ is hanging when binding back
*/
@@ -1984,7 +2012,8 @@ static int gfx_v9_4_3_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfx_v9_4_3_init_golden_registers(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfx_v9_4_3_init_golden_registers(adev);
gfx_v9_4_3_constants_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index aa761ff3a5fa..4038455d7998 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -345,8 +345,8 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
}
#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1
-#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x00000009 // 1=1us, 9=1ms
-#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 4ms
+#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x0000000A // 1=1us, 9=1ms, 10=4ms
+#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 400ms
static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
bool enable)
@@ -479,9 +479,12 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
- data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
- data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
- data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+ if (pci_is_thunderbolt_attached(adev->pdev))
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ else
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index ea5e12390d18..f413898dda37 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -578,6 +578,9 @@ static void sdma_v4_4_2_inst_enable(struct amdgpu_device *adev, bool enable,
return;
}
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
+ return;
+
for_each_inst(i, inst_mask) {
f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1);
@@ -899,15 +902,12 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
WREG32_SDMA(i, regSDMA_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio.funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
-
- /* unhalt engine */
- temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
- temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
- WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
+ WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index b48bb5212488..259795098173 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1424,8 +1424,10 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
*/
static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
+ vcn_v4_0_pause_dpg_mode(adev, inst_idx, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 9d4abfd8b55e..0b3dc754e06b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -138,9 +138,12 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(9, 4, 0): /* VEGA20 */
case IP_VERSION(9, 4, 1): /* ARCTURUS */
case IP_VERSION(9, 4, 2): /* ALDEBARAN */
- case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
break;
+ case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
+ kfd->device_info.event_interrupt_class =
+ &event_interrupt_class_v9_4_3;
+ break;
case IP_VERSION(10, 3, 1): /* VANGOGH */
case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
@@ -518,6 +521,7 @@ static int kfd_gws_init(struct kfd_node *node)
&& kfd->mec2_fw_version >= 0x30) ||
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
&& kfd->mec2_fw_version >= 0x28) ||
+ (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) ||
(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
&& kfd->mec2_fw_version >= 0x6b))))
@@ -598,6 +602,41 @@ static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
}
}
+static void kfd_setup_interrupt_bitmap(struct kfd_node *node,
+ unsigned int kfd_node_idx)
+{
+ struct amdgpu_device *adev = node->adev;
+ uint32_t xcc_mask = node->xcc_mask;
+ uint32_t xcc, mapped_xcc;
+ /*
+ * Interrupt bitmap is setup for processing interrupts from
+ * different XCDs and AIDs.
+ * Interrupt bitmap is defined as follows:
+ * 1. Bits 0-15 - correspond to the NodeId field.
+ * Each bit corresponds to NodeId number. For example, if
+ * a KFD node has interrupt bitmap set to 0x7, then this
+ * KFD node will process interrupts with NodeId = 0, 1 and 2
+ * in the IH cookie.
+ * 2. Bits 16-31 - unused.
+ *
+ * Please note that the kfd_node_idx argument passed to this
+ * function is not related to NodeId field received in the
+ * IH cookie.
+ *
+ * In CPX mode, a KFD node will process an interrupt if:
+ * - the Node Id matches the corresponding bit set in
+ * Bits 0-15.
+ * - AND VMID reported in the interrupt lies within the
+ * VMID range of the node.
+ */
+ for_each_inst(xcc, xcc_mask) {
+ mapped_xcc = GET_INST(GC, xcc);
+ node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
+ }
+ dev_info(kfd_device, "Node: %d, interrupt_bitmap: %x\n", kfd_node_idx,
+ node->interrupt_bitmap);
+}
+
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources)
{
@@ -797,6 +836,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
amdgpu_amdkfd_get_local_mem_info(kfd->adev,
&node->local_mem_info, node->xcp);
+ if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3))
+ kfd_setup_interrupt_bitmap(node, i);
+
/* Initialize the KFD node */
if (kfd_init_node(node)) {
dev_err(kfd_device, "Error initializing KFD node\n");
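To make the bitmap arithmetic above concrete, a small sketch; the physical XCC instance numbers assumed for the node are hypothetical.

#include <stdio.h>

int main(void)
{
	/* Suppose GET_INST(GC, xcc) maps this node's XCCs to physical instances 0 and 1. */
	int mapped_xccs[] = { 0, 1 };
	unsigned int interrupt_bitmap = 0;
	unsigned int i, mapped_xcc;

	for (i = 0; i < sizeof(mapped_xccs) / sizeof(mapped_xccs[0]); i++) {
		mapped_xcc = mapped_xccs[i];
		interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
	}

	/* Prints 0x7: per the comment above, NodeIds 0, 1 and 2 are accepted. */
	printf("0x%x\n", interrupt_bitmap);
	return 0;
}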
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index d5c9f30552e3..f0731a6a5306 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -446,7 +446,36 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
}
}
+static bool event_interrupt_isr_v9_4_3(struct kfd_node *node,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
+{
+ uint16_t node_id, vmid;
+
+ /*
+ * For GFX 9.4.3, process the interrupt if:
+ * - NodeID field in IH entry matches the corresponding bit
+ * set in interrupt_bitmap Bits 0-15.
+ * OR
+ * - If partition mode is CPX and interrupt came from
+ * Node_id 0,4,8,12, then check if the Bit (16 + client id)
+ * is set in interrupt bitmap Bits 16-31.
+ */
+ node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
+ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+ if (kfd_irq_is_from_node(node, node_id, vmid))
+ return event_interrupt_isr_v9(node, ih_ring_entry,
+ patched_ihre, patched_flag);
+ return false;
+}
+
const struct kfd_event_interrupt_class event_interrupt_class_v9 = {
.interrupt_isr = event_interrupt_isr_v9,
.interrupt_wq = event_interrupt_wq_v9,
};
+
+const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3 = {
+ .interrupt_isr = event_interrupt_isr_v9_4_3,
+ .interrupt_wq = event_interrupt_wq_v9,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 7364a5d77c6e..d4c9ee3f9953 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1444,6 +1444,7 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
+extern const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3;
extern const struct kfd_event_interrupt_class event_interrupt_class_v10;
extern const struct kfd_event_interrupt_class event_interrupt_class_v11;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 3d3611705d41..a844e68211ac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -2142,6 +2142,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
{
uint32_t irq_drain_fence[8];
+ uint8_t node_id = 0;
int r = 0;
if (!KFD_IS_SOC15(pdd->dev))
@@ -2154,6 +2155,14 @@ int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
KFD_IRQ_FENCE_CLIENTID;
irq_drain_fence[3] = pdd->process->pasid;
+ /*
+ * For GFX 9.4.3, also send the NodeId in IH cookie DW[3]
+ */
+ if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3)) {
+ node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
+ irq_drain_fence[3] |= node_id << 16;
+ }
+
/* ensure stale irqs scheduled KFD interrupts and send drain fence. */
if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
irq_drain_fence)) {
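
A small standalone illustration of the DW[3] packing added above, using the same bit positions as SOC15_PASID_FROM_IH_ENTRY and the new SOC15_NODEID_FROM_IH_ENTRY macro; the PASID and NodeId values are made up for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dw3;
	uint16_t pasid = 0x1234;	/* assumed example PASID */
	uint8_t node_id = 2;		/* e.g. ffs(interrupt_bitmap) - 1 for bitmap 0x4 */

	/* Pack as kfd_process_drain_interrupts() now does. */
	dw3 = pasid | (uint32_t)node_id << 16;

	/* Unpack with the SOC15_*_FROM_IH_ENTRY bit positions. */
	printf("pasid=0x%x node_id=%u\n", dw3 & 0xffff, (dw3 >> 16) & 0xff);
	return 0;
}
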
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 9ad1a2186a24..ba9d69054119 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -123,16 +123,24 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
if (!gws && pdd->qpd.num_gws == 0)
return -EINVAL;
- if (gws)
- ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
- gws, &mem);
- else
- ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
- pqn->q->gws);
- if (unlikely(ret))
- return ret;
+ if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3)) {
+ if (gws)
+ ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
+ gws, &mem);
+ else
+ ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
+ pqn->q->gws);
+ if (unlikely(ret))
+ return ret;
+ pqn->q->gws = mem;
+ } else {
+ /*
+ * Intentionally set GWS to a non-NULL value
+ * for GFX 9.4.3.
+ */
+ pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
+ }
- pqn->q->gws = mem;
pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
@@ -164,7 +172,8 @@ void pqm_uninit(struct process_queue_manager *pqm)
struct process_queue_node *pqn, *next;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
- if (pqn->q && pqn->q->gws)
+ if (pqn->q && pqn->q->gws &&
+ KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3))
amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
pqn->q->gws);
kfd_procfs_del_queue(pqn->q);
@@ -446,8 +455,10 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
}
if (pqn->q->gws) {
- amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
- pqn->q->gws);
+ if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3))
+ amdgpu_amdkfd_remove_gws_from_process(
+ pqm->process->kgd_process_info,
+ pqn->q->gws);
pdd->qpd.num_gws = 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 90b86a6ac7bd..61fc62f3e003 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -2107,6 +2107,10 @@ int kfd_topology_add_device(struct kfd_node *gpu)
if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev))
dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
+ if (dev->gpu->adev->gmc.is_app_apu ||
+ dev->gpu->adev->gmc.xgmi.connected_to_cpu)
+ dev->node_props.capability |= HSA_CAP_FLAGS_COHERENTHOSTACCESS;
+
kfd_debug_print_topology();
kfd_notify_gpu_change(gpu_id, 1);
diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
index e3f3b0b93a59..10138676f27f 100644
--- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
@@ -40,6 +40,7 @@
#define SOC15_VMID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 24 & 0xf)
#define SOC15_VMID_TYPE_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 31 & 0x1)
#define SOC15_PASID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[3]) & 0xffff)
+#define SOC15_NODEID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[3]) >> 16 & 0xff)
#define SOC15_CONTEXT_ID0_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[4]))
#define SOC15_CONTEXT_ID1_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[5]))
#define SOC15_CONTEXT_ID2_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[6]))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 514f6785a020..ff0a217b9d56 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5063,11 +5063,7 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane,
s32 y, s32 width, s32 height,
int *i, bool ffu)
{
- if (*i > DC_MAX_DIRTY_RECTS)
- return;
-
- if (*i == DC_MAX_DIRTY_RECTS)
- goto out;
+ WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
dirty_rect->x = x;
dirty_rect->y = y;
@@ -5083,7 +5079,6 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane,
"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
plane->base.id, x, y, width, height);
-out:
(*i)++;
}
@@ -5170,6 +5165,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
*dirty_regions_changed = bb_changed;
+ if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
if (bb_changed) {
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
new_plane_state->crtc_x,
@@ -5199,9 +5197,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
new_plane_state->crtc_h, &i, false);
}
- if (i > DC_MAX_DIRTY_RECTS)
- goto ffu;
-
flip_addrs->dirty_rect_count = i;
return;
@@ -7258,13 +7253,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
- /* most eDP supports only timings from its edid,
- * usually only detailed timings are available
- * from eDP edid. timings which are not from edid
- * may damage eDP
- */
- if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
- amdgpu_dm_connector_add_common_modes(encoder, connector);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, edid);
}
amdgpu_dm_fbc_init(connector);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 5ea3284b2b77..d63ee636483b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -336,6 +336,153 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
return size;
}
+static bool dp_mst_is_end_device(struct amdgpu_dm_connector *aconnector)
+{
+ bool is_end_device = false;
+ struct drm_dp_mst_topology_mgr *mgr = NULL;
+ struct drm_dp_mst_port *port = NULL;
+
+ if (aconnector->mst_root && aconnector->mst_root->mst_mgr.mst_state) {
+ mgr = &aconnector->mst_root->mst_mgr;
+ port = aconnector->mst_output_port;
+
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
+ port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV)
+ is_end_device = true;
+ drm_modeset_unlock(&mgr->base.lock);
+ }
+
+ return is_end_device;
+}
+
+/* Change MST link setting
+ *
+ * valid lane count value: 1, 2, 4
+ * valid link rate value:
+ * 06h = 1.62Gbps per lane
+ * 0Ah = 2.7Gbps per lane
+ * 0Ch = 3.24Gbps per lane
+ * 14h = 5.4Gbps per lane
+ * 1Eh = 8.1Gbps per lane
+ * 3E8h = 10.0Gbps per lane
+ * 546h = 13.5Gbps per lane
+ * 7D0h = 20.0Gbps per lane
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x/mst_link_settings
+ *
+ * for example, to force 2 lanes at 10.0Gbps per lane:
+ * echo 2 0x3e8 > /sys/kernel/debug/dri/0/DP-x/mst_link_settings
+ *
+ * Valid input triggers a hotplug event so the new link setting is applied.
+ * Invalid input resets the preferred training settings.
+ *
+ * Usage mirrors the link_settings entry.
+ *
+ */
+static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct dc_link *link = aconnector->dc_link;
+ struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+ struct dc *dc = (struct dc *)link->dc;
+ struct dc_link_settings prefer_link_settings;
+ char *wr_buf = NULL;
+ const uint32_t wr_buf_size = 40;
+ /* 0: lane_count; 1: link_rate */
+ int max_param_num = 2;
+ uint8_t param_nums = 0;
+ long param[2];
+ bool valid_input = true;
+
+ if (!dp_mst_is_end_device(aconnector))
+ return -EINVAL;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOSPC;
+
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data could not be read\n");
+ return -EINVAL;
+ }
+
+ switch (param[0]) {
+ case LANE_COUNT_ONE:
+ case LANE_COUNT_TWO:
+ case LANE_COUNT_FOUR:
+ break;
+ default:
+ valid_input = false;
+ break;
+ }
+
+ switch (param[1]) {
+ case LINK_RATE_LOW:
+ case LINK_RATE_HIGH:
+ case LINK_RATE_RBR2:
+ case LINK_RATE_HIGH2:
+ case LINK_RATE_HIGH3:
+ case LINK_RATE_UHBR10:
+ case LINK_RATE_UHBR13_5:
+ case LINK_RATE_UHBR20:
+ break;
+ default:
+ valid_input = false;
+ break;
+ }
+
+ if (!valid_input) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid input value. No HW will be programmed\n");
+ mutex_lock(&adev->dm.dc_lock);
+ dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
+ mutex_unlock(&adev->dm.dc_lock);
+ return -EINVAL;
+ }
+
+ /* save the user-forced lane_count and link_rate as preferred settings;
+ * spread spectrum will not be changed
+ */
+ prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.use_link_rate_set = false;
+ prefer_link_settings.lane_count = param[0];
+ prefer_link_settings.link_rate = param[1];
+
+ /* skip immediate retrain; train to the new link setting after the hotplug event is triggered */
+ mutex_lock(&adev->dm.dc_lock);
+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ mutex_lock(&aconnector->base.dev->mode_config.mutex);
+ aconnector->base.force = DRM_FORCE_OFF;
+ mutex_unlock(&aconnector->base.dev->mode_config.mutex);
+ drm_kms_helper_hotplug_event(aconnector->base.dev);
+
+ msleep(100);
+
+ mutex_lock(&aconnector->base.dev->mode_config.mutex);
+ aconnector->base.force = DRM_FORCE_UNSPECIFIED;
+ mutex_unlock(&aconnector->base.dev->mode_config.mutex);
+ drm_kms_helper_hotplug_event(aconnector->base.dev);
+
+ kfree(wr_buf);
+ return size;
+}
+
/* function: get current DP PHY settings: voltage swing, pre-emphasis,
* post-cursor2 (defined by VESA DP specification)
*
@@ -2668,6 +2815,12 @@ static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = {
.llseek = default_llseek
};
+static const struct file_operations dp_mst_link_settings_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_mst_link_setting,
+ .llseek = default_llseek
+};
+
static const struct {
char *name;
const struct file_operations *fops;
@@ -2691,7 +2844,8 @@ static const struct {
{"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops},
{"is_mst_connector", &dp_is_mst_connector_fops},
{"mst_progress_status", &dp_mst_progress_status_fops},
- {"is_dpia_link", &is_dpia_link_fops}
+ {"is_dpia_link", &is_dpia_link_fops},
+ {"mst_link_settings", &dp_mst_link_settings_debugfs_fops}
};
static const struct {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index cd20cfc04996..d9a482908380 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -44,6 +44,30 @@
#include "dm_helpers.h"
#include "ddc_service_types.h"
+static u32 edid_extract_panel_id(struct edid *edid)
+{
+ return (u32)edid->mfg_id[0] << 24 |
+ (u32)edid->mfg_id[1] << 16 |
+ (u32)EDID_PRODUCT_ID(edid);
+}
+
+static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
+{
+ uint32_t panel_id = edid_extract_panel_id(edid);
+
+ switch (panel_id) {
+ /* Workaround for some monitors that do not work well with FAMS */
+ case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
+ case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
+ case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
+ DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
+ edid_caps->panel_patch.disable_fams = true;
+ break;
+ default:
+ return;
+ }
+}
+
/* dm_helpers_parse_edid_caps
*
* Parse edid caps
@@ -115,6 +139,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+ apply_edid_quirks(edid_buf, edid_caps);
+
kfree(sads);
kfree(sadb);
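
To make the quirk table above easier to read, here is a small sketch of how a panel id such as drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E) lines up with the bytes edid_extract_panel_id() reads; it assumes only the standard EDID packing of three letters at 5 bits each ('A' == 1), and the helper name is made up:

#include <stdint.h>
#include <stdio.h>

/* Illustration: 'S','A','M' compress to the two mfg_id bytes 0x4C 0x2D,
 * so product 0x0E5E yields the 0x4C2D0E5E value matched by the quirk table.
 */
static uint32_t example_panel_id(char a, char b, char c, uint16_t product)
{
	uint32_t mfg = ((a - '@') & 0x1f) << 10 |
		       ((b - '@') & 0x1f) << 5 |
		       ((c - '@') & 0x1f);

	return mfg << 16 | product;
}

int main(void)
{
	printf("0x%08x\n", example_panel_id('S', 'A', 'M', 0x0E5E));
	return 0;
}
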
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index d647f68fd563..4f61d4f257cd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -24,6 +24,7 @@
*/
#include "amdgpu_dm_psr.h"
+#include "dc_dmub_srv.h"
#include "dc.h"
#include "dm_helpers.h"
#include "amdgpu_dm.h"
@@ -50,7 +51,7 @@ static bool link_supports_psrsu(struct dc_link *link)
!link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
return false;
- return true;
+ return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 6a811755e2e6..cb992aca760d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -541,9 +541,18 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
- if (!clk_mgr_base->clks.p_state_change_support)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
+ if (!clk_mgr_base->clks.p_state_change_support) {
+ if (dc->clk_mgr->dc_mode_softmax_enabled) {
+ /* On DCN32x we will never have the functional UCLK min above the softmax
+ * since we calculate mode support based on softmax being the max UCLK
+ * frequency.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ } else {
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+ }
+ }
}
/* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
@@ -808,8 +817,7 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
if (!clk_mgr->smu_present)
return;
- dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->bw_params->max_memclk_mhz);
}
/* Get current memclk states, update bounding box */
@@ -827,6 +835,7 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
&num_entries_per_clk->num_memclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
+ clk_mgr_base->bw_params->dc_mode_softmax_memclk = clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz;
/* memclk must have at least one level */
num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? num_entries_per_clk->num_memclk_levels : 1;
@@ -841,7 +850,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
} else {
num_levels = num_entries_per_clk->num_fclk_levels;
}
-
+ clk_mgr_base->bw_params->max_memclk_mhz =
+ clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
if (clk_mgr->dpm_present && !num_levels)
@@ -894,6 +904,25 @@ static bool dcn32_is_smu_present(struct clk_mgr *clk_mgr_base)
return clk_mgr->smu_present;
}
+static void dcn32_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
+}
+
+static void dcn32_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
+}
static struct clk_mgr_funcs dcn32_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
@@ -904,6 +933,8 @@ static struct clk_mgr_funcs dcn32_funcs = {
.notify_wm_ranges = dcn32_notify_wm_ranges,
.set_hard_min_memclk = dcn32_set_hard_min_memclk,
.set_hard_max_memclk = dcn32_set_hard_max_memclk,
+ .set_max_memclk = dcn32_set_max_memclk,
+ .set_min_memclk = dcn32_set_min_memclk,
.get_memclk_states_from_smu = dcn32_get_memclk_states_from_smu,
.are_clock_states_equal = dcn32_are_clock_states_equal,
.enable_pme_wa = dcn32_enable_pme_wa,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index dd3a9d06c6e2..d133e4186a52 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1629,6 +1629,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
+ if (dc->debug.force_odm_combine)
+ return false;
+
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return false;
@@ -3577,6 +3580,13 @@ static void commit_planes_for_stream_fast(struct dc *dc,
hwss_execute_sequence(dc,
context->block_sequence,
context->block_sequence_steps);
+ /* Clear update flags so next flip doesn't have redundant programming
+ * (if there's no stream update, the update flags are not cleared).
+ */
+ if (top_pipe_to_program->plane_state)
+ top_pipe_to_program->plane_state->update_flags.raw = 0;
+ if (top_pipe_to_program->stream)
+ top_pipe_to_program->stream->update_flags.raw = 0;
}
static void commit_planes_for_stream(struct dc *dc,
@@ -4233,6 +4243,117 @@ static void update_seamless_boot_flags(struct dc *dc,
}
}
+static void populate_fast_updates(struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update)
+{
+ int i = 0;
+
+ if (stream_update) {
+ fast_update[0].out_transfer_func = stream_update->out_transfer_func;
+ fast_update[0].output_csc_transform = stream_update->output_csc_transform;
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ fast_update[i].flip_addr = srf_updates[i].flip_addr;
+ fast_update[i].gamma = srf_updates[i].gamma;
+ fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
+ fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
+ fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
+ }
+}
+
+static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+{
+ int i;
+
+ if (fast_update[0].out_transfer_func ||
+ fast_update[0].output_csc_transform)
+ return true;
+
+ for (i = 0; i < surface_count; i++) {
+ if (fast_update[i].flip_addr ||
+ fast_update[i].gamma ||
+ fast_update[i].gamut_remap_matrix ||
+ fast_update[i].input_csc_color_matrix ||
+ fast_update[i].coeff_reduction_factor)
+ return true;
+ }
+
+ return false;
+}
+
+static bool full_update_required(struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ struct dc_stream_state *stream)
+{
+
+ int i;
+ struct dc_stream_status *stream_status;
+
+ for (i = 0; i < surface_count; i++) {
+ if (srf_updates &&
+ (srf_updates[i].plane_info ||
+ srf_updates[i].scaling_info ||
+ (srf_updates[i].hdr_mult.value &&
+ srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
+ srf_updates[i].in_transfer_func ||
+ srf_updates[i].func_shaper ||
+ srf_updates[i].lut3d_func ||
+ srf_updates[i].blend_tf))
+ return true;
+ }
+
+ if (stream_update &&
+ (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+ stream_update->integer_scaling_update) ||
+ stream_update->hdr_static_metadata ||
+ stream_update->abm_level ||
+ stream_update->periodic_interrupt ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket ||
+ stream_update->hfvsif_infopacket ||
+ stream_update->vtem_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
+ stream_update->dpms_off ||
+ stream_update->allow_freesync ||
+ stream_update->vrr_active_variable ||
+ stream_update->vrr_active_fixed ||
+ stream_update->gamut_remap ||
+ stream_update->output_color_space ||
+ stream_update->dither_option ||
+ stream_update->wb_update ||
+ stream_update->dsc_config ||
+ stream_update->mst_bw_update ||
+ stream_update->func_shaper ||
+ stream_update->lut3d_func ||
+ stream_update->pending_test_pattern ||
+ stream_update->crtc_timing_adjust))
+ return true;
+
+ if (stream) {
+ stream_status = dc_stream_get_status(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+
+ return false;
+}
+
+static bool fast_update_only(struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ struct dc_stream_state *stream)
+{
+ return fast_updates_exist(fast_update, surface_count)
+ && !full_update_required(srf_updates, surface_count, stream_update, stream);
+}
+
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -4242,6 +4363,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
enum surface_update_type update_type;
int i;
struct mall_temp_config mall_temp_config;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
* cause underflow. Apply stream configuration with minimal pipe
@@ -4250,6 +4372,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
bool force_minimal_pipe_splitting;
bool is_plane_addition;
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
@@ -4300,7 +4423,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
}
update_seamless_boot_flags(dc, context, surface_count, stream);
- if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) {
+ if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -4357,7 +4481,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_state *context;
struct dc_context *dc_ctx = dc->ctx;
int i, j;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -4443,7 +4569,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
update_seamless_boot_flags(dc, context, surface_count, stream);
- if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) {
+ if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -4753,15 +4880,17 @@ static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memcl
*/
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
- uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
- unsigned int softMax, maxDPM, funcMin;
+ unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
bool p_state_change_support;
- if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
+ if (!dc->config.dc_mode_clk_limit_support)
return;
softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
- maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
+ for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
+ if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
+ maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
+ }
funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index d7d00fefaab9..cb2bf9a466f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -610,7 +610,7 @@ void hwss_build_fast_sequence(struct dc *dc,
current_mpc_pipe = current_pipe;
while (current_mpc_pipe) {
- if (!current_mpc_pipe->bottom_pipe && !pipe_ctx->next_odm_pipe &&
+ if (!current_mpc_pipe->bottom_pipe && !current_mpc_pipe->next_odm_pipe &&
current_mpc_pipe->stream && current_mpc_pipe->plane_state &&
current_mpc_pipe->plane_state->update_flags.bits.addr_update &&
!current_mpc_pipe->plane_state->skip_manual_trigger) {
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 26d05e225088..63948170fd6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.239"
+#define DC_VER "3.2.241"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -416,7 +416,7 @@ struct dc_config {
uint8_t force_bios_fixed_vs;
int sdpif_request_limit_words_per_umc;
bool use_old_fixed_vs_sequence;
- bool disable_subvp_drr;
+ bool dc_mode_clk_limit_support;
};
enum visual_confirm {
@@ -850,6 +850,7 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool disable_fams;
+ bool disable_fams_gaming;
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
@@ -1264,6 +1265,16 @@ struct dc_scaling_info {
struct scaling_taps scaling_quality;
};
+struct dc_fast_update {
+ const struct dc_flip_addrs *flip_addr;
+ const struct dc_gamma *gamma;
+ const struct colorspace_transform *gamut_remap_matrix;
+ const struct dc_csc_transform *input_csc_color_matrix;
+ const struct fixed31_32 *coeff_reduction_factor;
+ struct dc_transfer_func *out_transfer_func;
+ struct dc_csc_transform *output_csc_transform;
+};
+
struct dc_surface_update {
struct dc_plane_state *surface;
@@ -1525,6 +1536,7 @@ struct dc_link {
bool dpia_forced_tbt3_mode;
bool dongle_mode_timing_override;
bool blank_stream_on_ocs_change;
+ bool read_dpcd204h_on_irq_hpd;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index c52c40b16387..c753c6f30dd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1011,3 +1011,10 @@ void dc_send_update_cursor_info_to_dmu(
dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
}
+
+bool dc_dmub_check_min_version(struct dmub_srv *srv)
+{
+ if (!srv->hw_funcs.is_psrsu_supported)
+ return true;
+ return srv->hw_funcs.is_psrsu_supported(srv);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index a5196a9292b3..099f94b6107c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -86,4 +86,5 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, b
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
+bool dc_dmub_check_min_version(struct dmub_srv *srv);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index e6c06325742a..168cb7094c95 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -266,7 +266,24 @@
type MASTER_COMM_INTERRUPT; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_CMD_REG_BYTE1; \
- type MASTER_COMM_CMD_REG_BYTE2
+ type MASTER_COMM_CMD_REG_BYTE2; \
+ type ABM1_HG_BIN_33_40_SHIFT_INDEX; \
+ type ABM1_HG_BIN_33_64_SHIFT_FLAG; \
+ type ABM1_HG_BIN_41_48_SHIFT_INDEX; \
+ type ABM1_HG_BIN_49_56_SHIFT_INDEX; \
+ type ABM1_HG_BIN_57_64_SHIFT_INDEX; \
+ type ABM1_HG_RESULT_DATA; \
+ type ABM1_HG_RESULT_INDEX; \
+ type ABM1_ACE_SLOPE_DATA; \
+ type ABM1_ACE_OFFSET_DATA; \
+ type ABM1_ACE_OFFSET_SLOPE_INDEX; \
+ type ABM1_ACE_THRES_INDEX; \
+ type ABM1_ACE_IGNORE_MASTER_LOCK_EN; \
+ type ABM1_ACE_READBACK_DB_REG_VALUE_EN; \
+ type ABM1_ACE_DBUF_REG_UPDATE_PENDING; \
+ type ABM1_ACE_LOCK; \
+ type ABM1_ACE_THRES_DATA_1; \
+ type ABM1_ACE_THRES_DATA_2
struct dce_abm_shift {
ABM_REG_FIELD_LIST(uint8_t);
@@ -288,6 +305,16 @@ struct dce_abm_registers {
uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES;
uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS;
uint32_t DC_ABM1_ACE_OFFSET_SLOPE_0;
+ uint32_t DC_ABM1_ACE_OFFSET_SLOPE_DATA;
+ uint32_t DC_ABM1_ACE_PWL_CNTL;
+ uint32_t DC_ABM1_HG_BIN_33_40_SHIFT_INDEX;
+ uint32_t DC_ABM1_HG_BIN_33_64_SHIFT_FLAG;
+ uint32_t DC_ABM1_HG_BIN_41_48_SHIFT_INDEX;
+ uint32_t DC_ABM1_HG_BIN_49_56_SHIFT_INDEX;
+ uint32_t DC_ABM1_HG_BIN_57_64_SHIFT_INDEX;
+ uint32_t DC_ABM1_HG_RESULT_DATA;
+ uint32_t DC_ABM1_HG_RESULT_INDEX;
+ uint32_t DC_ABM1_ACE_THRES_DATA;
uint32_t DC_ABM1_ACE_THRES_12;
uint32_t MASTER_COMM_CNTL_REG;
uint32_t MASTER_COMM_CMD_REG;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 808855886183..e115ff91aaaa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -974,10 +974,12 @@ enum dc_status resource_map_phy_clock_resources(
|| dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->clock_source =
dc->res_pool->dp_clock_source;
- else
- pipe_ctx->clock_source = find_matching_pll(
- &context->res_ctx, dc->res_pool,
- stream);
+ else {
+ if (stream && stream->link && stream->link->link_enc)
+ pipe_ctx->clock_source = find_matching_pll(
+ &context->res_ctx, dc->res_pool,
+ stream);
+ }
if (pipe_ctx->clock_source == NULL)
return DC_NO_CLOCK_SOURCE_RESOURCE;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 7a00fe525dfb..3538973bd0c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -308,7 +308,10 @@ bool cm_helper_convert_to_custom_float(
#define NUMBER_REGIONS 32
#define NUMBER_SW_SEGMENTS 16
-bool cm_helper_translate_curve_to_hw_format(
+#define DC_LOGGER \
+ ctx->logger
+
+bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint)
{
@@ -482,10 +485,18 @@ bool cm_helper_translate_curve_to_hw_format(
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
if (fixpoint == true) {
- rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
- rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
- rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
+ uint32_t red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red);
+ uint32_t green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green);
+ uint32_t blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue);
+
+ if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10)
+ DC_LOG_WARNING("Losing delta precision while programming shaper LUT.");
+
+ rgb->delta_red_reg = red_clamp & 0x3ff;
+ rgb->delta_green_reg = green_clamp & 0x3ff;
+ rgb->delta_blue_reg = blue_clamp & 0x3ff;
rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
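
The hunk above keeps only the low 10 bits of each U0.14 delta and warns when that truncation actually loses information; a tiny standalone sketch of the check follows, with the 14-bit input value made up for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t delta_u0d14 = 0x1234;		/* assumed clamp result */
	uint32_t delta_reg = delta_u0d14 & 0x3ff;	/* value the register can hold */

	if (delta_u0d14 >> 10)
		printf("losing delta precision: 0x%x -> 0x%x\n",
		       delta_u0d14, delta_reg);
	return 0;
}
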
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 3b8cd7410498..0a68b63d6126 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -106,6 +106,7 @@ bool cm_helper_convert_to_custom_float(
bool fixpoint);
bool cm_helper_translate_curve_to_hw_format(
+ struct dc_context *ctx,
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 20a1582be0b1..a50309039d08 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1843,7 +1843,7 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
* update.
*/
- else if (cm_helper_translate_curve_to_hw_format(
+ else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
stream->out_transfer_func,
&dpp->regamma_params, false)) {
dpp->funcs->dpp_program_regamma_pwl(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index eaf9e9ccad2a..4492bc2392b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -867,7 +867,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
params = &stream->out_transfer_func->pwl;
else if (pipe_ctx->stream->out_transfer_func->type ==
TF_TYPE_DISTRIBUTED_POINTS &&
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(dc->ctx,
stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
@@ -896,7 +896,7 @@ bool dcn20_set_blend_lut(
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
blend_lut = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->blend_tf,
&dpp_base->regamma_params, false);
blend_lut = &dpp_base->regamma_params;
@@ -918,7 +918,7 @@ bool dcn20_set_shaper_3dlut(
if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
shaper_lut = &plane_state->in_shaper_func->pwl;
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->in_shaper_func,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
@@ -1764,8 +1764,9 @@ static void dcn20_program_pipe(
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d)
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
index 6a3d3a0ec0a3..701c7d8bc038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
@@ -280,7 +280,7 @@ bool dwb3_ogam_set_input_transfer_func(
dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL);
if (dwb_ogam_lut) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(dwbc->ctx,
in_transfer_func_dwb_ogam,
dwb_ogam_lut, false);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index b9753867d97b..bf8864bc8a99 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -106,7 +106,7 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
if (stream->func_shaper->type == TF_TYPE_HWPWL) {
shaper_lut = &stream->func_shaper->pwl;
} else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(stream->func_shaper,
+ cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 1a0284a068b2..abe4c12a10b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -725,7 +725,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.use_max_lb = true,
- .exit_idle_opt_for_cursor_updates = true
+ .exit_idle_opt_for_cursor_updates = true,
+ .enable_legacy_fast_update = false,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1986,11 +1987,10 @@ bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc,
if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(context))
return false;
- // check if freesync enabled
if (!context->streams[0]->allow_freesync)
return false;
- if (context->streams[0]->vrr_active_variable)
+ if (context->streams[0]->vrr_active_variable && dc->debug.disable_fams_gaming)
return false;
context->streams[0]->fpo_in_use = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 7dc065ea247a..5ad6a22ee47d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -95,7 +95,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.use_max_lb = true,
- .exit_idle_opt_for_cursor_updates = true
+ .exit_idle_opt_for_cursor_updates = true,
+ .enable_legacy_fast_update = false,
};
static const struct dc_panel_config panel_config_defaults = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 6d9761395288..45956ef6f3f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -1190,6 +1190,7 @@ static bool dcn303_resource_construct(
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->config.dc_mode_clk_limit_support = true;
/* read VBIOS LTTPR caps */
if (ctx->dc_bios->funcs->get_lttpr_caps) {
enum bp_result bp_query_result;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index cf23d7bc560a..0746ed31d1d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -332,7 +332,7 @@ static void dccg314_dpp_root_clock_control(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (dccg->dpp_clock_gated[dpp_inst] == clock_on)
+ if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
return;
if (clock_on) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 7a43f8868500..4d2820ffe468 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -337,13 +337,14 @@ void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
-void dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
+unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
{
struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
- get_odm_config(pipe_ctx, NULL);
+ odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
@@ -361,11 +362,15 @@ void dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
+ if (odm_combine_factor == 2)
+ *k2_div = PIXEL_RATE_DIV_BY_2;
}
}
if ((*k1_div == PIXEL_RATE_DIV_NA) && (*k2_div == PIXEL_RATE_DIV_NA))
ASSERT(false);
+
+ return odm_combine_factor;
}
void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
@@ -424,27 +429,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
}
-void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
-{
- struct dc_context *ctx = hws->ctx;
- union dmub_rb_cmd cmd;
-
- if (hws->ctx->dc->debug.disable_hubp_power_gate)
- return;
-
- PERF_TRACE();
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.domain_control.header.type = DMUB_CMD__VBIOS;
- cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL;
- cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data);
- cmd.domain_control.data.inst = hubp_inst;
- cmd.domain_control.data.power_gate = !power_on;
-
- dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
-
- PERF_TRACE();
-}
static void apply_symclk_on_tx_off_wa(struct dc_link *link)
{
/* There are use cases where SYMCLK is referenced by OTG. For instance
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index 96035c75e0df..eafcc4ea6d24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -37,14 +37,12 @@ void dcn314_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool po
void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
-void dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
+unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
-void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
-
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index 86d6a514dec0..ca8fe55c33b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -139,7 +139,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
- .hubp_pg_control = dcn314_hubp_pg_control,
+ .hubp_pg_control = dcn31_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index a840b008d660..6a9024aa3285 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -1883,13 +1883,6 @@ static bool dcn314_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
- /* Disable pipe power gating when unsupported */
- if (ctx->asic_id.hw_internal_rev == 0x01 ||
- ctx->asic_id.hw_internal_rev == 0x80) {
- dc->debug.disable_dpp_power_gate = true;
- dc->debug.disable_hubp_power_gate = true;
- }
-
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -1910,6 +1903,14 @@ static bool dcn314_resource_construct(
dc->debug = debug_defaults_drv;
else
dc->debug = debug_defaults_diags;
+
+ /* Disable pipe power gating */
+ dc->debug.disable_dpp_power_gate = true;
+ dc->debug.disable_hubp_power_gate = true;
+
+ /* Disable root clock optimization */
+ dc->debug.root_clock_optimization.u32All = 0;
+
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index f1153941907e..df3a438abda8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -1610,7 +1610,7 @@ static int source_format_to_bpp (enum source_format_class SourcePixelFormat)
{
if (SourcePixelFormat == dm_444_64)
return 8;
- else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16)
+ else if (SourcePixelFormat == dm_444_16)
return 2;
else if (SourcePixelFormat == dm_444_8)
return 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 2d604f7ee782..ca5b4b28a664 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -179,6 +179,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index c586468872e2..d52d5feeb311 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -448,7 +448,7 @@ bool dcn32_set_mpc_shaper_3dlut(
if (stream->func_shaper->type == TF_TYPE_HWPWL)
shaper_lut = &stream->func_shaper->pwl;
else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(stream->ctx,
stream->func_shaper,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
@@ -484,7 +484,7 @@ bool dcn32_set_mcm_luts(
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
lut_params = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->blend_tf,
&dpp_base->regamma_params, false);
lut_params = &dpp_base->regamma_params;
@@ -499,7 +499,7 @@ bool dcn32_set_mcm_luts(
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
// TODO: dpp_base replace
ASSERT(false);
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->in_shaper_func,
&dpp_base->shaper_params, true);
lut_params = &dpp_base->shaper_params;
@@ -1141,16 +1141,14 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
}
}
-void dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
+unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
{
struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
- // For phantom pipes, use the same programming as the main pipes
- if (pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- stream = pipe_ctx->stream->mall_stream_config.paired_stream;
- }
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
+ odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
@@ -1168,13 +1166,15 @@ void dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if (dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
if ((*k1_div == PIXEL_RATE_DIV_NA) && (*k2_div == PIXEL_RATE_DIV_NA))
ASSERT(false);
+
+ return odm_combine_factor;
}
void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index bf9bffabe0c0..2d2628f31bed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -71,7 +71,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context);
void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
-void dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
+unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index c2490e16a66a..777b2fac20c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -56,6 +56,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 19f134caa8ad..1cc09799f92d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -732,6 +732,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dp_plus_plus_wa = true,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
+ .enable_legacy_fast_update = false,
};
static struct dce_aux *dcn32_aux_engine_create(
@@ -2214,6 +2215,7 @@ static bool dcn32_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+ dc->config.dc_mode_clk_limit_support = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index a9c41ef0751f..5be242a1b82c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -595,11 +595,10 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(fpo_candidate_stream, fpo_vactive_margin_us))
return NULL;
- // check if freesync enabled
if (!fpo_candidate_stream->allow_freesync)
return NULL;
- if (fpo_candidate_stream->vrr_active_variable)
+ if (fpo_candidate_stream->vrr_active_variable && dc->debug.disable_fams_gaming)
return NULL;
return fpo_candidate_stream;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index ea204742ad35..a53478e15ce3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -730,6 +730,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_subvp_high_refresh = false,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
+ .enable_legacy_fast_update = false,
+ .disable_dc_mode_overwrite = true,
};
static struct dce_aux *dcn321_aux_engine_create(
@@ -1754,6 +1756,7 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->config.dc_mode_clk_limit_support = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 6266b0788387..7bf4bb7ad044 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -4356,12 +4356,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
-
- locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
- locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
- locals->EffectiveLBLatencyHidingSourceLinesChroma),
- locals->SwathHeightCPerState[i][j][k]);
+ if (locals->LinesInDETChroma) {
+ locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma +
+ dml_min(locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] *
+ locals->BytePerPixelInDETC[k] *
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
+ locals->EffectiveLBLatencyHidingSourceLinesChroma),
+ locals->SwathHeightCPerState[i][j][k]);
+ } else {
+ locals->EffectiveDETLBLinesChroma = 0;
+ }
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index c9afddd11589..d9e049e7ff0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -33,7 +33,7 @@
#include "dml/display_mode_vba.h"
struct _vcs_dpi_ip_params_st dcn3_14_ip = {
- .VBlankNomDefaultUS = 800,
+ .VBlankNomDefaultUS = 668,
.gpuvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_enable = 1,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index e2bb2b9971f3..a95034801712 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -485,24 +485,20 @@ static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
}
}
-void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
+static void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
struct _vcs_dpi_voltage_scaling_st *entry)
{
int i = 0;
int index = 0;
- float net_bw_of_new_state = 0;
dc_assert_fp_enabled();
- get_optimal_ntuple(entry);
-
if (*num_entries == 0) {
table[0] = *entry;
(*num_entries)++;
} else {
- net_bw_of_new_state = calculate_net_bw_in_kbytes_sec(entry);
- while (net_bw_of_new_state > calculate_net_bw_in_kbytes_sec(&table[index])) {
+ while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
index++;
if (index >= *num_entries)
break;
@@ -2349,6 +2345,63 @@ void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
}
+static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry,
+ struct _vcs_dpi_voltage_scaling_st *second_entry)
+{
+ struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry;
+ *first_entry = *second_entry;
+ *second_entry = temp_entry;
+}
+
+/*
+ * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK
+ */
+static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
+{
+ unsigned int start_index = 0;
+ unsigned int end_index = 0;
+ unsigned int current_bw = 0;
+
+ for (int i = 0; i < (*num_entries - 1); i++) {
+ if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
+ current_bw = table[i].net_bw_in_kbytes_sec;
+ start_index = i;
+ end_index = ++i;
+
+ while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
+ end_index = ++i;
+ }
+
+ if (start_index != end_index) {
+ for (int j = start_index; j < end_index; j++) {
+ for (int k = start_index; k < end_index; k++) {
+ if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
+ swap_table_entries(&table[k], &table[k+1]);
+ }
+ }
+ }
+
+ start_index = 0;
+ end_index = 0;
+
+ }
+}
+
+/*
+ * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing
+ * and remove entries that do not follow this order
+ */
+static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
+{
+ for (int i = 0; i < (*num_entries - 1); i++) {
+ if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
+ if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
+ (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
+ remove_entry_from_table_at_index(table, num_entries, i);
+ }
+ }
+}
+
/*
* override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings
* Input:
@@ -2480,6 +2533,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -2488,6 +2543,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
// Insert the UCLK DPMS
@@ -2496,6 +2553,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -2506,6 +2565,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
}
@@ -2515,6 +2576,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = max_clk_data.fclk_mhz;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -2530,6 +2593,21 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
remove_entry_from_table_at_index(table, num_entries, i);
}
+ // Insert entry with all max dc limits without bandwidth matching
+ if (!disable_dc_mode_overwrite) {
+ struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry;
+
+ max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
+ max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz;
+ max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16;
+
+ max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry);
+ insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
+
+ sort_entries_with_same_bw(table, num_entries);
+ remove_inconsistent_entries(table, num_entries);
+ }
+
// At this point, the table only contains supported points of interest
// it could be used as is, but some states may be redundant due to
// coarse grained nature of some clocks, so we want to round up to
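The two helpers added above give the synthetic state table a deterministic shape when several entries collapse onto the same net bandwidth: equal-bandwidth runs are first ordered by DCFCLK, then entries whose FCLK or MEMCLK would step backwards inside such a run are dropped. A minimal standalone sketch of the same idea, using an assumed simplified struct rather than the driver's _vcs_dpi_voltage_scaling_st:

struct soc_state {
	unsigned int net_bw;      /* stands in for net_bw_in_kbytes_sec */
	unsigned int dcfclk_mhz;
	unsigned int fclk_mhz;
	unsigned int dram_mts;
};

static void order_equal_bw_states(struct soc_state *t, unsigned int *n)
{
	/* Within equal-bandwidth runs, lower DCFCLK first, as in
	 * sort_entries_with_same_bw(); the table is tiny, so the
	 * quadratic passes are harmless. */
	for (unsigned int pass = 0; pass + 1 < *n; pass++)
		for (unsigned int j = 0; j + 1 < *n; j++)
			if (t[j].net_bw == t[j + 1].net_bw &&
			    t[j].dcfclk_mhz > t[j + 1].dcfclk_mhz) {
				struct soc_state tmp = t[j];
				t[j] = t[j + 1];
				t[j + 1] = tmp;
			}

	/* Drop entries whose FCLK/MEMCLK decrease inside an equal-bandwidth
	 * run, mirroring remove_inconsistent_entries(). */
	for (unsigned int i = 0; i + 1 < *n; ) {
		if (t[i].net_bw == t[i + 1].net_bw &&
		    (t[i].dram_mts > t[i + 1].dram_mts ||
		     t[i].fclk_mhz > t[i + 1].fclk_mhz)) {
			for (unsigned int k = i; k + 1 < *n; k++)
				t[k] = t[k + 1];
			(*n)--;
		} else {
			i++;
		}
	}
}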
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index a4206b71d650..defbee866be6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -39,10 +39,6 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
display_e2e_pipe_params_st *pipe_e2e);
-void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
- unsigned int *num_entries,
- struct _vcs_dpi_voltage_scaling_st *entry);
-
void dcn32_set_phantom_stream_timing(struct dc *dc,
struct dc_state *context,
struct pipe_ctx *ref_pipe,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index f0683fd9d3f0..b26fcf86014c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -207,24 +207,20 @@ static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *
return limiting_bw_kbytes_sec;
}
-void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
+static void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
struct _vcs_dpi_voltage_scaling_st *entry)
{
int i = 0;
int index = 0;
- float net_bw_of_new_state = 0;
dc_assert_fp_enabled();
- get_optimal_ntuple(entry);
-
if (*num_entries == 0) {
table[0] = *entry;
(*num_entries)++;
} else {
- net_bw_of_new_state = calculate_net_bw_in_kbytes_sec(entry);
- while (net_bw_of_new_state > calculate_net_bw_in_kbytes_sec(&table[index])) {
+ while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
index++;
if (index >= *num_entries)
break;
@@ -252,6 +248,63 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
+static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry,
+ struct _vcs_dpi_voltage_scaling_st *second_entry)
+{
+ struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry;
+ *first_entry = *second_entry;
+ *second_entry = temp_entry;
+}
+
+/*
+ * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK
+ */
+static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
+{
+ unsigned int start_index = 0;
+ unsigned int end_index = 0;
+ unsigned int current_bw = 0;
+
+ for (int i = 0; i < (*num_entries - 1); i++) {
+ if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
+ current_bw = table[i].net_bw_in_kbytes_sec;
+ start_index = i;
+ end_index = ++i;
+
+ while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
+ end_index = ++i;
+ }
+
+ if (start_index != end_index) {
+ for (int j = start_index; j < end_index; j++) {
+ for (int k = start_index; k < end_index; k++) {
+ if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
+ swap_table_entries(&table[k], &table[k+1]);
+ }
+ }
+ }
+
+ start_index = 0;
+ end_index = 0;
+
+ }
+}
+
+/*
+ * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing
+ * and remove entries that do not follow this order
+ */
+static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
+{
+ for (int i = 0; i < (*num_entries - 1); i++) {
+ if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
+ if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
+ (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
+ remove_entry_from_table_at_index(table, num_entries, i);
+ }
+ }
+}
+
/*
* override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings
* Input:
@@ -362,11 +415,11 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
if (max_clk_data.fclk_mhz == 0)
max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz *
- dcn3_2_soc.pct_ideal_sdp_bw_after_urgent /
- dcn3_2_soc.pct_ideal_fabric_bw_after_urgent;
+ dcn3_21_soc.pct_ideal_sdp_bw_after_urgent /
+ dcn3_21_soc.pct_ideal_fabric_bw_after_urgent;
if (max_clk_data.phyclk_mhz == 0)
- max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;
+ max_clk_data.phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz;
*num_entries = 0;
entry.dispclk_mhz = max_clk_data.dispclk_mhz;
@@ -374,8 +427,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.dppclk_mhz = max_clk_data.dppclk_mhz;
entry.dtbclk_mhz = max_clk_data.dtbclk_mhz;
entry.phyclk_mhz = max_clk_data.phyclk_mhz;
- entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz;
- entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
+ entry.phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz;
+ entry.phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz;
// Insert all the DCFCLK STAs
for (i = 0; i < num_dcfclk_stas; i++) {
@@ -383,6 +436,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -391,6 +446,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
// Insert the UCLK DPMS
@@ -399,6 +456,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -409,6 +468,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
}
@@ -418,6 +479,8 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
entry.fabricclk_mhz = max_clk_data.fclk_mhz;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -433,6 +496,23 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
remove_entry_from_table_at_index(table, num_entries, i);
}
+ // Insert entry with all max dc limits without bandwidth matching
+ if (!disable_dc_mode_overwrite) {
+ struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry;
+
+ max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
+ max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz;
+ max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16;
+
+ max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry);
+ dcn321_insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
+
+ sort_entries_with_same_bw(table, num_entries);
+ remove_inconsistent_entries(table, num_entries);
+ }
+
// At this point, the table only contains supported points of interest
// it could be used as is, but some states may be redundant due to
// coarse grained nature of some clocks, so we want to round up to
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h
index e8fad9b4be69..c6623b3705ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h
@@ -29,10 +29,6 @@
#include "dml/display_mode_vba.h"
-void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
- unsigned int *num_entries,
- struct _vcs_dpi_voltage_scaling_st *entry);
-
void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index ff0246a9458f..fb17f8868cb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -167,6 +167,7 @@ struct _vcs_dpi_voltage_scaling_st {
double phyclk_mhz;
double dppclk_mhz;
double dtbclk_mhz;
+ float net_bw_in_kbytes_sec;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 6faf40fa5c69..ecb7bcc39469 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -230,6 +230,7 @@ struct clk_bw_params {
unsigned int dram_channel_width_bytes;
unsigned int dispclk_vco_khz;
unsigned int dc_mode_softmax_memclk;
+ unsigned int max_memclk_mhz;
struct clk_limit_table clk_table;
struct wm_table wm_table;
struct dummy_pstate_entry dummy_pstate_table[4];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index a151865a3a20..4ca4192c1e12 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -156,7 +156,7 @@ struct hwseq_private_funcs {
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
- void (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx,
+ unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx,
unsigned int *k1_div,
unsigned int *k2_div);
void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index ba95facc4ee8..ef8739df91bc 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -82,8 +82,15 @@ bool dp_parse_link_loss_status(
}
/* Check interlane align.*/
- if (sink_status_changed ||
- !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+ if (link_dp_get_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
+ (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b)) {
+ sink_status_changed = true;
+ } else if (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+ sink_status_changed = true;
+ }
+
+ if (sink_status_changed) {
DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
@@ -201,6 +208,25 @@ void dp_handle_link_loss(struct dc_link *link)
}
}
+static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data)
+{
+ enum dc_status retval;
+ union lane_align_status_updated dpcd_lane_status_updated;
+
+ retval = core_link_read_dpcd(
+ link,
+ DP_LANE_ALIGN_STATUS_UPDATED,
+ &dpcd_lane_status_updated.raw,
+ sizeof(union lane_align_status_updated));
+
+ if (retval == DC_OK) {
+ irq_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b =
+ dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b;
+ irq_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b =
+ dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b;
+ }
+}
+
enum dc_status dp_read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data)
@@ -242,6 +268,13 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+
+ /*
+ * This display doesn't have correct values in DPCD200Eh.
+ * Read and check DPCD204h instead.
+ */
+ if (link->wa_flags.read_dpcd204h_on_irq_hpd)
+ read_dpcd204h_on_irq_hpd(link, irq_data);
}
return retval;
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 7c9a2b34bd05..4585e0419da6 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -367,6 +367,8 @@ struct dmub_srv_hw_funcs {
bool (*is_supported)(struct dmub_srv *dmub);
+ bool (*is_psrsu_supported)(struct dmub_srv *dmub);
+
bool (*is_hw_init)(struct dmub_srv *dmub);
void (*enable_dmub_boot_options)(struct dmub_srv *dmub,
@@ -492,7 +494,7 @@ struct dmub_notification {
* of a firmware to know if feature or functionality is supported or present.
*/
#define DMUB_FW_VERSION(major, minor, revision) \
- ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | ((revision) & 0xFFFF))
+ ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8))
/**
* dmub_srv_create() - creates the DMUB service.
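The revision field now occupies bits 15:8 instead of the low 16 bits, presumably so the macro's packing lines up with the version word reported by the firmware itself (with the low byte left unused by this macro). Illustrative arithmetic for the PSR-SU gates used below:

/*
 * DMUB_FW_VERSION(4, 0, 59) == (4 << 24) | (0 << 16) | (59 << 8) == 0x04003b00
 *
 * A firmware reporting 0x04003c00 (4.0.60) therefore compares as newer.
 * The old "(revision) & 0xFFFF" packing yielded 0x0400003b, which does not
 * line up with a fw_version that stores its revision in bits 15:8.
 */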
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index ebf7aeec4029..5e952541e72d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -302,6 +302,11 @@ bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
return supported;
}
+bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub)
+{
+ return dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59);
+}
+
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
union dmub_gpint_data_register reg)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index 7d5c10ee539b..89c5a948b67d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -221,6 +221,8 @@ bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub);
bool dmub_dcn31_is_supported(struct dmub_srv *dmub);
+bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub);
+
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
union dmub_gpint_data_register reg);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c
index 48a06dbd9be7..f161aeb7e7c4 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c
@@ -60,3 +60,8 @@ const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = {
{ DMUB_DCN31_FIELDS() },
#undef DMUB_SF
};
+
+bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub)
+{
+ return dmub->fw_version >= DMUB_FW_VERSION(8, 0, 16);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h
index 674267a2940e..f213bd82c911 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h
@@ -30,4 +30,6 @@
extern const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs;
+bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN314_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 9e9a6a44a7ac..bdaf43892f47 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -226,14 +226,17 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
case DMUB_ASIC_DCN314:
case DMUB_ASIC_DCN315:
case DMUB_ASIC_DCN316:
- if (asic == DMUB_ASIC_DCN314)
+ if (asic == DMUB_ASIC_DCN314) {
dmub->regs_dcn31 = &dmub_srv_dcn314_regs;
- else if (asic == DMUB_ASIC_DCN315)
+ funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported;
+ } else if (asic == DMUB_ASIC_DCN315) {
dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
- else if (asic == DMUB_ASIC_DCN316)
+ } else if (asic == DMUB_ASIC_DCN316) {
dmub->regs_dcn31 = &dmub_srv_dcn316_regs;
- else
+ } else {
dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
+ funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported;
+ }
funcs->reset = dmub_dcn31_reset;
funcs->reset_release = dmub_dcn31_reset_release;
funcs->backdoor_load = dmub_dcn31_backdoor_load;
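With the hook wired per ASIC above, callers can ask the DMUB service whether the loaded firmware is new enough for PSR-SU instead of hard-coding version thresholds. A hypothetical caller sketch (not part of this patch; it assumes funcs here is dmub->hw_funcs, as elsewhere in dmub_srv_hw_setup(), and the real call sites live in DM code not shown in this diff):

static bool psrsu_allowed(struct dmub_srv *dmub)
{
	/* ASICs that never assign the hook (e.g. DCN315/316 above) stay disabled. */
	if (dmub->hw_funcs.is_psrsu_supported)
		return dmub->hw_funcs.is_psrsu_supported(dmub);
	return false;
}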
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index a57952b93e73..9ef88a0b1b57 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -35,44 +35,6 @@
#include <linux/pm_runtime.h>
#include <asm/processor.h>
-static const struct cg_flag_name clocks[] = {
- {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
- {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
- {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
- {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
- {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
- {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
- {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
- {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
- {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
- {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
- {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
- {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
- {0, NULL},
-};
-
static const struct hwmon_temp_label {
enum PP_HWMON_TEMP channel;
const char *label;
@@ -2110,6 +2072,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -2120,7 +2083,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED;
}
} else if (DEVICE_ATTR_IS(pp_features)) {
- if (adev->flags & AMD_IS_APU || gc_ver < IP_VERSION(9, 0, 0))
+ if ((adev->flags & AMD_IS_APU &&
+ gc_ver != IP_VERSION(9, 4, 3)) ||
+ gc_ver < IP_VERSION(9, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
if (gc_ver < IP_VERSION(9, 1, 0))
@@ -3684,6 +3649,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
return 0;
}
+static const struct cg_flag_name clocks[] = {
+ {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
+ {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
+ {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
+ {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
+ {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
+ {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
+ {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
+ {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
+ {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
+ {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
+ {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
+ {0, NULL},
+};
+
static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
int i;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index d178f3f44081..42172b00be66 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -89,6 +89,8 @@ struct amdgpu_dpm_thermal {
int max_mem_crit_temp;
/* memory max emergency(shutdown) temp */
int max_mem_emergency_temp;
+ /* SWCTF threshold */
+ int sw_ctf_threshold;
/* was last interrupt low to high or high to low */
bool high_to_low;
/* interrupt source */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 11b7b4cffaae..ff360c699171 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -26,6 +26,7 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
+#include <linux/reboot.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
@@ -91,6 +92,45 @@ static int pp_early_init(void *handle)
return 0;
}
+static void pp_swctf_delayed_work_handler(struct work_struct *work)
+{
+ struct pp_hwmgr *hwmgr =
+ container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct amdgpu_dpm_thermal *range =
+ &adev->pm.dpm.thermal;
+ uint32_t gpu_temperature, size;
+ int ret;
+
+ /*
+ * If the hotspot/edge temperature is confirmed to be below the SW CTF setting point
+ * after the enforced delay, nothing will be done.
+ * Otherwise, a graceful shutdown will be performed to prevent further damage.
+ */
+ if (range->sw_ctf_threshold &&
+ hwmgr->hwmgr_func->read_sensor) {
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
+ AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ &gpu_temperature,
+ &size);
+ /*
+ * For some legacy ASICs, retrieving the hotspot temperature might not be
+ * supported. Check the edge temperature instead.
+ */
+ if (ret == -EOPNOTSUPP)
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
+ AMDGPU_PP_SENSOR_EDGE_TEMP,
+ &gpu_temperature,
+ &size);
+ if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
+ return;
+ }
+
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+}
+
static int pp_sw_init(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -101,6 +141,10 @@ static int pp_sw_init(void *handle)
pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
+ if (!ret)
+ INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
+ pp_swctf_delayed_work_handler);
+
return ret;
}
@@ -135,6 +179,8 @@ static int pp_hw_fini(void *handle)
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
+
hwmgr_hw_fini(hwmgr);
return 0;
@@ -221,6 +267,8 @@ static int pp_suspend(void *handle)
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
+
return hwmgr_suspend(hwmgr);
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
index 981dc8c7112d..90452b66e107 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
@@ -241,7 +241,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
TEMP_RANGE_MAX,
TEMP_RANGE_MIN,
TEMP_RANGE_MAX,
- TEMP_RANGE_MAX};
+ TEMP_RANGE_MAX,
+ 0};
struct amdgpu_device *adev = hwmgr->adev;
if (!hwmgr->not_vf)
@@ -265,6 +266,7 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+ adev->pm.dpm.thermal.sw_ctf_threshold = range.sw_ctf_threshold;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e10cc5e7928e..6841a4bce186 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5432,6 +5432,8 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->sw_ctf_threshold = thermal_data->max;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index bfe80ac0ad8c..d0b1ab6c4523 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -603,21 +603,17 @@ int phm_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
- /*
- * SW CTF just occurred.
- * Try to do a graceful shutdown to prevent further damage.
- */
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
- orderly_poweroff(true);
- } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
+ schedule_delayed_work(&hwmgr->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) {
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
- else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
+ } else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/*
* HW CTF just occurred. Shutdown to prevent further damage.
@@ -626,15 +622,10 @@ int phm_irq_process(struct amdgpu_device *adev,
orderly_poweroff(true);
}
} else if (client_id == SOC15_IH_CLIENTID_THM) {
- if (src_id == 0) {
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
- /*
- * SW CTF just occurred.
- * Try to do a graceful shutdown to prevent further damage.
- */
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
- orderly_poweroff(true);
- } else
+ if (src_id == 0)
+ schedule_delayed_work(&hwmgr->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ else
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 99cd2e63afdd..c51dd4c74fe9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -5241,6 +5241,9 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ struct phm_ppt_v2_information *pp_table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
@@ -5257,6 +5260,13 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ if (tdp_table->usSoftwareShutdownTemp > pp_table->ThotspotLimit &&
+ tdp_table->usSoftwareShutdownTemp < VEGA10_THERMAL_MAXIMUM_ALERT_TEMP)
+ thermal_data->sw_ctf_threshold = tdp_table->usSoftwareShutdownTemp;
+ else
+ thermal_data->sw_ctf_threshold = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
+ thermal_data->sw_ctf_threshold *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
return 0;
}
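The new lines clamp the pptable's software shutdown temperature to a sane window before handing it out as the SW CTF threshold. With illustrative values (assumed, not from a real pptable) of ThotspotLimit = 105 and VEGA10_THERMAL_MAXIMUM_ALERT_TEMP = 120, a usSoftwareShutdownTemp of 110 is used as-is, while 0 or 130 falls back to the 120 limit; the chosen value is then scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES.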
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index e9db137cd1c6..1937be1cf5b4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -2763,6 +2763,8 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
@@ -2781,6 +2783,8 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
index ed3dff0b52d2..ae342c58cd3e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
@@ -192,7 +192,9 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
- val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ val &= ~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK;
+ val &= ~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ val &= ~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 0d4d4811527c..4e19ccbdb807 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -4206,6 +4206,8 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
@@ -4224,6 +4226,8 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
index f4f4efdbda79..e9737ca8418a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
@@ -263,7 +263,9 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
- val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ val &= ~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK;
+ val &= ~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ val &= ~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index f1580a26a850..612d66aeaab9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -811,6 +811,8 @@ struct pp_hwmgr {
bool gfxoff_state_changed_by_workload;
uint32_t pstate_sclk_peak;
uint32_t pstate_mclk_peak;
+
+ struct delayed_work swctf_delayed_work;
};
int hwmgr_early_init(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
index a5f2227a3971..0ffc2347829d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
@@ -131,6 +131,7 @@ struct PP_TemperatureRange {
int mem_min;
int mem_crit_max;
int mem_emergency_max;
+ int sw_ctf_threshold;
};
struct PP_StateValidationBlock {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 4dea79a0c5b5..ce41a8309582 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/pci.h>
+#include <linux/reboot.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -1078,6 +1079,34 @@ static void smu_interrupt_work_fn(struct work_struct *work)
smu->ppt_funcs->interrupt_work(smu);
}
+static void smu_swctf_delayed_work_handler(struct work_struct *work)
+{
+ struct smu_context *smu =
+ container_of(work, struct smu_context, swctf_delayed_work.work);
+ struct smu_temperature_range *range =
+ &smu->thermal_range;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t hotspot_tmp, size;
+
+ /*
+ * If the hotspot temperature is confirmed to be below the SW CTF setting point
+ * after the enforced delay, nothing will be done.
+ * Otherwise, a graceful shutdown will be performed to prevent further damage.
+ */
+ if (range->software_shutdown_temp &&
+ smu->ppt_funcs->read_sensor &&
+ !smu->ppt_funcs->read_sensor(smu,
+ AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ &hotspot_tmp,
+ &size) &&
+ hotspot_tmp / 1000 < range->software_shutdown_temp)
+ return;
+
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+}
+
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1120,6 +1149,9 @@ static int smu_sw_init(void *handle)
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ INIT_DELAYED_WORK(&smu->swctf_delayed_work,
+ smu_swctf_delayed_work_handler);
+
ret = smu_smc_table_sw_init(smu);
if (ret) {
dev_err(adev->dev, "Failed to sw init smc table!\n");
@@ -1600,6 +1632,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return ret;
}
+ cancel_delayed_work_sync(&smu->swctf_delayed_work);
+
ret = smu_disable_dpms(smu);
if (ret) {
dev_err(adev->dev, "Fail to disable dpm features!\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 09469c750a96..6e2069dcb6b9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -573,6 +573,8 @@ struct smu_context
u32 debug_param_reg;
u32 debug_msg_reg;
u32 debug_resp_reg;
+
+ struct delayed_work swctf_delayed_work;
};
struct i2c_adapter;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 275f708db636..c94d825a871b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1654,7 +1654,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, uint32_t mask)
{
- int ret = 0, size = 0;
+ int ret = 0;
uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
soft_min_level = mask ? (ffs(mask) - 1) : 0;
@@ -1675,15 +1675,15 @@ static int navi10_force_clk_levels(struct smu_context *smu,
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
- return size;
+ return 0;
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
- return size;
+ return 0;
ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
if (ret)
- return size;
+ return 0;
break;
case SMU_DCEFCLK:
dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
@@ -1693,7 +1693,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return 0;
}
static int navi10_populate_umd_state_clk(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index e1ef88ee1ed3..aa4a5498a12f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1412,13 +1412,8 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
- /*
- * SW CTF just occurred.
- * Try to do a graceful shutdown to prevent further damage.
- */
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
- orderly_poweroff(true);
+ schedule_delayed_work(&smu->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e52c563f0dac..3856da6c3f3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1353,13 +1353,8 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
- /*
- * SW CTF just occurred.
- * Try to do a graceful shutdown to prevent further damage.
- */
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
- orderly_poweroff(true);
+ schedule_delayed_work(&smu->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index a6083957ae51..124287cbbff8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1710,6 +1710,7 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
+ range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index a92ea4601ea4..6ef12252beb5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -200,7 +200,6 @@ struct PPTable_t {
};
#define SMUQ10_TO_UINT(x) ((x) >> 10)
-#define SMUQ16_TO_UINT(x) ((x) >> 16)
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
@@ -1994,8 +1993,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->average_socket_power =
SMUQ10_TO_UINT(metrics->SocketPower);
+ /* Energy is reported in 15.625mJ units */
gpu_metrics->energy_accumulator =
- SMUQ16_TO_UINT(metrics->SocketEnergyAcc);
+ SMUQ10_TO_UINT(metrics->SocketEnergyAcc);
gpu_metrics->current_gfxclk =
SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]);
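The accumulator is decoded with the same Q10 fixed-point shift as the rest of the metrics fields, matching the 15.625 mJ unit noted in the new comment. Illustrative arithmetic:

/*
 * raw SocketEnergyAcc = 0x00100000 (1048576)
 *   SMUQ10_TO_UINT(raw) = raw >> 10 = 1024 energy units
 *   the removed SMUQ16_TO_UINT(raw) = raw >> 16 = 16, under-reporting by 64x
 */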
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 0600fdcd06ef..719447ce86e7 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -2435,7 +2435,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLB, val);
+ XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_SSC_ENABLE_PLLB, val);
}
static u32 intel_cx0_get_powerdown_update(u8 lane_mask)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index be1a87bde0c9..df38632c6237 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -6,6 +6,9 @@
#ifndef __INTEL_DISPLAY_POWER_H__
#define __INTEL_DISPLAY_POWER_H__
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
#include "intel_wakeref.h"
enum aux_ch;
@@ -16,6 +19,7 @@ enum port;
struct drm_i915_private;
struct i915_power_well;
struct intel_encoder;
+struct seq_file;
/*
* Keep the pipe, transcoder, port (DDI_LANES,DDI_IO,AUX) domain instances
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 1118ee9d224c..5ad04cd42c15 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1252,10 +1252,18 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a,
POWER_DOMAIN_INIT);
#define XELPD_DC_OFF_PORT_POWER_DOMAINS \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
POWER_DOMAIN_PORT_DDI_LANES_TC1, \
POWER_DOMAIN_PORT_DDI_LANES_TC2, \
POWER_DOMAIN_PORT_DDI_LANES_TC3, \
POWER_DOMAIN_PORT_DDI_LANES_TC4, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_IO_C, \
+ POWER_DOMAIN_AUX_IO_D, \
+ POWER_DOMAIN_AUX_IO_E, \
POWER_DOMAIN_AUX_C, \
POWER_DOMAIN_AUX_D, \
POWER_DOMAIN_AUX_E, \
@@ -1272,14 +1280,6 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a,
XELPD_PW_B_POWER_DOMAINS, \
XELPD_PW_C_POWER_DOMAINS, \
XELPD_PW_D_POWER_DOMAINS, \
- POWER_DOMAIN_PORT_DDI_LANES_C, \
- POWER_DOMAIN_PORT_DDI_LANES_D, \
- POWER_DOMAIN_PORT_DDI_LANES_E, \
- POWER_DOMAIN_VGA, \
- POWER_DOMAIN_AUDIO_PLAYBACK, \
- POWER_DOMAIN_AUX_IO_C, \
- POWER_DOMAIN_AUX_IO_D, \
- POWER_DOMAIN_AUX_IO_E, \
XELPD_DC_OFF_PORT_POWER_DOMAINS
I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index e494df379e6c..1015bba4af01 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -12,6 +12,8 @@
struct drm_i915_private;
struct i915_power_well;
+struct i915_power_well_ops;
+struct intel_encoder;
#define for_each_power_well(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 5ed450111f77..34fabadefaf6 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -2358,7 +2358,7 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
mutex_lock(&dig_port->hdcp_mutex);
drm_WARN_ON(&i915->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
- hdcp->content_type = (u8)conn_state->content_type;
+ hdcp->content_type = (u8)conn_state->hdcp_content_type;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index d58ed9b62e67..56c17283ba2d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -933,9 +933,9 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
}
io_wake_lines = intel_usecs_to_scanlines(
- &crtc_state->uapi.adjusted_mode, io_wake_time);
+ &crtc_state->hw.adjusted_mode, io_wake_time);
fast_wake_lines = intel_usecs_to_scanlines(
- &crtc_state->uapi.adjusted_mode, fast_wake_time);
+ &crtc_state->hw.adjusted_mode, fast_wake_time);
if (io_wake_lines > max_wake_lines ||
fast_wake_lines > max_wake_lines)
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 0f7db617425a..8750cb0d8d9d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -81,7 +81,7 @@
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
-#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) + 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 01b75529311c..ee9f83af7cf6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -606,7 +606,7 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
if (unlikely(ret))
return ret;
slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
- } else if (slpc->min_freq_softlimit != slpc->min_freq) {
+ } else {
return intel_guc_slpc_set_min_freq(slpc,
slpc->min_freq_softlimit);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 09d4bbcdcdbf..4de6a4e8280d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -118,15 +118,31 @@ static void mock_gt_probe(struct drm_i915_private *i915)
i915->gt[0]->name = "Mock GT";
}
+static const struct intel_device_info mock_info = {
+ .__runtime.graphics.ip.ver = -1,
+ .__runtime.page_sizes = (I915_GTT_PAGE_SIZE_4K |
+ I915_GTT_PAGE_SIZE_64K |
+ I915_GTT_PAGE_SIZE_2M),
+ .__runtime.memory_regions = REGION_SMEM,
+ .__runtime.platform_engine_mask = BIT(0),
+
+ /* simply use legacy cache level for mock device */
+ .max_pat_index = 3,
+ .cachelevel_to_pat = {
+ [I915_CACHE_NONE] = 0,
+ [I915_CACHE_LLC] = 1,
+ [I915_CACHE_L3_LLC] = 2,
+ [I915_CACHE_WT] = 3,
+ },
+};
+
struct drm_i915_private *mock_gem_device(void)
{
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
struct drm_i915_private *i915;
- struct intel_device_info *i915_info;
struct pci_dev *pdev;
- unsigned int i;
int ret;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
@@ -159,15 +175,18 @@ struct drm_i915_private *mock_gem_device(void)
pci_set_drvdata(pdev, i915);
+ /* Device parameters start as a copy of module parameters. */
+ i915_params_copy(&i915->params, &i915_modparams);
+
+ /* Set up device info and initial runtime info. */
+ intel_device_info_driver_create(i915, pdev->device, &mock_info);
+
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (pm_runtime_enabled(&pdev->dev))
WARN_ON(pm_runtime_get_sync(&pdev->dev));
-
- i915_params_copy(&i915->params, &i915_modparams);
-
intel_runtime_pm_init_early(&i915->runtime_pm);
/* wakeref tracking has significant overhead */
i915->runtime_pm.no_wakeref_tracking = true;
@@ -175,21 +194,6 @@ struct drm_i915_private *mock_gem_device(void)
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
- RUNTIME_INFO(i915)->graphics.ip.ver = -1;
-
- RUNTIME_INFO(i915)->page_sizes =
- I915_GTT_PAGE_SIZE_4K |
- I915_GTT_PAGE_SIZE_64K |
- I915_GTT_PAGE_SIZE_2M;
-
- RUNTIME_INFO(i915)->memory_regions = REGION_SMEM;
-
- /* simply use legacy cache level for mock device */
- i915_info = (struct intel_device_info *)INTEL_INFO(i915);
- i915_info->max_pat_index = 3;
- for (i = 0; i < I915_MAX_CACHE_LEVEL; i++)
- i915_info->cachelevel_to_pat[i] = i;
-
intel_memory_regions_hw_probe(i915);
spin_lock_init(&i915->gpu_error.lock);
@@ -223,7 +227,6 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(to_gt(i915));
to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
- RUNTIME_INFO(i915)->platform_engine_mask = BIT(0);
to_gt(i915)->info.engine_mask = BIT(0);
to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 3cc9fb0d4f5d..dc276c346fd1 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -2139,9 +2139,9 @@ static const struct panel_desc starry_himax83102_j02_desc = {
static const struct drm_display_mode starry_ili9882t_default_mode = {
.clock = 165280,
.hdisplay = 1200,
- .hsync_start = 1200 + 32,
- .hsync_end = 1200 + 32 + 30,
- .htotal = 1200 + 32 + 30 + 32,
+ .hsync_start = 1200 + 72,
+ .hsync_end = 1200 + 72 + 30,
+ .htotal = 1200 + 72 + 30 + 72,
.vdisplay = 1920,
.vsync_start = 1920 + 68,
.vsync_end = 1920 + 68 + 2,
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 38e572faff43..da6a7abd584f 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -32,8 +32,6 @@
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>
-void ssi_waketest(struct hsi_client *cl, unsigned int enable);
-
#define SSIP_TXQUEUE_LEN 100
#define SSIP_MAX_MTU 65535
#define SSIP_DEFAULT_MTU 4000
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 26f2c3c01297..84ba8b875199 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
+#include <linux/hsi/ssi_protocol.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index b9495b720f1b..c78d9c9f7371 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -151,23 +151,17 @@ static int ssi_div_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");
-static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
+static void ssi_debug_add_port(struct omap_ssi_port *omap_port,
struct dentry *dir)
{
struct hsi_port *port = to_hsi_port(omap_port->dev);
dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
- if (!dir)
- return -ENOMEM;
omap_port->dir = dir;
debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
dir = debugfs_create_dir("sst", dir);
- if (!dir)
- return -ENOMEM;
debugfs_create_file_unsafe("divisor", 0644, dir, port,
&ssi_sst_div_fops);
-
- return 0;
}
#endif
@@ -1217,11 +1211,7 @@ static int ssi_port_probe(struct platform_device *pd)
pm_runtime_enable(omap_port->pdev);
#ifdef CONFIG_DEBUG_FS
- err = ssi_debug_add_port(omap_port, omap_ssi->dir);
- if (err < 0) {
- pm_runtime_disable(omap_port->pdev);
- goto error;
- }
+ ssi_debug_add_port(omap_port, omap_ssi->dir);
#endif
hsi_add_clients_from_dt(port, np);
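Dropping the return-value checks follows the debugfs convention that the creation helpers cope with error or NULL parents internally, so callers need no error path. A minimal sketch of that style, with hypothetical names:

#include <linux/debugfs.h>

struct foo_port {			/* hypothetical driver state */
	struct dentry *dir;
	u32 divisor;
};

static void foo_debugfs_add_port(struct foo_port *port, struct dentry *parent)
{
	struct dentry *dir = debugfs_create_dir("port", parent);

	/* No error checks: debugfs helpers accept an ERR_PTR/NULL parent. */
	port->dir = dir;
	debugfs_create_u32("divisor", 0644, dir, &port->divisor);
}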
diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c
index 6512f4bec79a..6500ca548f9c 100644
--- a/drivers/hwmon/intel-m10-bmc-hwmon.c
+++ b/drivers/hwmon/intel-m10-bmc-hwmon.c
@@ -794,3 +794,4 @@ MODULE_DEVICE_TABLE(platform, intel_m10bmc_hwmon_ids);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX 10 BMC hardware monitor");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);
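MODULE_IMPORT_NS() declares the dependency on symbols that the MAX 10 BMC core exports into the INTEL_M10_BMC_CORE namespace; without it modpost complains at build time and the module is refused at load time. A generic sketch of the export/import pairing, with a hypothetical symbol name:

#include <linux/module.h>

/* provider module: export a helper into the namespace */
int foo_bmc_read(unsigned int addr, unsigned int *val)	/* hypothetical */
{
	*val = 0;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(foo_bmc_read, INTEL_M10_BMC_CORE);

/* consumer module: import the namespace before using foo_bmc_read() */
MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);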
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 9d14954da94f..fa06325f5a7c 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1191,9 +1191,9 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
{
if (data->num_attributes >= data->max_attributes - 1) {
int new_max_attrs = data->max_attributes + PMBUS_ATTR_ALLOC_SIZE;
- void *new_attrs = devm_krealloc(data->dev, data->group.attrs,
- new_max_attrs * sizeof(void *),
- GFP_KERNEL);
+ void *new_attrs = devm_krealloc_array(data->dev, data->group.attrs,
+ new_max_attrs, sizeof(void *),
+ GFP_KERNEL);
if (!new_attrs)
return -ENOMEM;
data->group.attrs = new_attrs;
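devm_krealloc_array() is the overflow-checked counterpart of devm_krealloc(): it multiplies count by element size internally and returns NULL on overflow. A minimal sketch of the same grow-by-chunk pattern, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

struct foo_data {			/* hypothetical driver state */
	struct device *dev;
	struct attribute **attrs;	/* NULL-terminated array */
	int num, max;
};

static int foo_add_attr(struct foo_data *data, struct attribute *attr)
{
	if (data->num >= data->max - 1) {
		int new_max = data->max + 8;
		struct attribute **tmp;

		tmp = devm_krealloc_array(data->dev, data->attrs, new_max,
					  sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		data->attrs = tmp;
		data->max = new_max;
	}
	data->attrs[data->num++] = attr;
	data->attrs[data->num] = NULL;
	return 0;
}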
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 1fb3a2550e29..dfe82952671b 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -174,7 +174,7 @@ static struct platform_driver omap_hwspinlock_driver = {
.remove = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
- .of_match_table = of_match_ptr(omap_hwspinlock_of_match),
+ .of_match_table = omap_hwspinlock_of_match,
},
};
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 2b5bbfffbc4f..06f0a7594169 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -236,4 +236,15 @@ config CORESIGHT_TPDA
To compile this driver as a module, choose M here: the module will be
called coresight-tpda.
+
+config CORESIGHT_DUMMY
+ tristate "Dummy driver support"
+ help
+	  Enables support for the dummy driver. The dummy driver can be used
+	  for CoreSight sources/sinks that are owned and configured by some
+	  other subsystem and use Linux drivers to configure the rest of the
+	  trace path.
+
+ To compile this driver as a module, choose M here: the module will be
+ called coresight-dummy.
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 33bcc3f7b8ae..995d3b2c76df 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_CORESIGHT_TPDA) += coresight-tpda.o
coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \
coresight-cti-sysfs.o
obj-$(CONFIG_ULTRASOC_SMB) += ultrasoc-smb.o
+obj-$(CONFIG_CORESIGHT_DUMMY) += coresight-dummy.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index bc90a03f478f..3949ded0d4fa 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -395,13 +395,18 @@ static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
return coresight_timeout(csa, CATU_STATUS, CATU_STATUS_READY, 1);
}
-static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
+static int catu_enable_hw(struct catu_drvdata *drvdata, enum cs_mode cs_mode,
+ void *data)
{
int rc;
u32 control, mode;
- struct etr_buf *etr_buf = data;
+ struct etr_buf *etr_buf = NULL;
struct device *dev = &drvdata->csdev->dev;
struct coresight_device *csdev = drvdata->csdev;
+ struct coresight_device *etrdev;
+ union coresight_dev_subtype etr_subtype = {
+ .sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM
+ };
if (catu_wait_for_ready(drvdata))
dev_warn(dev, "Timeout while waiting for READY\n");
@@ -416,6 +421,13 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
if (rc)
return rc;
+ etrdev = coresight_find_input_type(
+ csdev->pdata, CORESIGHT_DEV_TYPE_SINK, etr_subtype);
+ if (etrdev) {
+ etr_buf = tmc_etr_get_buffer(etrdev, cs_mode, data);
+ if (IS_ERR(etr_buf))
+ return PTR_ERR(etr_buf);
+ }
control |= BIT(CATU_CONTROL_ENABLE);
if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
@@ -441,13 +453,14 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
return 0;
}
-static int catu_enable(struct coresight_device *csdev, void *data)
+static int catu_enable(struct coresight_device *csdev, enum cs_mode mode,
+ void *data)
{
int rc;
struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
CS_UNLOCK(catu_drvdata->base);
- rc = catu_enable_hw(catu_drvdata, data);
+ rc = catu_enable_hw(catu_drvdata, mode, data);
CS_LOCK(catu_drvdata->base);
return rc;
}
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index d3bf82c0de1d..118fcf27854d 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*/
+#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -112,40 +113,24 @@ struct coresight_device *coresight_get_percpu_sink(int cpu)
}
EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
-static int coresight_find_link_inport(struct coresight_device *csdev,
- struct coresight_device *parent)
+static struct coresight_connection *
+coresight_find_out_connection(struct coresight_device *src_dev,
+ struct coresight_device *dest_dev)
{
int i;
struct coresight_connection *conn;
- for (i = 0; i < parent->pdata->nr_outport; i++) {
- conn = &parent->pdata->conns[i];
- if (conn->child_dev == csdev)
- return conn->child_port;
+ for (i = 0; i < src_dev->pdata->nr_outconns; i++) {
+ conn = src_dev->pdata->out_conns[i];
+ if (conn->dest_dev == dest_dev)
+ return conn;
}
- dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
- dev_name(&parent->dev), dev_name(&csdev->dev));
+ dev_err(&src_dev->dev,
+ "couldn't find output connection, src_dev: %s, dest_dev: %s\n",
+ dev_name(&src_dev->dev), dev_name(&dest_dev->dev));
- return -ENODEV;
-}
-
-static int coresight_find_link_outport(struct coresight_device *csdev,
- struct coresight_device *child)
-{
- int i;
- struct coresight_connection *conn;
-
- for (i = 0; i < csdev->pdata->nr_outport; i++) {
- conn = &csdev->pdata->conns[i];
- if (conn->child_dev == child)
- return conn->outport;
- }
-
- dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
- dev_name(&csdev->dev), dev_name(&child->dev));
-
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
static inline u32 coresight_read_claim_tags(struct coresight_device *csdev)
@@ -252,63 +237,47 @@ void coresight_disclaim_device(struct coresight_device *csdev)
}
EXPORT_SYMBOL_GPL(coresight_disclaim_device);
-/* enable or disable an associated CTI device of the supplied CS device */
-static int
-coresight_control_assoc_ectdev(struct coresight_device *csdev, bool enable)
+/*
+ * Add a helper as an output device. This function takes the @coresight_mutex
+ * because it's assumed that it's called from the helper device, outside of the
+ * core code where the mutex would already be held. Don't add new calls to this
+ * from inside the core code, instead try to add the new helper to the DT and
+ * ACPI where it will be picked up and linked automatically.
+ */
+void coresight_add_helper(struct coresight_device *csdev,
+ struct coresight_device *helper)
{
- int ect_ret = 0;
- struct coresight_device *ect_csdev = csdev->ect_dev;
- struct module *mod;
+ int i;
+ struct coresight_connection conn = {};
+ struct coresight_connection *new_conn;
- if (!ect_csdev)
- return 0;
- if ((!ect_ops(ect_csdev)->enable) || (!ect_ops(ect_csdev)->disable))
- return 0;
+ mutex_lock(&coresight_mutex);
+ conn.dest_fwnode = fwnode_handle_get(dev_fwnode(&helper->dev));
+ conn.dest_dev = helper;
+ conn.dest_port = conn.src_port = -1;
+ conn.src_dev = csdev;
- mod = ect_csdev->dev.parent->driver->owner;
- if (enable) {
- if (try_module_get(mod)) {
- ect_ret = ect_ops(ect_csdev)->enable(ect_csdev);
- if (ect_ret) {
- module_put(mod);
- } else {
- get_device(ect_csdev->dev.parent);
- csdev->ect_enabled = true;
- }
- } else
- ect_ret = -ENODEV;
- } else {
- if (csdev->ect_enabled) {
- ect_ret = ect_ops(ect_csdev)->disable(ect_csdev);
- put_device(ect_csdev->dev.parent);
- module_put(mod);
- csdev->ect_enabled = false;
- }
- }
+ /*
+ * Check for duplicates because this is called every time a helper
+ * device is re-loaded. Existing connections will get re-linked
+ * automatically.
+ */
+ for (i = 0; i < csdev->pdata->nr_outconns; ++i)
+ if (csdev->pdata->out_conns[i]->dest_fwnode == conn.dest_fwnode)
+ goto unlock;
- /* output warning if ECT enable is preventing trace operation */
- if (ect_ret)
- dev_info(&csdev->dev, "Associated ECT device (%s) %s failed\n",
- dev_name(&ect_csdev->dev),
- enable ? "enable" : "disable");
- return ect_ret;
-}
+ new_conn = coresight_add_out_conn(csdev->dev.parent, csdev->pdata,
+ &conn);
+ if (!IS_ERR(new_conn))
+ coresight_add_in_conn(new_conn);
-/*
- * Set the associated ect / cti device while holding the coresight_mutex
- * to avoid a race with coresight_enable that may try to use this value.
- */
-void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
- struct coresight_device *ect_csdev)
-{
- mutex_lock(&coresight_mutex);
- csdev->ect_dev = ect_csdev;
+unlock:
mutex_unlock(&coresight_mutex);
}
-EXPORT_SYMBOL_GPL(coresight_set_assoc_ectdev_mutex);
+EXPORT_SYMBOL_GPL(coresight_add_helper);
static int coresight_enable_sink(struct coresight_device *csdev,
- u32 mode, void *data)
+ enum cs_mode mode, void *data)
{
int ret;
@@ -319,14 +288,10 @@ static int coresight_enable_sink(struct coresight_device *csdev,
if (!sink_ops(csdev)->enable)
return -EINVAL;
- ret = coresight_control_assoc_ectdev(csdev, true);
- if (ret)
- return ret;
ret = sink_ops(csdev)->enable(csdev, mode, data);
- if (ret) {
- coresight_control_assoc_ectdev(csdev, false);
+ if (ret)
return ret;
- }
+
csdev->enable = true;
return 0;
@@ -342,7 +307,6 @@ static void coresight_disable_sink(struct coresight_device *csdev)
ret = sink_ops(csdev)->disable(csdev);
if (ret)
return;
- coresight_control_assoc_ectdev(csdev, false);
csdev->enable = false;
}
@@ -352,32 +316,26 @@ static int coresight_enable_link(struct coresight_device *csdev,
{
int ret = 0;
int link_subtype;
- int inport, outport;
+ struct coresight_connection *inconn, *outconn;
if (!parent || !child)
return -EINVAL;
- inport = coresight_find_link_inport(csdev, parent);
- outport = coresight_find_link_outport(csdev, child);
+ inconn = coresight_find_out_connection(parent, csdev);
+ outconn = coresight_find_out_connection(csdev, child);
link_subtype = csdev->subtype.link_subtype;
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && inport < 0)
- return inport;
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0)
- return outport;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && IS_ERR(inconn))
+ return PTR_ERR(inconn);
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && IS_ERR(outconn))
+ return PTR_ERR(outconn);
if (link_ops(csdev)->enable) {
- ret = coresight_control_assoc_ectdev(csdev, true);
- if (!ret) {
- ret = link_ops(csdev)->enable(csdev, inport, outport);
- if (ret)
- coresight_control_assoc_ectdev(csdev, false);
- }
+ ret = link_ops(csdev)->enable(csdev, inconn, outconn);
+ if (!ret)
+ csdev->enable = true;
}
- if (!ret)
- csdev->enable = true;
-
return ret;
}
@@ -385,78 +343,125 @@ static void coresight_disable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
{
- int i, nr_conns;
+ int i;
int link_subtype;
- int inport, outport;
+ struct coresight_connection *inconn, *outconn;
if (!parent || !child)
return;
- inport = coresight_find_link_inport(csdev, parent);
- outport = coresight_find_link_outport(csdev, child);
+ inconn = coresight_find_out_connection(parent, csdev);
+ outconn = coresight_find_out_connection(csdev, child);
link_subtype = csdev->subtype.link_subtype;
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
- nr_conns = csdev->pdata->nr_inport;
- } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
- nr_conns = csdev->pdata->nr_outport;
- } else {
- nr_conns = 1;
- }
-
if (link_ops(csdev)->disable) {
- link_ops(csdev)->disable(csdev, inport, outport);
- coresight_control_assoc_ectdev(csdev, false);
+ link_ops(csdev)->disable(csdev, inconn, outconn);
}
- for (i = 0; i < nr_conns; i++)
- if (atomic_read(&csdev->refcnt[i]) != 0)
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
+ for (i = 0; i < csdev->pdata->nr_inconns; i++)
+ if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) !=
+ 0)
+ return;
+ } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
+ for (i = 0; i < csdev->pdata->nr_outconns; i++)
+ if (atomic_read(&csdev->pdata->out_conns[i]->src_refcnt) !=
+ 0)
+ return;
+ } else {
+ if (atomic_read(&csdev->refcnt) != 0)
return;
+ }
csdev->enable = false;
}
-static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
+int coresight_enable_source(struct coresight_device *csdev, enum cs_mode mode,
+ void *data)
{
int ret;
if (!csdev->enable) {
if (source_ops(csdev)->enable) {
- ret = coresight_control_assoc_ectdev(csdev, true);
+ ret = source_ops(csdev)->enable(csdev, data, mode);
if (ret)
return ret;
- ret = source_ops(csdev)->enable(csdev, NULL, mode);
- if (ret) {
- coresight_control_assoc_ectdev(csdev, false);
- return ret;
- }
}
csdev->enable = true;
}
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(coresight_enable_source);
+
+static bool coresight_is_helper(struct coresight_device *csdev)
+{
+ return csdev->type == CORESIGHT_DEV_TYPE_HELPER;
+}
+
+static int coresight_enable_helper(struct coresight_device *csdev,
+ enum cs_mode mode, void *data)
+{
+ int ret;
+
+ if (!helper_ops(csdev)->enable)
+ return 0;
+ ret = helper_ops(csdev)->enable(csdev, mode, data);
+ if (ret)
+ return ret;
+ csdev->enable = true;
return 0;
}
+static void coresight_disable_helper(struct coresight_device *csdev)
+{
+ int ret;
+
+ if (!helper_ops(csdev)->disable)
+ return;
+
+ ret = helper_ops(csdev)->disable(csdev, NULL);
+ if (ret)
+ return;
+ csdev->enable = false;
+}
+
+static void coresight_disable_helpers(struct coresight_device *csdev)
+{
+ int i;
+ struct coresight_device *helper;
+
+ for (i = 0; i < csdev->pdata->nr_outconns; ++i) {
+ helper = csdev->pdata->out_conns[i]->dest_dev;
+ if (helper && coresight_is_helper(helper))
+ coresight_disable_helper(helper);
+ }
+}
+
/**
* coresight_disable_source - Drop the reference count by 1 and disable
* the device if there are no users left.
*
* @csdev: The coresight device to disable
+ * @data: Opaque data to pass on to the disable function of the source device.
+ * For example in perf mode this is a pointer to the struct perf_event.
*
* Returns true if the device has been disabled.
*/
-static bool coresight_disable_source(struct coresight_device *csdev)
+bool coresight_disable_source(struct coresight_device *csdev, void *data)
{
- if (atomic_dec_return(csdev->refcnt) == 0) {
+ if (atomic_dec_return(&csdev->refcnt) == 0) {
if (source_ops(csdev)->disable)
- source_ops(csdev)->disable(csdev, NULL);
- coresight_control_assoc_ectdev(csdev, false);
+ source_ops(csdev)->disable(csdev, data);
+ coresight_disable_helpers(csdev);
csdev->enable = false;
}
return !csdev->enable;
}
+EXPORT_SYMBOL_GPL(coresight_disable_source);
/*
* coresight_disable_path_from : Disable components in the given path beyond
@@ -507,6 +512,9 @@ static void coresight_disable_path_from(struct list_head *path,
default:
break;
}
+
+ /* Disable all helpers adjacent along the path last */
+ coresight_disable_helpers(csdev);
}
}
@@ -516,9 +524,28 @@ void coresight_disable_path(struct list_head *path)
}
EXPORT_SYMBOL_GPL(coresight_disable_path);
-int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data)
+static int coresight_enable_helpers(struct coresight_device *csdev,
+ enum cs_mode mode, void *data)
{
+ int i, ret = 0;
+ struct coresight_device *helper;
+ for (i = 0; i < csdev->pdata->nr_outconns; ++i) {
+ helper = csdev->pdata->out_conns[i]->dest_dev;
+ if (!helper || !coresight_is_helper(helper))
+ continue;
+
+ ret = coresight_enable_helper(helper, mode, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int coresight_enable_path(struct list_head *path, enum cs_mode mode,
+ void *sink_data)
+{
int ret = 0;
u32 type;
struct coresight_node *nd;
@@ -528,6 +555,10 @@ int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data)
csdev = nd->csdev;
type = csdev->type;
+ /* Enable all helpers adjacent to the path first */
+ ret = coresight_enable_helpers(csdev, mode, sink_data);
+ if (ret)
+ goto err;
/*
* ETF devices are tricky... They can be a link or a sink,
* depending on how they are configured. If an ETF has been
@@ -602,10 +633,10 @@ coresight_find_enabled_sink(struct coresight_device *csdev)
/*
* Recursively explore each port found on this element.
*/
- for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
struct coresight_device *child_dev;
- child_dev = csdev->pdata->conns[i].child_dev;
+ child_dev = csdev->pdata->out_conns[i]->dest_dev;
if (child_dev)
sink = coresight_find_enabled_sink(child_dev);
if (sink)
@@ -718,11 +749,11 @@ static int coresight_grab_device(struct coresight_device *csdev)
{
int i;
- for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
struct coresight_device *child;
- child = csdev->pdata->conns[i].child_dev;
- if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ child = csdev->pdata->out_conns[i]->dest_dev;
+ if (child && coresight_is_helper(child))
if (!coresight_get_ref(child))
goto err;
}
@@ -732,8 +763,8 @@ err:
for (i--; i >= 0; i--) {
struct coresight_device *child;
- child = csdev->pdata->conns[i].child_dev;
- if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ child = csdev->pdata->out_conns[i]->dest_dev;
+ if (child && coresight_is_helper(child))
coresight_put_ref(child);
}
return -ENODEV;
@@ -748,11 +779,11 @@ static void coresight_drop_device(struct coresight_device *csdev)
int i;
coresight_put_ref(csdev);
- for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
struct coresight_device *child;
- child = csdev->pdata->conns[i].child_dev;
- if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ child = csdev->pdata->out_conns[i]->dest_dev;
+ if (child && coresight_is_helper(child))
coresight_put_ref(child);
}
}
@@ -790,10 +821,10 @@ static int _coresight_build_path(struct coresight_device *csdev,
}
/* Not a sink - recursively explore each port found on this element */
- for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
struct coresight_device *child_dev;
- child_dev = csdev->pdata->conns[i].child_dev;
+ child_dev = csdev->pdata->out_conns[i]->dest_dev;
if (child_dev &&
_coresight_build_path(child_dev, sink, path) == 0) {
found = true;
@@ -959,11 +990,11 @@ coresight_find_sink(struct coresight_device *csdev, int *depth)
* Not a sink we want - or possible child sink may be better.
* recursively explore each port found on this element.
*/
- for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
struct coresight_device *child_dev, *sink = NULL;
int child_depth = curr_depth;
- child_dev = csdev->pdata->conns[i].child_dev;
+ child_dev = csdev->pdata->out_conns[i]->dest_dev;
if (child_dev)
sink = coresight_find_sink(child_dev, &child_depth);
@@ -1093,7 +1124,7 @@ int coresight_enable(struct coresight_device *csdev)
* source is already enabled.
*/
if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
goto out;
}
@@ -1114,7 +1145,7 @@ int coresight_enable(struct coresight_device *csdev)
if (ret)
goto err_path;
- ret = coresight_enable_source(csdev, CS_MODE_SYSFS);
+ ret = coresight_enable_source(csdev, CS_MODE_SYSFS, NULL);
if (ret)
goto err_source;
@@ -1171,7 +1202,7 @@ void coresight_disable(struct coresight_device *csdev)
if (ret)
goto out;
- if (!csdev->enable || !coresight_disable_source(csdev))
+ if (!csdev->enable || !coresight_disable_source(csdev, NULL))
goto out;
switch (csdev->subtype.source_subtype) {
@@ -1296,18 +1327,16 @@ static struct device_type coresight_dev_type[] = {
},
{
.name = "helper",
- },
- {
- .name = "ect",
- },
+ }
};
+/* Ensure the enum matches the names and groups */
+static_assert(ARRAY_SIZE(coresight_dev_type) == CORESIGHT_DEV_TYPE_MAX);
static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
fwnode_handle_put(csdev->dev.fwnode);
- kfree(csdev->refcnt);
kfree(csdev);
}
@@ -1315,45 +1344,57 @@ static int coresight_orphan_match(struct device *dev, void *data)
{
int i, ret = 0;
bool still_orphan = false;
- struct coresight_device *csdev, *i_csdev;
+ struct coresight_device *dst_csdev = data;
+ struct coresight_device *src_csdev = to_coresight_device(dev);
struct coresight_connection *conn;
-
- csdev = data;
- i_csdev = to_coresight_device(dev);
-
- /* No need to check oneself */
- if (csdev == i_csdev)
- return 0;
+ bool fixup_self = (src_csdev == dst_csdev);
/* Move on to another component if no connection is orphan */
- if (!i_csdev->orphan)
+ if (!src_csdev->orphan)
return 0;
/*
- * Circle throuch all the connection of that component. If we find
- * an orphan connection whose name matches @csdev, link it.
+ * Circle through all the connections of that component. If we find
+ * an orphan connection whose name matches @dst_csdev, link it.
*/
- for (i = 0; i < i_csdev->pdata->nr_outport; i++) {
- conn = &i_csdev->pdata->conns[i];
+ for (i = 0; i < src_csdev->pdata->nr_outconns; i++) {
+ conn = src_csdev->pdata->out_conns[i];
- /* Skip the port if FW doesn't describe it */
- if (!conn->child_fwnode)
+ /* Skip the port if it's already connected. */
+ if (conn->dest_dev)
continue;
- /* We have found at least one orphan connection */
- if (conn->child_dev == NULL) {
- /* Does it match this newly added device? */
- if (conn->child_fwnode == csdev->dev.fwnode) {
- ret = coresight_make_links(i_csdev,
- conn, csdev);
- if (ret)
- return ret;
- } else {
- /* This component still has an orphan */
- still_orphan = true;
- }
+
+ /*
+ * If we are at the "new" device, which triggered this search,
+ * we must find the remote device from the fwnode in the
+ * connection.
+ */
+ if (fixup_self)
+ dst_csdev = coresight_find_csdev_by_fwnode(
+ conn->dest_fwnode);
+
+ /* Does it match this newly added device? */
+ if (dst_csdev && conn->dest_fwnode == dst_csdev->dev.fwnode) {
+ ret = coresight_make_links(src_csdev, conn, dst_csdev);
+ if (ret)
+ return ret;
+
+ /*
+ * Install the device connection. This also indicates that
+ * the links are operational on both ends.
+ */
+ conn->dest_dev = dst_csdev;
+ conn->src_dev = src_csdev;
+
+ ret = coresight_add_in_conn(conn);
+ if (ret)
+ return ret;
+ } else {
+ /* This component still has an orphan */
+ still_orphan = true;
}
}
- i_csdev->orphan = still_orphan;
+ src_csdev->orphan = still_orphan;
/*
* Returning '0' in case we didn't encounter any error,
@@ -1368,91 +1409,43 @@ static int coresight_fixup_orphan_conns(struct coresight_device *csdev)
csdev, coresight_orphan_match);
}
-
-static int coresight_fixup_device_conns(struct coresight_device *csdev)
-{
- int i, ret = 0;
-
- for (i = 0; i < csdev->pdata->nr_outport; i++) {
- struct coresight_connection *conn = &csdev->pdata->conns[i];
-
- if (!conn->child_fwnode)
- continue;
- conn->child_dev =
- coresight_find_csdev_by_fwnode(conn->child_fwnode);
- if (conn->child_dev && conn->child_dev->has_conns_grp) {
- ret = coresight_make_links(csdev, conn,
- conn->child_dev);
- if (ret)
- break;
- } else {
- csdev->orphan = true;
- }
- }
-
- return ret;
-}
-
-static int coresight_remove_match(struct device *dev, void *data)
+/* coresight_remove_conns - Remove other device's references to this device */
+static void coresight_remove_conns(struct coresight_device *csdev)
{
- int i;
- struct coresight_device *csdev, *iterator;
+ int i, j;
struct coresight_connection *conn;
- csdev = data;
- iterator = to_coresight_device(dev);
-
- /* No need to check oneself */
- if (csdev == iterator)
- return 0;
-
/*
- * Circle throuch all the connection of that component. If we find
- * a connection whose name matches @csdev, remove it.
+ * Remove the input connection references from the destination device
+ * for each output connection.
*/
- for (i = 0; i < iterator->pdata->nr_outport; i++) {
- conn = &iterator->pdata->conns[i];
-
- if (conn->child_dev == NULL || conn->child_fwnode == NULL)
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
+ conn = csdev->pdata->out_conns[i];
+ if (!conn->dest_dev)
continue;
- if (csdev->dev.fwnode == conn->child_fwnode) {
- iterator->orphan = true;
- coresight_remove_links(iterator, conn);
- /*
- * Drop the reference to the handle for the remote
- * device acquired in parsing the connections from
- * platform data.
- */
- fwnode_handle_put(conn->child_fwnode);
- conn->child_fwnode = NULL;
- /* No need to continue */
- break;
- }
+ for (j = 0; j < conn->dest_dev->pdata->nr_inconns; ++j)
+ if (conn->dest_dev->pdata->in_conns[j] == conn) {
+ conn->dest_dev->pdata->in_conns[j] = NULL;
+ break;
+ }
}
/*
- * Returning '0' ensures that all known component on the
- * bus will be checked.
+ * For all input connections, remove references to this device.
+ * Connection objects are shared so modifying this device's input
+ * connections affects the other device's output connection.
*/
- return 0;
-}
+ for (i = 0; i < csdev->pdata->nr_inconns; ++i) {
+ conn = csdev->pdata->in_conns[i];
+ /* Input conns array is sparse */
+ if (!conn)
+ continue;
-/*
- * coresight_remove_conns - Remove references to this given devices
- * from the connections of other devices.
- */
-static void coresight_remove_conns(struct coresight_device *csdev)
-{
- /*
- * Another device will point to this device only if there is
- * an output port connected to this one. i.e, if the device
- * doesn't have at least one input port, there is no point
- * in searching all the devices.
- */
- if (csdev->pdata->nr_inport)
- bus_for_each_dev(&coresight_bustype, NULL,
- csdev, coresight_remove_match);
+ conn->src_dev->orphan = true;
+ coresight_remove_links(conn->src_dev, conn);
+ conn->dest_dev = NULL;
+ }
}
/**
@@ -1544,24 +1537,27 @@ void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
* to the output port of this device.
*/
void coresight_release_platform_data(struct coresight_device *csdev,
+ struct device *dev,
struct coresight_platform_data *pdata)
{
int i;
- struct coresight_connection *conns = pdata->conns;
+ struct coresight_connection **conns = pdata->out_conns;
- for (i = 0; i < pdata->nr_outport; i++) {
+ for (i = 0; i < pdata->nr_outconns; i++) {
/* If we have made the links, remove them now */
- if (csdev && conns[i].child_dev)
- coresight_remove_links(csdev, &conns[i]);
+ if (csdev && conns[i]->dest_dev)
+ coresight_remove_links(csdev, conns[i]);
/*
* Drop the refcount and clear the handle as this device
* is going away
*/
- if (conns[i].child_fwnode) {
- fwnode_handle_put(conns[i].child_fwnode);
- pdata->conns[i].child_fwnode = NULL;
- }
+ fwnode_handle_put(conns[i]->dest_fwnode);
+ conns[i]->dest_fwnode = NULL;
+ devm_kfree(dev, conns[i]);
}
+ devm_kfree(dev, pdata->out_conns);
+ devm_kfree(dev, pdata->in_conns);
+ devm_kfree(dev, pdata);
if (csdev)
coresight_remove_conns_sysfs_group(csdev);
}
@@ -1569,9 +1565,6 @@ void coresight_release_platform_data(struct coresight_device *csdev,
struct coresight_device *coresight_register(struct coresight_desc *desc)
{
int ret;
- int link_subtype;
- int nr_refcnts = 1;
- atomic_t *refcnts = NULL;
struct coresight_device *csdev;
bool registered = false;
@@ -1581,32 +1574,13 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
goto err_out;
}
- if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
- desc->type == CORESIGHT_DEV_TYPE_LINKSINK) {
- link_subtype = desc->subtype.link_subtype;
-
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
- nr_refcnts = desc->pdata->nr_inport;
- else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
- nr_refcnts = desc->pdata->nr_outport;
- }
-
- refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
- if (!refcnts) {
- ret = -ENOMEM;
- kfree(csdev);
- goto err_out;
- }
-
- csdev->refcnt = refcnts;
-
csdev->pdata = desc->pdata;
csdev->type = desc->type;
csdev->subtype = desc->subtype;
csdev->ops = desc->ops;
csdev->access = desc->access;
- csdev->orphan = false;
+ csdev->orphan = true;
csdev->dev.type = &coresight_dev_type[desc->type];
csdev->dev.groups = desc->groups;
@@ -1657,8 +1631,6 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
ret = coresight_create_conns_sysfs_group(csdev);
if (!ret)
- ret = coresight_fixup_device_conns(csdev);
- if (!ret)
ret = coresight_fixup_orphan_conns(csdev);
out_unlock:
@@ -1678,7 +1650,7 @@ out_unlock:
err_out:
/* Cleanup the connection information */
- coresight_release_platform_data(NULL, desc->pdata);
+ coresight_release_platform_data(NULL, desc->dev, desc->pdata);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_register);
@@ -1691,7 +1663,7 @@ void coresight_unregister(struct coresight_device *csdev)
cti_assoc_ops->remove(csdev);
coresight_remove_conns(csdev);
coresight_clear_default_sink(csdev);
- coresight_release_platform_data(csdev, csdev->pdata);
+ coresight_release_platform_data(csdev, csdev->dev.parent, csdev->pdata);
device_unregister(&csdev->dev);
}
EXPORT_SYMBOL_GPL(coresight_unregister);
@@ -1714,6 +1686,69 @@ static inline int coresight_search_device_idx(struct coresight_dev_list *dict,
return -ENOENT;
}
+static bool coresight_compare_type(enum coresight_dev_type type_a,
+ union coresight_dev_subtype subtype_a,
+ enum coresight_dev_type type_b,
+ union coresight_dev_subtype subtype_b)
+{
+ if (type_a != type_b)
+ return false;
+
+ switch (type_a) {
+ case CORESIGHT_DEV_TYPE_SINK:
+ return subtype_a.sink_subtype == subtype_b.sink_subtype;
+ case CORESIGHT_DEV_TYPE_LINK:
+ return subtype_a.link_subtype == subtype_b.link_subtype;
+ case CORESIGHT_DEV_TYPE_LINKSINK:
+ return subtype_a.link_subtype == subtype_b.link_subtype &&
+ subtype_a.sink_subtype == subtype_b.sink_subtype;
+ case CORESIGHT_DEV_TYPE_SOURCE:
+ return subtype_a.source_subtype == subtype_b.source_subtype;
+ case CORESIGHT_DEV_TYPE_HELPER:
+ return subtype_a.helper_subtype == subtype_b.helper_subtype;
+ default:
+ return false;
+ }
+}
+
+struct coresight_device *
+coresight_find_input_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype)
+{
+ int i;
+ struct coresight_connection *conn;
+
+ for (i = 0; i < pdata->nr_inconns; ++i) {
+ conn = pdata->in_conns[i];
+ if (conn &&
+ coresight_compare_type(type, subtype, conn->src_dev->type,
+ conn->src_dev->subtype))
+ return conn->src_dev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(coresight_find_input_type);
+
+struct coresight_device *
+coresight_find_output_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype)
+{
+ int i;
+ struct coresight_connection *conn;
+
+ for (i = 0; i < pdata->nr_outconns; ++i) {
+ conn = pdata->out_conns[i];
+ if (conn->dest_dev &&
+ coresight_compare_type(type, subtype, conn->dest_dev->type,
+ conn->dest_dev->subtype))
+ return conn->dest_dev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(coresight_find_output_type);
+
bool coresight_loses_context_with_cpu(struct device *dev)
{
return fwnode_property_present(dev_fwnode(dev),
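The static_assert() added above keeps the device-type table in lockstep with the enum now that the separate ECT entry is gone. The same build-time check in isolation, with hypothetical names:

#include <linux/build_bug.h>
#include <linux/kernel.h>

enum foo_type { FOO_SOURCE, FOO_SINK, FOO_TYPE_MAX };

static const char * const foo_type_names[] = {
	[FOO_SOURCE] = "source",
	[FOO_SINK]   = "sink",
};

/* Fails to compile if the enum grows without a matching table entry. */
static_assert(ARRAY_SIZE(foo_type_names) == FOO_TYPE_MAX);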
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 277c890a1f1f..7023ff70cc28 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -555,7 +555,10 @@ static void cti_add_assoc_to_csdev(struct coresight_device *csdev)
mutex_lock(&ect_mutex);
/* exit if current is an ECT device.*/
- if ((csdev->type == CORESIGHT_DEV_TYPE_ECT) || list_empty(&ect_net))
+ if ((csdev->type == CORESIGHT_DEV_TYPE_HELPER &&
+ csdev->subtype.helper_subtype ==
+ CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI) ||
+ list_empty(&ect_net))
goto cti_add_done;
/* if we didn't find the csdev previously we used the fwnode name */
@@ -571,8 +574,7 @@ static void cti_add_assoc_to_csdev(struct coresight_device *csdev)
* if we found a matching csdev then update the ECT
* association pointer for the device with this CTI.
*/
- coresight_set_assoc_ectdev_mutex(csdev,
- ect_item->csdev);
+ coresight_add_helper(csdev, ect_item->csdev);
break;
}
}
@@ -582,26 +584,30 @@ cti_add_done:
/*
* Removing the associated devices is easier.
- * A CTI will not have a value for csdev->ect_dev.
*/
static void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
{
struct cti_drvdata *ctidrv;
struct cti_trig_con *tc;
struct cti_device *ctidev;
+ union coresight_dev_subtype cti_subtype = {
+ .helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI
+ };
+ struct coresight_device *cti_csdev = coresight_find_output_type(
+ csdev->pdata, CORESIGHT_DEV_TYPE_HELPER, cti_subtype);
+
+ if (!cti_csdev)
+ return;
mutex_lock(&ect_mutex);
- if (csdev->ect_dev) {
- ctidrv = csdev_to_cti_drvdata(csdev->ect_dev);
- ctidev = &ctidrv->ctidev;
- list_for_each_entry(tc, &ctidev->trig_cons, node) {
- if (tc->con_dev == csdev) {
- cti_remove_sysfs_link(ctidrv, tc);
- tc->con_dev = NULL;
- break;
- }
+ ctidrv = csdev_to_cti_drvdata(cti_csdev);
+ ctidev = &ctidrv->ctidev;
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ if (tc->con_dev == csdev) {
+ cti_remove_sysfs_link(ctidrv, tc);
+ tc->con_dev = NULL;
+ break;
}
- csdev->ect_dev = NULL;
}
mutex_unlock(&ect_mutex);
}
@@ -630,8 +636,8 @@ static void cti_update_conn_xrefs(struct cti_drvdata *drvdata)
/* if we can set the sysfs link */
if (cti_add_sysfs_link(drvdata, tc))
/* set the CTI/csdev association */
- coresight_set_assoc_ectdev_mutex(tc->con_dev,
- drvdata->csdev);
+ coresight_add_helper(tc->con_dev,
+ drvdata->csdev);
else
/* otherwise remove reference from CTI */
tc->con_dev = NULL;
@@ -646,8 +652,6 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev) {
- coresight_set_assoc_ectdev_mutex(tc->con_dev,
- NULL);
cti_remove_sysfs_link(drvdata, tc);
tc->con_dev = NULL;
}
@@ -795,27 +799,27 @@ static void cti_pm_release(struct cti_drvdata *drvdata)
}
/** cti ect operations **/
-int cti_enable(struct coresight_device *csdev)
+int cti_enable(struct coresight_device *csdev, enum cs_mode mode, void *data)
{
struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
return cti_enable_hw(drvdata);
}
-int cti_disable(struct coresight_device *csdev)
+int cti_disable(struct coresight_device *csdev, void *data)
{
struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
return cti_disable_hw(drvdata);
}
-static const struct coresight_ops_ect cti_ops_ect = {
+static const struct coresight_ops_helper cti_ops_ect = {
.enable = cti_enable,
.disable = cti_disable,
};
static const struct coresight_ops cti_ops = {
- .ect_ops = &cti_ops_ect,
+ .helper_ops = &cti_ops_ect,
};
/*
@@ -922,8 +926,8 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
/* set up coresight component description */
cti_desc.pdata = pdata;
- cti_desc.type = CORESIGHT_DEV_TYPE_ECT;
- cti_desc.subtype.ect_subtype = CORESIGHT_DEV_SUBTYPE_ECT_CTI;
+ cti_desc.type = CORESIGHT_DEV_TYPE_HELPER;
+ cti_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI;
cti_desc.ops = &cti_ops;
cti_desc.groups = drvdata->ctidev.con_groups;
cti_desc.dev = dev;
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index e528cff9d4e2..d25dd2737b49 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -112,11 +112,11 @@ static ssize_t enable_store(struct device *dev,
ret = pm_runtime_resume_and_get(dev->parent);
if (ret)
return ret;
- ret = cti_enable(drvdata->csdev);
+ ret = cti_enable(drvdata->csdev, CS_MODE_SYSFS, NULL);
if (ret)
pm_runtime_put(dev->parent);
} else {
- ret = cti_disable(drvdata->csdev);
+ ret = cti_disable(drvdata->csdev, NULL);
if (!ret)
pm_runtime_put(dev->parent);
}
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
index 8b106b13a244..cb9ee616d01f 100644
--- a/drivers/hwtracing/coresight/coresight-cti.h
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -215,8 +215,8 @@ int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata,
const char *assoc_dev_name);
struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs,
int out_sigs);
-int cti_enable(struct coresight_device *csdev);
-int cti_disable(struct coresight_device *csdev);
+int cti_enable(struct coresight_device *csdev, enum cs_mode mode, void *data);
+int cti_disable(struct coresight_device *csdev, void *data);
void cti_write_all_hw_regs(struct cti_drvdata *drvdata);
void cti_write_intack(struct device *dev, u32 ackval);
void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value);
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
new file mode 100644
index 000000000000..8035120b70b3
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/coresight.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "coresight-priv.h"
+
+struct dummy_drvdata {
+ struct device *dev;
+ struct coresight_device *csdev;
+};
+
+DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source");
+DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink");
+
+static int dummy_source_enable(struct coresight_device *csdev,
+ struct perf_event *event, enum cs_mode mode)
+{
+ dev_dbg(csdev->dev.parent, "Dummy source enabled\n");
+
+ return 0;
+}
+
+static void dummy_source_disable(struct coresight_device *csdev,
+ struct perf_event *event)
+{
+ dev_dbg(csdev->dev.parent, "Dummy source disabled\n");
+}
+
+static int dummy_sink_enable(struct coresight_device *csdev, enum cs_mode mode,
+ void *data)
+{
+ dev_dbg(csdev->dev.parent, "Dummy sink enabled\n");
+
+ return 0;
+}
+
+static int dummy_sink_disable(struct coresight_device *csdev)
+{
+ dev_dbg(csdev->dev.parent, "Dummy sink disabled\n");
+
+ return 0;
+}
+
+static const struct coresight_ops_source dummy_source_ops = {
+ .enable = dummy_source_enable,
+ .disable = dummy_source_disable,
+};
+
+static const struct coresight_ops dummy_source_cs_ops = {
+ .source_ops = &dummy_source_ops,
+};
+
+static const struct coresight_ops_sink dummy_sink_ops = {
+ .enable = dummy_sink_enable,
+ .disable = dummy_sink_disable,
+};
+
+static const struct coresight_ops dummy_sink_cs_ops = {
+ .sink_ops = &dummy_sink_ops,
+};
+
+static int dummy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct coresight_platform_data *pdata;
+ struct dummy_drvdata *drvdata;
+ struct coresight_desc desc = { 0 };
+
+ if (of_device_is_compatible(node, "arm,coresight-dummy-source")) {
+
+ desc.name = coresight_alloc_device_name(&source_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+
+ desc.type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc.subtype.source_subtype =
+ CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
+ desc.ops = &dummy_source_cs_ops;
+ } else if (of_device_is_compatible(node, "arm,coresight-dummy-sink")) {
+ desc.name = coresight_alloc_device_name(&sink_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+
+ desc.type = CORESIGHT_DEV_TYPE_SINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_DUMMY;
+ desc.ops = &dummy_sink_cs_ops;
+ } else {
+ dev_err(dev, "Device type not set\n");
+ return -EINVAL;
+ }
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ pdev->dev.platform_data = pdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ desc.pdata = pdev->dev.platform_data;
+ desc.dev = &pdev->dev;
+ drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ pm_runtime_enable(dev);
+ dev_dbg(dev, "Dummy device initialized\n");
+
+ return 0;
+}
+
+static int dummy_remove(struct platform_device *pdev)
+{
+ struct dummy_drvdata *drvdata = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_disable(dev);
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static const struct of_device_id dummy_match[] = {
+ {.compatible = "arm,coresight-dummy-source"},
+ {.compatible = "arm,coresight-dummy-sink"},
+ {},
+};
+
+static struct platform_driver dummy_driver = {
+ .probe = dummy_probe,
+ .remove = dummy_remove,
+ .driver = {
+ .name = "coresight-dummy",
+ .of_match_table = dummy_match,
+ },
+};
+
+static int __init dummy_init(void)
+{
+ return platform_driver_register(&dummy_driver);
+}
+module_init(dummy_init);
+
+static void __exit dummy_exit(void)
+{
+ platform_driver_unregister(&dummy_driver);
+}
+module_exit(dummy_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CoreSight dummy driver");
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 8aa6e4f83e42..fa80039e0821 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -163,7 +163,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
drvdata->mode = CS_MODE_SYSFS;
}
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
@@ -199,7 +199,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
* use for this session.
*/
if (drvdata->pid == pid) {
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
goto out;
}
@@ -217,7 +217,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
/* Associate with monitored process. */
drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
}
out:
@@ -225,7 +225,8 @@ out:
return ret;
}
-static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
+static int etb_enable(struct coresight_device *csdev, enum cs_mode mode,
+ void *data)
{
int ret;
@@ -355,7 +356,7 @@ static int etb_disable(struct coresight_device *csdev)
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(csdev->refcnt)) {
+ if (atomic_dec_return(&csdev->refcnt)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
@@ -446,7 +447,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
- if (atomic_read(csdev->refcnt) != 1)
+ if (atomic_read(&csdev->refcnt) != 1)
goto out;
__etb_disable_hw(drvdata);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 89e8ed214ea4..5ca6278baff4 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -493,7 +493,7 @@ static void etm_event_start(struct perf_event *event, int flags)
goto fail_end_stop;
/* Finally enable the tracer */
- if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
+ if (coresight_enable_source(csdev, CS_MODE_PERF, event))
goto fail_disable_path;
/*
@@ -587,7 +587,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
return;
/* stop tracer */
- source_ops(csdev)->disable(csdev, event);
+ coresight_disable_source(csdev, event);
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index afc57195ee52..116a91d90ac2 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -552,8 +552,8 @@ unlock_enable_sysfs:
return ret;
}
-static int etm_enable(struct coresight_device *csdev,
- struct perf_event *event, u32 mode)
+static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode)
{
int ret;
u32 val;
@@ -671,7 +671,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
static void etm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
- u32 mode;
+ enum cs_mode mode;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 4c15fae534f3..7e307022303a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -822,8 +822,8 @@ unlock_sysfs_enable:
return ret;
}
-static int etm4_enable(struct coresight_device *csdev,
- struct perf_event *event, u32 mode)
+static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode)
{
int ret;
u32 val;
@@ -989,7 +989,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
static void etm4_disable(struct coresight_device *csdev,
struct perf_event *event)
{
- u32 mode;
+ enum cs_mode mode;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
@@ -2190,7 +2190,7 @@ static void clear_etmdrvdata(void *info)
per_cpu(delayed_probe, cpu) = NULL;
}
-static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
+static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
bool had_delayed_probe;
/*
@@ -2217,8 +2217,6 @@ static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
cscfg_unregister_csdev(drvdata->csdev);
coresight_unregister(drvdata->csdev);
}
-
- return 0;
}
static void __exit etm4_remove_amba(struct amba_device *adev)
@@ -2231,13 +2229,12 @@ static void __exit etm4_remove_amba(struct amba_device *adev)
static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
{
- int ret = 0;
struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
if (drvdata)
- ret = etm4_remove_dev(drvdata);
+ etm4_remove_dev(drvdata);
pm_runtime_disable(&pdev->dev);
- return ret;
+ return 0;
}
static const struct amba_id etm4_ids[] = {
@@ -2260,6 +2257,11 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
CS_AMBA_UCI_ID(0x000b6d01, uci_id_etm4),/* HiSilicon-Hip08 */
CS_AMBA_UCI_ID(0x000b6d02, uci_id_etm4),/* HiSilicon-Hip09 */
+ /*
+ * Match all PIDs with ETM4 DEVARCH. No need for adding any of the new
+ * CPUs to the list here.
+ */
+ CS_AMBA_MATCH_ALL_UCI(uci_id_etm4),
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 5e62aa40ecd0..a9f19629f3f8 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2411,7 +2411,6 @@ static ssize_t trctraceid_show(struct device *dev,
return sysfs_emit(buf, "0x%x\n", trace_id);
}
-static DEVICE_ATTR_RO(trctraceid);
struct etmv4_reg {
struct coresight_device *csdev;
@@ -2528,13 +2527,23 @@ coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
return 0;
}
-#define coresight_etm4x_reg(name, offset) \
- &((struct dev_ext_attribute[]) { \
- { \
- __ATTR(name, 0444, coresight_etm4x_reg_show, NULL), \
- (void *)(unsigned long)offset \
- } \
- })[0].attr.attr
+/*
+ * Macro to set an RO ext attribute with offset and show function.
+ * Offset is used in mgmt group to ensure only correct registers for
+ * the ETM / ETE variant are visible.
+ */
+#define coresight_etm4x_reg_showfn(name, offset, showfn) ( \
+ &((struct dev_ext_attribute[]) { \
+ { \
+ __ATTR(name, 0444, showfn, NULL), \
+ (void *)(unsigned long)offset \
+ } \
+ })[0].attr.attr \
+ )
+
+/* macro using the default coresight_etm4x_reg_show function */
+#define coresight_etm4x_reg(name, offset) \
+ coresight_etm4x_reg_showfn(name, offset, coresight_etm4x_reg_show)
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
coresight_etm4x_reg(trcpdcr, TRCPDCR),
@@ -2549,7 +2558,7 @@ static struct attribute *coresight_etmv4_mgmt_attrs[] = {
coresight_etm4x_reg(trcpidr3, TRCPIDR3),
coresight_etm4x_reg(trcoslsr, TRCOSLSR),
coresight_etm4x_reg(trcconfig, TRCCONFIGR),
- &dev_attr_trctraceid.attr,
+ coresight_etm4x_reg_showfn(trctraceid, TRCTRACEIDR, trctraceid_show),
coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
NULL,
};
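The new macro leans on struct dev_ext_attribute: the register offset rides in ->var and a shared show routine pulls it back out, so no per-register show function is needed. A stripped-down sketch of the trick, with hypothetical names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_reg_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)ea->var);
}

#define FOO_REG_ATTR(_name, _offset)					\
	(&((struct dev_ext_attribute[]) {				\
		{ __ATTR(_name, 0444, foo_reg_show, NULL),		\
		  (void *)(unsigned long)(_offset) }			\
	})[0].attr.attr)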
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index b363dd6bc510..b8e150e45b27 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -74,8 +74,9 @@ done:
return rc;
}
-static int funnel_enable(struct coresight_device *csdev, int inport,
- int outport)
+static int funnel_enable(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out)
{
int rc = 0;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -83,18 +84,19 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_read(&csdev->refcnt[inport]) == 0) {
+ if (atomic_read(&in->dest_refcnt) == 0) {
if (drvdata->base)
- rc = dynamic_funnel_enable_hw(drvdata, inport);
+ rc = dynamic_funnel_enable_hw(drvdata, in->dest_port);
if (!rc)
first_enable = true;
}
if (!rc)
- atomic_inc(&csdev->refcnt[inport]);
+ atomic_inc(&in->dest_refcnt);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
- dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n", inport);
+ dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n",
+ in->dest_port);
return rc;
}
@@ -117,23 +119,25 @@ static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
CS_LOCK(drvdata->base);
}
-static void funnel_disable(struct coresight_device *csdev, int inport,
- int outport)
+static void funnel_disable(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&csdev->refcnt[inport]) == 0) {
+ if (atomic_dec_return(&in->dest_refcnt) == 0) {
if (drvdata->base)
- dynamic_funnel_disable_hw(drvdata, inport);
+ dynamic_funnel_disable_hw(drvdata, in->dest_port);
last_disable = true;
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (last_disable)
- dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
+ dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n",
+ in->dest_port);
}
static const struct coresight_ops_link funnel_link_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 475899714104..3e2e135cb8f6 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -19,22 +19,85 @@
#include <asm/smp_plat.h>
#include "coresight-priv.h"
+
/*
- * coresight_alloc_conns: Allocate connections record for each output
- * port from the device.
+ * Add an entry to the connection list and assign @conn's contents to it.
+ *
+ * If the output port is already assigned on this device, return -EINVAL
*/
-static int coresight_alloc_conns(struct device *dev,
- struct coresight_platform_data *pdata)
+struct coresight_connection *
+coresight_add_out_conn(struct device *dev,
+ struct coresight_platform_data *pdata,
+ const struct coresight_connection *new_conn)
{
- if (pdata->nr_outport) {
- pdata->conns = devm_kcalloc(dev, pdata->nr_outport,
- sizeof(*pdata->conns), GFP_KERNEL);
- if (!pdata->conns)
- return -ENOMEM;
+ int i;
+ struct coresight_connection *conn;
+
+ /*
+ * Warn on any existing duplicate output port.
+ */
+ for (i = 0; i < pdata->nr_outconns; ++i) {
+ conn = pdata->out_conns[i];
+ /* Output == -1 means ignore the port for example for helpers */
+ if (conn->src_port != -1 &&
+ conn->src_port == new_conn->src_port) {
+ dev_warn(dev, "Duplicate output port %d\n",
+ conn->src_port);
+ return ERR_PTR(-EINVAL);
+ }
}
+ pdata->nr_outconns++;
+ pdata->out_conns =
+ devm_krealloc_array(dev, pdata->out_conns, pdata->nr_outconns,
+ sizeof(*pdata->out_conns), GFP_KERNEL);
+ if (!pdata->out_conns)
+ return ERR_PTR(-ENOMEM);
+
+ conn = devm_kmalloc(dev, sizeof(struct coresight_connection),
+ GFP_KERNEL);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Copy the new connection into the allocation, save the pointer to the
+ * end of the connection array and also return it in case it needs to be
+ * used right away.
+ */
+ *conn = *new_conn;
+ pdata->out_conns[pdata->nr_outconns - 1] = conn;
+ return conn;
+}
+EXPORT_SYMBOL_GPL(coresight_add_out_conn);
+
+/*
+ * Add an input connection reference to @out_conn in the target's in_conns array
+ *
+ * @out_conn: Existing output connection to store as an input on the
+ * connection's remote device.
+ */
+int coresight_add_in_conn(struct coresight_connection *out_conn)
+{
+ int i;
+ struct device *dev = out_conn->dest_dev->dev.parent;
+ struct coresight_platform_data *pdata = out_conn->dest_dev->pdata;
+
+ for (i = 0; i < pdata->nr_inconns; ++i)
+ if (!pdata->in_conns[i]) {
+ pdata->in_conns[i] = out_conn;
+ return 0;
+ }
+
+ pdata->nr_inconns++;
+ pdata->in_conns =
+ devm_krealloc_array(dev, pdata->in_conns, pdata->nr_inconns,
+ sizeof(*pdata->in_conns), GFP_KERNEL);
+ if (!pdata->in_conns)
+ return -ENOMEM;
+ pdata->in_conns[pdata->nr_inconns - 1] = out_conn;
return 0;
}
+EXPORT_SYMBOL_GPL(coresight_add_in_conn);
static struct device *
coresight_find_device_by_fwnode(struct fwnode_handle *fwnode)
@@ -83,41 +146,6 @@ static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
return of_property_read_bool(ep, "slave-mode");
}
-static void of_coresight_get_ports_legacy(const struct device_node *node,
- int *nr_inport, int *nr_outport)
-{
- struct device_node *ep = NULL;
- struct of_endpoint endpoint;
- int in = 0, out = 0;
-
- /*
- * Avoid warnings in of_graph_get_next_endpoint()
- * if the device doesn't have any graph connections
- */
- if (!of_graph_is_present(node))
- return;
- do {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
- if (of_graph_parse_endpoint(ep, &endpoint))
- continue;
-
- if (of_coresight_legacy_ep_is_input(ep)) {
- in = (endpoint.port + 1 > in) ?
- endpoint.port + 1 : in;
- } else {
- out = (endpoint.port + 1) > out ?
- endpoint.port + 1 : out;
- }
-
- } while (ep);
-
- *nr_inport = in;
- *nr_outport = out;
-}
-
static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
{
struct device_node *parent = of_graph_get_port_parent(ep);
@@ -134,58 +162,11 @@ static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
}
static inline struct device_node *
-of_coresight_get_input_ports_node(const struct device_node *node)
-{
- return of_get_child_by_name(node, "in-ports");
-}
-
-static inline struct device_node *
of_coresight_get_output_ports_node(const struct device_node *node)
{
return of_get_child_by_name(node, "out-ports");
}
-static inline int
-of_coresight_count_ports(struct device_node *port_parent)
-{
- int i = 0;
- struct device_node *ep = NULL;
- struct of_endpoint endpoint;
-
- while ((ep = of_graph_get_next_endpoint(port_parent, ep))) {
- /* Defer error handling to parsing */
- if (of_graph_parse_endpoint(ep, &endpoint))
- continue;
- if (endpoint.port + 1 > i)
- i = endpoint.port + 1;
- }
-
- return i;
-}
-
-static void of_coresight_get_ports(const struct device_node *node,
- int *nr_inport, int *nr_outport)
-{
- struct device_node *input_ports = NULL, *output_ports = NULL;
-
- input_ports = of_coresight_get_input_ports_node(node);
- output_ports = of_coresight_get_output_ports_node(node);
-
- if (input_ports || output_ports) {
- if (input_ports) {
- *nr_inport = of_coresight_count_ports(input_ports);
- of_node_put(input_ports);
- }
- if (output_ports) {
- *nr_outport = of_coresight_count_ports(output_ports);
- of_node_put(output_ports);
- }
- } else {
- /* Fall back to legacy DT bindings parsing */
- of_coresight_get_ports_legacy(node, nr_inport, nr_outport);
- }
-}
-
static int of_coresight_get_cpu(struct device *dev)
{
int cpu;
@@ -206,7 +187,7 @@ static int of_coresight_get_cpu(struct device *dev)
/*
* of_coresight_parse_endpoint : Parse the given output endpoint @ep
- * and fill the connection information in @conn
+ * and fill the connection information in @pdata->out_conns
*
* Parses the local port, remote device name and the remote port.
*
@@ -224,7 +205,8 @@ static int of_coresight_parse_endpoint(struct device *dev,
struct device_node *rep = NULL;
struct device *rdev = NULL;
struct fwnode_handle *rdev_fwnode;
- struct coresight_connection *conn;
+ struct coresight_connection conn = {};
+ struct coresight_connection *new_conn;
do {
/* Parse the local port details */
@@ -251,14 +233,7 @@ static int of_coresight_parse_endpoint(struct device *dev,
break;
}
- conn = &pdata->conns[endpoint.port];
- if (conn->child_fwnode) {
- dev_warn(dev, "Duplicate output port %d\n",
- endpoint.port);
- ret = -EINVAL;
- break;
- }
- conn->outport = endpoint.port;
+ conn.src_port = endpoint.port;
/*
* Hold the refcount to the target device. This could be
* released via:
@@ -267,8 +242,14 @@ static int of_coresight_parse_endpoint(struct device *dev,
* 2) While removing the target device via
* coresight_remove_match()
*/
- conn->child_fwnode = fwnode_handle_get(rdev_fwnode);
- conn->child_port = rendpoint.port;
+ conn.dest_fwnode = fwnode_handle_get(rdev_fwnode);
+ conn.dest_port = rendpoint.port;
+
+ new_conn = coresight_add_out_conn(dev, pdata, &conn);
+ if (IS_ERR_VALUE(new_conn)) {
+ fwnode_handle_put(conn.dest_fwnode);
+ return PTR_ERR(new_conn);
+ }
/* Connection record updated */
} while (0);
@@ -288,17 +269,6 @@ static int of_get_coresight_platform_data(struct device *dev,
bool legacy_binding = false;
struct device_node *node = dev->of_node;
- /* Get the number of input and output port for this component */
- of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
-
- /* If there are no output connections, we are done */
- if (!pdata->nr_outport)
- return 0;
-
- ret = coresight_alloc_conns(dev, pdata);
- if (ret)
- return ret;
-
parent = of_coresight_get_output_ports_node(node);
/*
* If the DT uses obsoleted bindings, the ports are listed
@@ -306,6 +276,12 @@ static int of_get_coresight_platform_data(struct device *dev,
* ports.
*/
if (!parent) {
+ /*
+ * Avoid warnings in of_graph_get_next_endpoint()
+ * if the device doesn't have any graph connections
+ */
+ if (!of_graph_is_present(node))
+ return 0;
legacy_binding = true;
parent = node;
dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
@@ -649,8 +625,8 @@ static int acpi_coresight_parse_link(struct acpi_device *adev,
dir = fields[3].integer.value;
if (dir == ACPI_CORESIGHT_LINK_MASTER) {
- conn->outport = fields[0].integer.value;
- conn->child_port = fields[1].integer.value;
+ conn->src_port = fields[0].integer.value;
+ conn->dest_port = fields[1].integer.value;
rdev = coresight_find_device_by_fwnode(&r_adev->fwnode);
if (!rdev)
return -EPROBE_DEFER;
@@ -662,14 +638,14 @@ static int acpi_coresight_parse_link(struct acpi_device *adev,
* 2) While removing the target device via
* coresight_remove_match().
*/
- conn->child_fwnode = fwnode_handle_get(&r_adev->fwnode);
+ conn->dest_fwnode = fwnode_handle_get(&r_adev->fwnode);
} else if (dir == ACPI_CORESIGHT_LINK_SLAVE) {
/*
* We are only interested in the port number
* for the input ports at this component.
* Store the port number in child_port.
*/
- conn->child_port = fields[0].integer.value;
+ conn->dest_port = fields[0].integer.value;
} else {
/* Invalid direction */
return -EINVAL;
@@ -683,14 +659,15 @@ static int acpi_coresight_parse_link(struct acpi_device *adev,
* connection information and populate the supplied coresight_platform_data
* instance.
*/
-static int acpi_coresight_parse_graph(struct acpi_device *adev,
+static int acpi_coresight_parse_graph(struct device *dev,
+ struct acpi_device *adev,
struct coresight_platform_data *pdata)
{
- int rc, i, nlinks;
+ int i, nlinks;
const union acpi_object *graph;
- struct coresight_connection *conns, *ptr;
+ struct coresight_connection conn, zero_conn = {};
+ struct coresight_connection *new_conn;
- pdata->nr_inport = pdata->nr_outport = 0;
graph = acpi_get_coresight_graph(adev);
if (!graph)
return -ENOENT;
@@ -699,56 +676,22 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
if (!nlinks)
return 0;
- /*
- * To avoid scanning the table twice (once for finding the number of
- * output links and then later for parsing the output links),
- * cache the links information in one go and then later copy
- * it to the pdata.
- */
- conns = devm_kcalloc(&adev->dev, nlinks, sizeof(*conns), GFP_KERNEL);
- if (!conns)
- return -ENOMEM;
- ptr = conns;
for (i = 0; i < nlinks; i++) {
const union acpi_object *link = &graph->package.elements[3 + i];
int dir;
- dir = acpi_coresight_parse_link(adev, link, ptr);
+ conn = zero_conn;
+ dir = acpi_coresight_parse_link(adev, link, &conn);
if (dir < 0)
return dir;
if (dir == ACPI_CORESIGHT_LINK_MASTER) {
- if (ptr->outport >= pdata->nr_outport)
- pdata->nr_outport = ptr->outport + 1;
- ptr++;
- } else {
- WARN_ON(pdata->nr_inport == ptr->child_port + 1);
- /*
- * We do not track input port connections for a device.
- * However we need the highest port number described,
- * which can be recorded now and reuse this connection
- * record for an output connection. Hence, do not move
- * the ptr for input connections
- */
- if (ptr->child_port >= pdata->nr_inport)
- pdata->nr_inport = ptr->child_port + 1;
+ new_conn = coresight_add_out_conn(dev, pdata, &conn);
+ if (IS_ERR(new_conn))
+ return PTR_ERR(new_conn);
}
}
- rc = coresight_alloc_conns(&adev->dev, pdata);
- if (rc)
- return rc;
-
- /* Copy the connection information to the final location */
- for (i = 0; conns + i < ptr; i++) {
- int port = conns[i].outport;
-
- /* Duplicate output port */
- WARN_ON(pdata->conns[port].child_fwnode);
- pdata->conns[port] = conns[i];
- }
-
- devm_kfree(&adev->dev, conns);
return 0;
}
@@ -809,7 +752,7 @@ acpi_get_coresight_platform_data(struct device *dev,
if (!adev)
return -EINVAL;
- return acpi_coresight_parse_graph(adev, pdata);
+ return acpi_coresight_parse_graph(dev, adev, pdata);
}
#else
@@ -863,7 +806,7 @@ coresight_get_platform_data(struct device *dev)
error:
if (!IS_ERR_OR_NULL(pdata))
/* Cleanup the connection information */
- coresight_release_platform_data(NULL, pdata);
+ coresight_release_platform_data(NULL, dev, pdata);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_get_platform_data);
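A minimal sketch of how a firmware parser might use the two new helpers above. This is illustrative only and assumes it lives inside the CoreSight core; the example_register_conn() name and the remote_fwnode/remote_csdev parameters are placeholders, not from this patch.

	static int example_register_conn(struct device *dev,
					 struct coresight_platform_data *pdata,
					 struct fwnode_handle *remote_fwnode,
					 struct coresight_device *remote_csdev)
	{
		struct coresight_connection conn = {
			.src_port = 0,
			.dest_port = 1,
			.dest_fwnode = fwnode_handle_get(remote_fwnode),
			.dest_dev = remote_csdev,
		};
		struct coresight_connection *out;

		out = coresight_add_out_conn(dev, pdata, &conn);
		if (IS_ERR(out)) {
			fwnode_handle_put(conn.dest_fwnode);
			return PTR_ERR(out);
		}

		/* Mirror the connection on the destination's input side. */
		return coresight_add_in_conn(out);
	}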
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 595ce5862056..767076e07970 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -82,12 +82,6 @@ enum etm_addr_type {
ETM_ADDR_TYPE_STOP,
};
-enum cs_mode {
- CS_MODE_DISABLED,
- CS_MODE_SYSFS,
- CS_MODE_PERF,
-};
-
/**
* struct cs_buffer - keep track of a recording session' specifics
* @cur: index of the current buffer
@@ -133,7 +127,8 @@ static inline void CS_UNLOCK(void __iomem *addr)
}
void coresight_disable_path(struct list_head *path);
-int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
+int coresight_enable_path(struct list_head *path, enum cs_mode mode,
+ void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *
coresight_get_enabled_sink(struct coresight_device *source);
@@ -193,12 +188,27 @@ extern void coresight_remove_cti_ops(void);
}
/* coresight AMBA ID, full UCI structure: id table entry. */
-#define CS_AMBA_UCI_ID(pid, uci_ptr) \
+#define __CS_AMBA_UCI_ID(pid, m, uci_ptr) \
{ \
.id = pid, \
- .mask = 0x000fffff, \
+ .mask = m, \
.data = (void *)uci_ptr \
}
+#define CS_AMBA_UCI_ID(pid, uci) __CS_AMBA_UCI_ID(pid, 0x000fffff, uci)
+/*
+ * PIDR2[JEDEC], BIT(3) must be 1 (Read As One) to indicate that the rest
+ * of the PIDR1, PIDR2 DES_* fields follow JEDEC encoding for the designer.
+ * Use that as a match value for blanket matching all devices in the given
+ * CoreSight device type and architecture.
+ */
+#define PIDR2_JEDEC BIT(3)
+#define PID_PIDR2_JEDEC (PIDR2_JEDEC << 16)
+/*
+ * Match all PIDs in a given CoreSight device type and architecture, defined
+ * by the uci.
+ */
+#define CS_AMBA_MATCH_ALL_UCI(uci) \
+ __CS_AMBA_UCI_ID(PID_PIDR2_JEDEC, PID_PIDR2_JEDEC, uci)
/* extract the data value from a UCI structure given amba_id pointer. */
static inline void *coresight_get_uci_data(const struct amba_id *id)
@@ -212,13 +222,17 @@ static inline void *coresight_get_uci_data(const struct amba_id *id)
}
void coresight_release_platform_data(struct coresight_device *csdev,
+ struct device *dev,
struct coresight_platform_data *pdata);
struct coresight_device *
coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
-void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
- struct coresight_device *ect_csdev);
+void coresight_add_helper(struct coresight_device *csdev,
+ struct coresight_device *helper);
void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
struct coresight_device *coresight_get_percpu_sink(int cpu);
+int coresight_enable_source(struct coresight_device *csdev, enum cs_mode mode,
+ void *data);
+bool coresight_disable_source(struct coresight_device *csdev, void *data);
#endif
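As an illustration of the new blanket-match macro, an amba_id table could mix exact and wildcard entries as sketched below. The ID value and the example_uci contents are arbitrary placeholders, not taken from this patch.

	static const struct amba_cs_uci_id example_uci = {
		.devarch	= 0x47724a13,	/* placeholder DEVARCH value */
		.devarch_mask	= 0xfff0ffff,
		.devtype	= 0x13,
	};

	static const struct amba_id example_ids[] = {
		/* Exact part number match, as before. */
		CS_AMBA_UCI_ID(0x000bb95d, &example_uci),
		/*
		 * Catch-all for any part advertising JEDEC-encoded designer
		 * fields; the UCI devarch/devtype then narrow the match.
		 */
		CS_AMBA_MATCH_ALL_UCI(&example_uci),
		{},
	};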
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 4dd50546d7e4..b6be73034996 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -114,8 +114,9 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
return rc;
}
-static int replicator_enable(struct coresight_device *csdev, int inport,
- int outport)
+static int replicator_enable(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out)
{
int rc = 0;
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -123,15 +124,15 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_read(&csdev->refcnt[outport]) == 0) {
+ if (atomic_read(&out->src_refcnt) == 0) {
if (drvdata->base)
- rc = dynamic_replicator_enable(drvdata, inport,
- outport);
+ rc = dynamic_replicator_enable(drvdata, in->dest_port,
+ out->src_port);
if (!rc)
first_enable = true;
}
if (!rc)
- atomic_inc(&csdev->refcnt[outport]);
+ atomic_inc(&out->src_refcnt);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -168,17 +169,19 @@ static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
CS_LOCK(drvdata->base);
}
-static void replicator_disable(struct coresight_device *csdev, int inport,
- int outport)
+static void replicator_disable(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&csdev->refcnt[outport]) == 0) {
+ if (atomic_dec_return(&out->src_refcnt) == 0) {
if (drvdata->base)
- dynamic_replicator_disable(drvdata, inport, outport);
+ dynamic_replicator_disable(drvdata, in->dest_port,
+ out->src_port);
last_disable = true;
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 66a614c5492c..a1c27c901ad1 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -119,7 +119,7 @@ DEFINE_CORESIGHT_DEVLIST(stm_devs, "stm");
* @spinlock: only one at a time pls.
* @chs: the channels accociated to this STM.
* @stm: structure associated to the generic STM interface.
- * @mode: this tracer's mode, i.e sysFS, or disabled.
+ * @mode: this tracer's mode (enum cs_mode), i.e. sysFS or disabled.
* @traceid: value of the current ID for this component.
* @write_bytes: Maximus bytes this STM can write at a time.
* @stmsper: settings for register STMSPER.
@@ -192,8 +192,8 @@ static void stm_enable_hw(struct stm_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int stm_enable(struct coresight_device *csdev,
- struct perf_event *event, u32 mode)
+static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode)
{
u32 val;
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index 34d2a2d31d00..dd78e9fcfc4d 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -148,13 +148,17 @@ int coresight_make_links(struct coresight_device *orig,
char *outs = NULL, *ins = NULL;
struct coresight_sysfs_link *link = NULL;
+ /* Helper devices aren't shown in sysfs */
+ if (conn->dest_port == -1 && conn->src_port == -1)
+ return 0;
+
do {
outs = devm_kasprintf(&orig->dev, GFP_KERNEL,
- "out:%d", conn->outport);
+ "out:%d", conn->src_port);
if (!outs)
break;
ins = devm_kasprintf(&target->dev, GFP_KERNEL,
- "in:%d", conn->child_port);
+ "in:%d", conn->dest_port);
if (!ins)
break;
link = devm_kzalloc(&orig->dev,
@@ -173,12 +177,6 @@ int coresight_make_links(struct coresight_device *orig,
break;
conn->link = link;
-
- /*
- * Install the device connection. This also indicates that
- * the links are operational on both ends.
- */
- conn->child_dev = target;
return 0;
} while (0);
@@ -198,9 +196,8 @@ void coresight_remove_links(struct coresight_device *orig,
coresight_remove_sysfs_link(conn->link);
- devm_kfree(&conn->child_dev->dev, conn->link->target_name);
+ devm_kfree(&conn->dest_dev->dev, conn->link->target_name);
devm_kfree(&orig->dev, conn->link->orig_name);
devm_kfree(&orig->dev, conn->link);
conn->link = NULL;
- conn->child_dev = NULL;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 0ab1f73c2d06..79d8c64eac49 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -206,7 +206,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
* touched.
*/
if (drvdata->mode == CS_MODE_SYSFS) {
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
goto out;
}
@@ -229,7 +229,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
ret = tmc_etb_enable_hw(drvdata);
if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
} else {
/* Free up the buffer if we failed to enable */
used = false;
@@ -284,7 +284,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
* use for this session.
*/
if (drvdata->pid == pid) {
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
break;
}
@@ -293,7 +293,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
/* Associate with monitored process. */
drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
}
} while (0);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -302,7 +302,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
}
static int tmc_enable_etf_sink(struct coresight_device *csdev,
- u32 mode, void *data)
+ enum cs_mode mode, void *data)
{
int ret;
@@ -338,7 +338,7 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
return -EBUSY;
}
- if (atomic_dec_return(csdev->refcnt)) {
+ if (atomic_dec_return(&csdev->refcnt)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
@@ -357,7 +357,8 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
}
static int tmc_enable_etf_link(struct coresight_device *csdev,
- int inport, int outport)
+ struct coresight_connection *in,
+ struct coresight_connection *out)
{
int ret = 0;
unsigned long flags;
@@ -370,7 +371,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
return -EBUSY;
}
- if (atomic_read(&csdev->refcnt[0]) == 0) {
+ if (atomic_read(&csdev->refcnt) == 0) {
ret = tmc_etf_enable_hw(drvdata);
if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
@@ -378,7 +379,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
}
}
if (!ret)
- atomic_inc(&csdev->refcnt[0]);
+ atomic_inc(&csdev->refcnt);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -387,7 +388,8 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
}
static void tmc_disable_etf_link(struct coresight_device *csdev,
- int inport, int outport)
+ struct coresight_connection *in,
+ struct coresight_connection *out)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -399,7 +401,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
return;
}
- if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
+ if (atomic_dec_return(&csdev->refcnt) == 0) {
tmc_etf_disable_hw(drvdata);
drvdata->mode = CS_MODE_DISABLED;
last_disable = true;
@@ -487,7 +489,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
- if (atomic_read(csdev->refcnt) != 1)
+ if (atomic_read(&csdev->refcnt) != 1)
goto out;
CS_UNLOCK(drvdata->base);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index eaa296ced167..766325de0e29 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -775,40 +775,19 @@ static const struct etr_buf_operations etr_sg_buf_ops = {
struct coresight_device *
tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
{
- int i;
- struct coresight_device *tmp, *etr = drvdata->csdev;
+ struct coresight_device *etr = drvdata->csdev;
+ union coresight_dev_subtype catu_subtype = {
+ .helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU
+ };
if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
return NULL;
- for (i = 0; i < etr->pdata->nr_outport; i++) {
- tmp = etr->pdata->conns[i].child_dev;
- if (tmp && coresight_is_catu_device(tmp))
- return tmp;
- }
-
- return NULL;
+ return coresight_find_output_type(etr->pdata, CORESIGHT_DEV_TYPE_HELPER,
+ catu_subtype);
}
EXPORT_SYMBOL_GPL(tmc_etr_get_catu_device);
-static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
- struct etr_buf *etr_buf)
-{
- struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
-
- if (catu && helper_ops(catu)->enable)
- return helper_ops(catu)->enable(catu, etr_buf);
- return 0;
-}
-
-static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
-{
- struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
-
- if (catu && helper_ops(catu)->disable)
- helper_ops(catu)->disable(catu, drvdata->etr_buf);
-}
-
static const struct etr_buf_operations *etr_buf_ops[] = {
[ETR_MODE_FLAT] = &etr_flat_buf_ops,
[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
@@ -1058,13 +1037,6 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
if (WARN_ON(drvdata->etr_buf))
return -EBUSY;
- /*
- * If this ETR is connected to a CATU, enable it before we turn
- * this on.
- */
- rc = tmc_etr_enable_catu(drvdata, etr_buf);
- if (rc)
- return rc;
rc = coresight_claim_device(drvdata->csdev);
if (!rc) {
drvdata->etr_buf = etr_buf;
@@ -1072,7 +1044,6 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
if (rc) {
drvdata->etr_buf = NULL;
coresight_disclaim_device(drvdata->csdev);
- tmc_etr_disable_catu(drvdata);
}
}
@@ -1162,14 +1133,12 @@ static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
__tmc_etr_disable_hw(drvdata);
- /* Disable CATU device if this ETR is connected to one */
- tmc_etr_disable_catu(drvdata);
coresight_disclaim_device(drvdata->csdev);
/* Reset the ETR buf used by hardware */
drvdata->etr_buf = NULL;
}
-static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
+static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
{
int ret = 0;
unsigned long flags;
@@ -1192,7 +1161,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
/* Allocate memory with the locks released */
free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
if (IS_ERR(new_buf))
- return PTR_ERR(new_buf);
+ return new_buf;
/* Let's try again */
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -1209,7 +1178,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* touched, even if the buffer size has changed.
*/
if (drvdata->mode == CS_MODE_SYSFS) {
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
goto out;
}
@@ -1223,17 +1192,33 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
drvdata->sysfs_buf = new_buf;
}
- ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
- if (!ret) {
- drvdata->mode = CS_MODE_SYSFS;
- atomic_inc(csdev->refcnt);
- }
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
if (free_buf)
tmc_etr_free_sysfs_buf(free_buf);
+ return ret ? ERR_PTR(ret) : drvdata->sysfs_buf;
+}
+
+static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
+{
+ int ret;
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etr_buf *sysfs_buf = tmc_etr_get_sysfs_buffer(csdev);
+
+ if (IS_ERR(sysfs_buf))
+ return PTR_ERR(sysfs_buf);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
+ if (!ret) {
+ drvdata->mode = CS_MODE_SYSFS;
+ atomic_inc(&csdev->refcnt);
+ }
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (!ret)
dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
@@ -1241,6 +1226,26 @@ out:
return ret;
}
+struct etr_buf *tmc_etr_get_buffer(struct coresight_device *csdev,
+ enum cs_mode mode, void *data)
+{
+ struct perf_output_handle *handle = data;
+ struct etr_perf_buffer *etr_perf;
+
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ return tmc_etr_get_sysfs_buffer(csdev);
+ case CS_MODE_PERF:
+ etr_perf = etm_perf_sink_config(handle);
+ if (WARN_ON(!etr_perf || !etr_perf->etr_buf))
+ return ERR_PTR(-EINVAL);
+ return etr_perf->etr_buf;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+EXPORT_SYMBOL_GPL(tmc_etr_get_buffer);
+
/*
* alloc_etr_buf: Allocate ETR buffer for use by perf.
* The size of the hardware buffer is dependent on the size configured
@@ -1535,7 +1540,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
- if (atomic_read(csdev->refcnt) != 1) {
+ if (atomic_read(&csdev->refcnt) != 1) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
goto out;
}
@@ -1647,7 +1652,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
* use for this session.
*/
if (drvdata->pid == pid) {
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
goto unlock_out;
}
@@ -1657,7 +1662,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
drvdata->perf_buf = etr_perf->etr_buf;
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
}
unlock_out:
@@ -1666,17 +1671,16 @@ unlock_out:
}
static int tmc_enable_etr_sink(struct coresight_device *csdev,
- u32 mode, void *data)
+ enum cs_mode mode, void *data)
{
switch (mode) {
case CS_MODE_SYSFS:
return tmc_enable_etr_sink_sysfs(csdev);
case CS_MODE_PERF:
return tmc_enable_etr_sink_perf(csdev, data);
+ default:
+ return -EINVAL;
}
-
- /* We shouldn't be here */
- return -EINVAL;
}
static int tmc_disable_etr_sink(struct coresight_device *csdev)
@@ -1691,7 +1695,7 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
return -EBUSY;
}
- if (atomic_dec_return(csdev->refcnt)) {
+ if (atomic_dec_return(&csdev->refcnt)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 01c0382a29c0..b97da39652d2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -332,5 +332,7 @@ struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu);
void tmc_etr_remove_catu_ops(void);
+struct etr_buf *tmc_etr_get_buffer(struct coresight_device *csdev,
+ enum cs_mode mode, void *data);
#endif
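A rough sketch of a possible caller of the new tmc_etr_get_buffer() helper, for example a helper device's enable path. The helper_enable_example() and example_program_hw() names are made up for illustration; hwaddr and size are real fields of struct etr_buf.

	static int helper_enable_example(struct coresight_device *etr_csdev,
					 enum cs_mode mode, void *data)
	{
		struct etr_buf *buf = tmc_etr_get_buffer(etr_csdev, mode, data);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		/* Program the helper hardware with the trace buffer. */
		return example_program_hw(buf->hwaddr, buf->size);
	}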
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index f712e112ecff..8d2b9d29237d 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -54,18 +54,20 @@ static void __tpda_enable(struct tpda_drvdata *drvdata, int port)
CS_LOCK(drvdata->base);
}
-static int tpda_enable(struct coresight_device *csdev, int inport, int outport)
+static int tpda_enable(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
- if (atomic_read(&csdev->refcnt[inport]) == 0)
- __tpda_enable(drvdata, inport);
+ if (atomic_read(&in->dest_refcnt) == 0)
+ __tpda_enable(drvdata, in->dest_port);
- atomic_inc(&csdev->refcnt[inport]);
+ atomic_inc(&in->dest_refcnt);
spin_unlock(&drvdata->spinlock);
- dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", inport);
+ dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
return 0;
}
@@ -82,18 +84,19 @@ static void __tpda_disable(struct tpda_drvdata *drvdata, int port)
CS_LOCK(drvdata->base);
}
-static void tpda_disable(struct coresight_device *csdev, int inport,
- int outport)
+static void tpda_disable(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
- if (atomic_dec_return(&csdev->refcnt[inport]) == 0)
- __tpda_disable(drvdata, inport);
+ if (atomic_dec_return(&in->dest_refcnt) == 0)
+ __tpda_disable(drvdata, in->dest_port);
spin_unlock(&drvdata->spinlock);
- dev_dbg(drvdata->dev, "TPDA inport %d disabled\n", inport);
+ dev_dbg(drvdata->dev, "TPDA inport %d disabled\n", in->dest_port);
}
static const struct coresight_ops_link tpda_link_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 9479a5e8c672..f4854af0431e 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -42,8 +42,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int tpdm_enable(struct coresight_device *csdev,
- struct perf_event *event, u32 mode)
+static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 34d37abd2c8d..59eac93fd6bb 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -69,10 +69,11 @@ static void tpiu_enable_hw(struct csdev_access *csa)
CS_LOCK(csa->base);
}
-static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
+static int tpiu_enable(struct coresight_device *csdev, enum cs_mode mode,
+ void *__unused)
{
tpiu_enable_hw(&csdev->access);
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
dev_dbg(&csdev->dev, "TPIU enabled\n");
return 0;
}
@@ -95,7 +96,7 @@ static void tpiu_disable_hw(struct csdev_access *csa)
static int tpiu_disable(struct coresight_device *csdev)
{
- if (atomic_dec_return(csdev->refcnt))
+ if (atomic_dec_return(&csdev->refcnt))
return -EBUSY;
tpiu_disable_hw(&csdev->access);
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 1bab91ce8e95..7720619909d6 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -1006,7 +1006,8 @@ err:
return ret;
}
-static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data)
+static int arm_trbe_enable(struct coresight_device *csdev, enum cs_mode mode,
+ void *data)
{
struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index b317342c7ce5..e9a32a97fbee 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -106,7 +106,7 @@ static int smb_open(struct inode *inode, struct file *file)
goto out;
}
- if (atomic_read(drvdata->csdev->refcnt)) {
+ if (atomic_read(&drvdata->csdev->refcnt)) {
ret = -EBUSY;
goto out;
}
@@ -256,7 +256,8 @@ static int smb_enable_perf(struct coresight_device *csdev, void *data)
return 0;
}
-static int smb_enable(struct coresight_device *csdev, u32 mode, void *data)
+static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
+ void *data)
{
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret = 0;
@@ -289,7 +290,7 @@ static int smb_enable(struct coresight_device *csdev, u32 mode, void *data)
if (ret)
goto out;
- atomic_inc(csdev->refcnt);
+ atomic_inc(&csdev->refcnt);
dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
out:
@@ -310,7 +311,7 @@ static int smb_disable(struct coresight_device *csdev)
goto out;
}
- if (atomic_dec_return(csdev->refcnt)) {
+ if (atomic_dec_return(&csdev->refcnt)) {
ret = -EBUSY;
goto out;
}
@@ -410,7 +411,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
mutex_lock(&drvdata->mutex);
/* Don't do anything if another tracer is using this sink. */
- if (atomic_read(csdev->refcnt) != 1)
+ if (atomic_read(&csdev->refcnt) != 1)
goto out;
smb_disable_hw(drvdata);
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
index 7dfbe42e37a0..d2e14e8d2c8a 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.h
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
@@ -119,7 +119,7 @@ struct smb_drv_data {
struct mutex mutex;
bool reading;
pid_t pid;
- u32 mode;
+ enum cs_mode mode;
};
#endif
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
index 30f1525639b5..ba081b6d2435 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.c
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -341,19 +341,319 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
if (ret < 0)
return ret;
- ret = devm_request_threaded_irq(&pdev->dev,
- pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ),
+ hisi_ptt->trace_irq = pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ);
+ ret = devm_request_threaded_irq(&pdev->dev, hisi_ptt->trace_irq,
NULL, hisi_ptt_isr, 0,
DRV_NAME, hisi_ptt);
if (ret) {
pci_err(pdev, "failed to request irq %d, ret = %d\n",
- pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ), ret);
+ hisi_ptt->trace_irq, ret);
return ret;
}
return 0;
}
+static void hisi_ptt_del_free_filter(struct hisi_ptt *hisi_ptt,
+ struct hisi_ptt_filter_desc *filter)
+{
+ if (filter->is_port)
+ hisi_ptt->port_mask &= ~hisi_ptt_get_filter_val(filter->devid, true);
+
+ list_del(&filter->list);
+ kfree(filter->name);
+ kfree(filter);
+}
+
+static struct hisi_ptt_filter_desc *
+hisi_ptt_alloc_add_filter(struct hisi_ptt *hisi_ptt, u16 devid, bool is_port)
+{
+ struct hisi_ptt_filter_desc *filter;
+ u8 devfn = devid & 0xff;
+ char *filter_name;
+
+ filter_name = kasprintf(GFP_KERNEL, "%04x:%02x:%02x.%d", pci_domain_nr(hisi_ptt->pdev->bus),
+ PCI_BUS_NUM(devid), PCI_SLOT(devfn), PCI_FUNC(devfn));
+ if (!filter_name) {
+ pci_err(hisi_ptt->pdev, "failed to allocate name for filter %04x:%02x:%02x.%d\n",
+ pci_domain_nr(hisi_ptt->pdev->bus), PCI_BUS_NUM(devid),
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+ return NULL;
+ }
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter) {
+ pci_err(hisi_ptt->pdev, "failed to add filter for %s\n",
+ filter_name);
+ kfree(filter_name);
+ return NULL;
+ }
+
+ filter->name = filter_name;
+ filter->is_port = is_port;
+ filter->devid = devid;
+
+ if (filter->is_port) {
+ list_add_tail(&filter->list, &hisi_ptt->port_filters);
+
+ /* Update the available port mask */
+ hisi_ptt->port_mask |= hisi_ptt_get_filter_val(filter->devid, true);
+ } else {
+ list_add_tail(&filter->list, &hisi_ptt->req_filters);
+ }
+
+ return filter;
+}
+
+static ssize_t hisi_ptt_filter_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hisi_ptt_filter_desc *filter;
+ unsigned long filter_val;
+
+ filter = container_of(attr, struct hisi_ptt_filter_desc, attr);
+ filter_val = hisi_ptt_get_filter_val(filter->devid, filter->is_port) |
+ (filter->is_port ? HISI_PTT_PMU_FILTER_IS_PORT : 0);
+
+ return sysfs_emit(buf, "0x%05lx\n", filter_val);
+}
+
+static int hisi_ptt_create_rp_filter_attr(struct hisi_ptt *hisi_ptt,
+ struct hisi_ptt_filter_desc *filter)
+{
+ struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
+
+ sysfs_attr_init(&filter->attr.attr);
+ filter->attr.attr.name = filter->name;
+ filter->attr.attr.mode = 0400; /* DEVICE_ATTR_ADMIN_RO */
+ filter->attr.show = hisi_ptt_filter_show;
+
+ return sysfs_add_file_to_group(kobj, &filter->attr.attr,
+ HISI_PTT_RP_FILTERS_GRP_NAME);
+}
+
+static void hisi_ptt_remove_rp_filter_attr(struct hisi_ptt *hisi_ptt,
+ struct hisi_ptt_filter_desc *filter)
+{
+ struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
+
+ sysfs_remove_file_from_group(kobj, &filter->attr.attr,
+ HISI_PTT_RP_FILTERS_GRP_NAME);
+}
+
+static int hisi_ptt_create_req_filter_attr(struct hisi_ptt *hisi_ptt,
+ struct hisi_ptt_filter_desc *filter)
+{
+ struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
+
+ sysfs_attr_init(&filter->attr.attr);
+ filter->attr.attr.name = filter->name;
+ filter->attr.attr.mode = 0400; /* DEVICE_ATTR_ADMIN_RO */
+ filter->attr.show = hisi_ptt_filter_show;
+
+ return sysfs_add_file_to_group(kobj, &filter->attr.attr,
+ HISI_PTT_REQ_FILTERS_GRP_NAME);
+}
+
+static void hisi_ptt_remove_req_filter_attr(struct hisi_ptt *hisi_ptt,
+ struct hisi_ptt_filter_desc *filter)
+{
+ struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
+
+ sysfs_remove_file_from_group(kobj, &filter->attr.attr,
+ HISI_PTT_REQ_FILTERS_GRP_NAME);
+}
+
+static int hisi_ptt_create_filter_attr(struct hisi_ptt *hisi_ptt,
+ struct hisi_ptt_filter_desc *filter)
+{
+ int ret;
+
+ if (filter->is_port)
+ ret = hisi_ptt_create_rp_filter_attr(hisi_ptt, filter);
+ else
+ ret = hisi_ptt_create_req_filter_attr(hisi_ptt, filter);
+
+ if (ret)
+ pci_err(hisi_ptt->pdev, "failed to create sysfs attribute for filter %s\n",
+ filter->name);
+
+ return ret;
+}
+
+static void hisi_ptt_remove_filter_attr(struct hisi_ptt *hisi_ptt,
+ struct hisi_ptt_filter_desc *filter)
+{
+ if (filter->is_port)
+ hisi_ptt_remove_rp_filter_attr(hisi_ptt, filter);
+ else
+ hisi_ptt_remove_req_filter_attr(hisi_ptt, filter);
+}
+
+static void hisi_ptt_remove_all_filter_attributes(void *data)
+{
+ struct hisi_ptt_filter_desc *filter;
+ struct hisi_ptt *hisi_ptt = data;
+
+ mutex_lock(&hisi_ptt->filter_lock);
+
+ list_for_each_entry(filter, &hisi_ptt->req_filters, list)
+ hisi_ptt_remove_filter_attr(hisi_ptt, filter);
+
+ list_for_each_entry(filter, &hisi_ptt->port_filters, list)
+ hisi_ptt_remove_filter_attr(hisi_ptt, filter);
+
+ hisi_ptt->sysfs_inited = false;
+ mutex_unlock(&hisi_ptt->filter_lock);
+}
+
+static int hisi_ptt_init_filter_attributes(struct hisi_ptt *hisi_ptt)
+{
+ struct hisi_ptt_filter_desc *filter;
+ int ret;
+
+ mutex_lock(&hisi_ptt->filter_lock);
+
+ /*
+	 * Register the reset callback in the first stage. On reset we traverse
+	 * the filter list to remove the sysfs attributes, so the callback can
+	 * be called safely even if the filter attributes below have not been
+	 * created yet.
+ */
+ ret = devm_add_action(&hisi_ptt->pdev->dev,
+ hisi_ptt_remove_all_filter_attributes,
+ hisi_ptt);
+ if (ret)
+ goto out;
+
+ list_for_each_entry(filter, &hisi_ptt->port_filters, list) {
+ ret = hisi_ptt_create_filter_attr(hisi_ptt, filter);
+ if (ret)
+ goto out;
+ }
+
+ list_for_each_entry(filter, &hisi_ptt->req_filters, list) {
+ ret = hisi_ptt_create_filter_attr(hisi_ptt, filter);
+ if (ret)
+ goto out;
+ }
+
+ hisi_ptt->sysfs_inited = true;
+out:
+ mutex_unlock(&hisi_ptt->filter_lock);
+ return ret;
+}
+
+static void hisi_ptt_update_filters(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct hisi_ptt_filter_update_info info;
+ struct hisi_ptt_filter_desc *filter;
+ struct hisi_ptt *hisi_ptt;
+
+ hisi_ptt = container_of(delayed_work, struct hisi_ptt, work);
+
+ if (!mutex_trylock(&hisi_ptt->filter_lock)) {
+ schedule_delayed_work(&hisi_ptt->work, HISI_PTT_WORK_DELAY_MS);
+ return;
+ }
+
+ while (kfifo_get(&hisi_ptt->filter_update_kfifo, &info)) {
+ if (info.is_add) {
+ /*
+			 * Notify the user if we failed to add this filter; the
+			 * others still work and remain available. See the comments in
+ * hisi_ptt_init_filters().
+ */
+ filter = hisi_ptt_alloc_add_filter(hisi_ptt, info.devid, info.is_port);
+ if (!filter)
+ continue;
+
+ /*
+			 * If the filters' sysfs entries haven't been initialized,
+			 * we're still at the probe stage. Add the filters to
+ * the list and later hisi_ptt_init_filter_attributes()
+ * will create sysfs attributes for all the filters.
+ */
+ if (hisi_ptt->sysfs_inited &&
+ hisi_ptt_create_filter_attr(hisi_ptt, filter)) {
+ hisi_ptt_del_free_filter(hisi_ptt, filter);
+ continue;
+ }
+ } else {
+ struct hisi_ptt_filter_desc *tmp;
+ struct list_head *target_list;
+
+ target_list = info.is_port ? &hisi_ptt->port_filters :
+ &hisi_ptt->req_filters;
+
+ list_for_each_entry_safe(filter, tmp, target_list, list)
+ if (filter->devid == info.devid) {
+ if (hisi_ptt->sysfs_inited)
+ hisi_ptt_remove_filter_attr(hisi_ptt, filter);
+
+ hisi_ptt_del_free_filter(hisi_ptt, filter);
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&hisi_ptt->filter_lock);
+}
+
+/*
+ * A PCI bus notifier is used here for dynamically updating the filter
+ * list.
+ */
+static int hisi_ptt_notifier_call(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct hisi_ptt *hisi_ptt = container_of(nb, struct hisi_ptt, hisi_ptt_nb);
+ struct hisi_ptt_filter_update_info info;
+ struct pci_dev *pdev, *root_port;
+ struct device *dev = data;
+ u32 port_devid;
+
+ pdev = to_pci_dev(dev);
+ root_port = pcie_find_root_port(pdev);
+ if (!root_port)
+ return 0;
+
+ port_devid = PCI_DEVID(root_port->bus->number, root_port->devfn);
+ if (port_devid < hisi_ptt->lower_bdf ||
+ port_devid > hisi_ptt->upper_bdf)
+ return 0;
+
+ info.is_port = pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT;
+ info.devid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ info.is_add = true;
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ info.is_add = false;
+ break;
+ default:
+ return 0;
+ }
+
+ /*
+	 * The FIFO size is 16, which is sufficient for almost all cases,
+	 * since each PCIe core has at most 8 Root Ports (typically only
+	 * 1~4 Root Ports). On failure, log the failed filter and let the
+	 * user handle it.
+ */
+ if (kfifo_in_spinlocked(&hisi_ptt->filter_update_kfifo, &info, 1,
+ &hisi_ptt->filter_update_lock))
+ schedule_delayed_work(&hisi_ptt->work, 0);
+ else
+ pci_warn(hisi_ptt->pdev,
+ "filter update fifo overflow for target %s\n",
+ pci_name(pdev));
+
+ return 0;
+}
+
static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
{
struct pci_dev *root_port = pcie_find_root_port(pdev);
@@ -374,23 +674,10 @@ static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
* should be partial initialized and users would know which filter fails
* through the log. Other functions of PTT device are still available.
*/
- filter = kzalloc(sizeof(*filter), GFP_KERNEL);
- if (!filter) {
- pci_err(hisi_ptt->pdev, "failed to add filter %s\n", pci_name(pdev));
+ filter = hisi_ptt_alloc_add_filter(hisi_ptt, PCI_DEVID(pdev->bus->number, pdev->devfn),
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT);
+ if (!filter)
return -ENOMEM;
- }
-
- filter->devid = PCI_DEVID(pdev->bus->number, pdev->devfn);
-
- if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
- filter->is_port = true;
- list_add_tail(&filter->list, &hisi_ptt->port_filters);
-
- /* Update the available port mask */
- hisi_ptt->port_mask |= hisi_ptt_get_filter_val(filter->devid, true);
- } else {
- list_add_tail(&filter->list, &hisi_ptt->req_filters);
- }
return 0;
}
@@ -400,15 +687,11 @@ static void hisi_ptt_release_filters(void *data)
struct hisi_ptt_filter_desc *filter, *tmp;
struct hisi_ptt *hisi_ptt = data;
- list_for_each_entry_safe(filter, tmp, &hisi_ptt->req_filters, list) {
- list_del(&filter->list);
- kfree(filter);
- }
+ list_for_each_entry_safe(filter, tmp, &hisi_ptt->req_filters, list)
+ hisi_ptt_del_free_filter(hisi_ptt, filter);
- list_for_each_entry_safe(filter, tmp, &hisi_ptt->port_filters, list) {
- list_del(&filter->list);
- kfree(filter);
- }
+ list_for_each_entry_safe(filter, tmp, &hisi_ptt->port_filters, list)
+ hisi_ptt_del_free_filter(hisi_ptt, filter);
}
static int hisi_ptt_config_trace_buf(struct hisi_ptt *hisi_ptt)
@@ -451,8 +734,13 @@ static int hisi_ptt_init_ctrls(struct hisi_ptt *hisi_ptt)
int ret;
u32 reg;
+ INIT_DELAYED_WORK(&hisi_ptt->work, hisi_ptt_update_filters);
+ INIT_KFIFO(hisi_ptt->filter_update_kfifo);
+ spin_lock_init(&hisi_ptt->filter_update_lock);
+
INIT_LIST_HEAD(&hisi_ptt->port_filters);
INIT_LIST_HEAD(&hisi_ptt->req_filters);
+ mutex_init(&hisi_ptt->filter_lock);
ret = hisi_ptt_config_trace_buf(hisi_ptt);
if (ret)
@@ -528,10 +816,58 @@ static struct attribute_group hisi_ptt_pmu_format_group = {
.attrs = hisi_ptt_pmu_format_attrs,
};
+static ssize_t hisi_ptt_filter_multiselect_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr;
+
+ ext_attr = container_of(attr, struct dev_ext_attribute, attr);
+ return sysfs_emit(buf, "%s\n", (char *)ext_attr->var);
+}
+
+static struct dev_ext_attribute root_port_filters_multiselect = {
+ .attr = {
+ .attr = { .name = "multiselect", .mode = 0400 },
+ .show = hisi_ptt_filter_multiselect_show,
+ },
+ .var = "1",
+};
+
+static struct attribute *hisi_ptt_pmu_root_ports_attrs[] = {
+ &root_port_filters_multiselect.attr.attr,
+ NULL
+};
+
+static struct attribute_group hisi_ptt_pmu_root_ports_group = {
+ .name = HISI_PTT_RP_FILTERS_GRP_NAME,
+ .attrs = hisi_ptt_pmu_root_ports_attrs,
+};
+
+static struct dev_ext_attribute requester_filters_multiselect = {
+ .attr = {
+ .attr = { .name = "multiselect", .mode = 0400 },
+ .show = hisi_ptt_filter_multiselect_show,
+ },
+ .var = "0",
+};
+
+static struct attribute *hisi_ptt_pmu_requesters_attrs[] = {
+ &requester_filters_multiselect.attr.attr,
+ NULL
+};
+
+static struct attribute_group hisi_ptt_pmu_requesters_group = {
+ .name = HISI_PTT_REQ_FILTERS_GRP_NAME,
+ .attrs = hisi_ptt_pmu_requesters_attrs,
+};
+
static const struct attribute_group *hisi_ptt_pmu_groups[] = {
&hisi_ptt_cpumask_attr_group,
&hisi_ptt_pmu_format_group,
&hisi_ptt_tune_group,
+ &hisi_ptt_pmu_root_ports_group,
+ &hisi_ptt_pmu_requesters_group,
NULL
};
@@ -605,6 +941,7 @@ static int hisi_ptt_trace_valid_filter(struct hisi_ptt *hisi_ptt, u64 config)
{
unsigned long val, port_mask = hisi_ptt->port_mask;
struct hisi_ptt_filter_desc *filter;
+ int ret = 0;
hisi_ptt->trace_ctrl.is_port = FIELD_GET(HISI_PTT_PMU_FILTER_IS_PORT, config);
val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, config);
@@ -618,16 +955,20 @@ static int hisi_ptt_trace_valid_filter(struct hisi_ptt *hisi_ptt, u64 config)
* For Requester ID filters, walk the available filter list to see
* whether we have one matched.
*/
+ mutex_lock(&hisi_ptt->filter_lock);
if (!hisi_ptt->trace_ctrl.is_port) {
list_for_each_entry(filter, &hisi_ptt->req_filters, list) {
if (val == hisi_ptt_get_filter_val(filter->devid, filter->is_port))
- return 0;
+ goto out;
}
} else if (bitmap_subset(&val, &port_mask, BITS_PER_LONG)) {
- return 0;
+ goto out;
}
- return -EINVAL;
+ ret = -EINVAL;
+out:
+ mutex_unlock(&hisi_ptt->filter_lock);
+ return ret;
}
static void hisi_ptt_pmu_init_configs(struct hisi_ptt *hisi_ptt, struct perf_event *event)
@@ -757,8 +1098,7 @@ static void hisi_ptt_pmu_start(struct perf_event *event, int flags)
* core in event_function_local(). If CPU passed is offline we'll fail
* here, just log it since we can do nothing here.
*/
- ret = irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ),
- cpumask_of(cpu));
+ ret = irq_set_affinity(hisi_ptt->trace_irq, cpumask_of(cpu));
if (ret)
dev_warn(dev, "failed to set the affinity of trace interrupt\n");
@@ -871,7 +1211,7 @@ static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
hisi_ptt->hisi_ptt_pmu = (struct pmu) {
.module = THIS_MODULE,
- .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
+ .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_sw_context,
.attr_groups = hisi_ptt_pmu_groups,
.event_init = hisi_ptt_pmu_event_init,
@@ -901,6 +1241,31 @@ static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
&hisi_ptt->hisi_ptt_pmu);
}
+static void hisi_ptt_unregister_filter_update_notifier(void *data)
+{
+ struct hisi_ptt *hisi_ptt = data;
+
+ bus_unregister_notifier(&pci_bus_type, &hisi_ptt->hisi_ptt_nb);
+
+ /* Cancel any work that has been queued */
+ cancel_delayed_work_sync(&hisi_ptt->work);
+}
+
+/* Register the bus notifier for dynamically updating the filter list */
+static int hisi_ptt_register_filter_update_notifier(struct hisi_ptt *hisi_ptt)
+{
+ int ret;
+
+ hisi_ptt->hisi_ptt_nb.notifier_call = hisi_ptt_notifier_call;
+ ret = bus_register_notifier(&pci_bus_type, &hisi_ptt->hisi_ptt_nb);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(&hisi_ptt->pdev->dev,
+ hisi_ptt_unregister_filter_update_notifier,
+ hisi_ptt);
+}
+
/*
* The DMA of PTT trace can only use direct mappings due to some
* hardware restriction. Check whether there is no IOMMU or the
@@ -972,12 +1337,22 @@ static int hisi_ptt_probe(struct pci_dev *pdev,
return ret;
}
+ ret = hisi_ptt_register_filter_update_notifier(hisi_ptt);
+ if (ret)
+ pci_warn(pdev, "failed to register filter update notifier, ret = %d", ret);
+
ret = hisi_ptt_register_pmu(hisi_ptt);
if (ret) {
pci_err(pdev, "failed to register PMU device, ret = %d", ret);
return ret;
}
+ ret = hisi_ptt_init_filter_attributes(hisi_ptt);
+ if (ret) {
+ pci_err(pdev, "failed to init sysfs filter attributes, ret = %d", ret);
+ return ret;
+ }
+
return 0;
}
@@ -1018,8 +1393,7 @@ static int hisi_ptt_cpu_teardown(unsigned int cpu, struct hlist_node *node)
* Also make sure the interrupt bind to the migrated CPU as well. Warn
* the user on failure here.
*/
- if (irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ),
- cpumask_of(target)))
+ if (irq_set_affinity(hisi_ptt->trace_irq, cpumask_of(target)))
dev_warn(dev, "failed to set the affinity of trace interrupt\n");
hisi_ptt->trace_ctrl.on_cpu = target;
diff --git a/drivers/hwtracing/ptt/hisi_ptt.h b/drivers/hwtracing/ptt/hisi_ptt.h
index 5beb1648c93a..e17f045d7e72 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.h
+++ b/drivers/hwtracing/ptt/hisi_ptt.h
@@ -11,12 +11,16 @@
#include <linux/bits.h>
#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#define DRV_NAME "hisi_ptt"
@@ -71,6 +75,11 @@
#define HISI_PTT_WAIT_TRACE_TIMEOUT_US 100UL
#define HISI_PTT_WAIT_POLL_INTERVAL_US 10UL
+/* FIFO size for dynamically updating the PTT trace filter list. */
+#define HISI_PTT_FILTER_UPDATE_FIFO_SIZE 16
+/* Delay time for filter updating work */
+#define HISI_PTT_WORK_DELAY_MS 100UL
+
#define HISI_PCIE_CORE_PORT_ID(devfn) ((PCI_SLOT(devfn) & 0x7) << 1)
/* Definition of the PMU configs */
@@ -131,15 +140,40 @@ struct hisi_ptt_trace_ctrl {
u32 type:4;
};
+/*
+ * sysfs attribute group name for root port filters and requester filters:
+ * /sys/devices/hisi_ptt<sicl_id>_<core_id>/root_port_filters
+ * and
+ * /sys/devices/hisi_ptt<sicl_id>_<core_id>/requester_filters
+ */
+#define HISI_PTT_RP_FILTERS_GRP_NAME "root_port_filters"
+#define HISI_PTT_REQ_FILTERS_GRP_NAME "requester_filters"
+
/**
* struct hisi_ptt_filter_desc - Descriptor of the PTT trace filter
+ * @attr: sysfs attribute of this filter
* @list: entry of this descriptor in the filter list
* @is_port: the PCI device of the filter is a Root Port or not
+ * @name: name of this filter, same as the name of the related PCI device
* @devid: the PCI device's devid of the filter
*/
struct hisi_ptt_filter_desc {
+ struct device_attribute attr;
struct list_head list;
bool is_port;
+ char *name;
+ u16 devid;
+};
+
+/**
+ * struct hisi_ptt_filter_update_info - Information for PTT filter updating
+ * @is_port: the PCI device to update is a Root Port or not
+ * @is_add: adding to the filter or not
+ * @devid: the PCI device's devid of the filter
+ */
+struct hisi_ptt_filter_update_info {
+ bool is_port;
+ bool is_add;
u16 devid;
};
@@ -160,26 +194,35 @@ struct hisi_ptt_pmu_buf {
/**
* struct hisi_ptt - Per PTT device data
* @trace_ctrl: the control information of PTT trace
+ * @hisi_ptt_nb: dynamic filter update notifier
* @hotplug_node: node for register cpu hotplug event
* @hisi_ptt_pmu: the pum device of trace
* @iobase: base IO address of the device
* @pdev: pci_dev of this PTT device
* @tune_lock: lock to serialize the tune process
* @pmu_lock: lock to serialize the perf process
+ * @trace_irq: interrupt number used by trace
* @upper_bdf: the upper BDF range of the PCI devices managed by this PTT device
* @lower_bdf: the lower BDF range of the PCI devices managed by this PTT device
* @port_filters: the filter list of root ports
* @req_filters: the filter list of requester ID
+ * @filter_lock: lock to protect the filters
+ * @sysfs_inited: whether the filters' sysfs entries have been initialized
* @port_mask: port mask of the managed root ports
+ * @work: delayed work for filter updating
+ * @filter_update_lock: spinlock to protect the filter update fifo
+ * @filter_update_kfifo: fifo of the filters waiting to update the filter list
*/
struct hisi_ptt {
struct hisi_ptt_trace_ctrl trace_ctrl;
+ struct notifier_block hisi_ptt_nb;
struct hlist_node hotplug_node;
struct pmu hisi_ptt_pmu;
void __iomem *iobase;
struct pci_dev *pdev;
struct mutex tune_lock;
spinlock_t pmu_lock;
+ int trace_irq;
u32 upper_bdf;
u32 lower_bdf;
@@ -192,7 +235,20 @@ struct hisi_ptt {
*/
struct list_head port_filters;
struct list_head req_filters;
+ struct mutex filter_lock;
+ bool sysfs_inited;
u16 port_mask;
+
+ /*
+	 * We use a delayed work here to avoid waiting indefinitely for
+	 * the hisi_ptt->filter_lock mutex which protects the filter list.
+	 * The work is delayed only if the mutex cannot be acquired;
+	 * otherwise no delay is applied.
+ */
+ struct delayed_work work;
+ spinlock_t filter_update_lock;
+ DECLARE_KFIFO(filter_update_kfifo, struct hisi_ptt_filter_update_info,
+ HISI_PTT_FILTER_UPDATE_FIFO_SIZE);
};
#define to_hisi_ptt(pmu) container_of(pmu, struct hisi_ptt, hisi_ptt_pmu)
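The trylock-and-reschedule shape of the filter update worker, reduced to a generic sketch. This is illustrative only; the real implementation is hisi_ptt_update_filters() above, and the exact re-arm delay handling there may differ from this simplified form.

	static void example_update_work(struct work_struct *work)
	{
		struct hisi_ptt *hisi_ptt =
			container_of(to_delayed_work(work), struct hisi_ptt, work);
		struct hisi_ptt_filter_update_info info;

		/*
		 * Never block in the worker: if the filter list is busy,
		 * try again after a short delay.
		 */
		if (!mutex_trylock(&hisi_ptt->filter_lock)) {
			schedule_delayed_work(&hisi_ptt->work,
					      msecs_to_jiffies(HISI_PTT_WORK_DELAY_MS));
			return;
		}

		while (kfifo_get(&hisi_ptt->filter_update_kfifo, &info)) {
			/* apply the add/remove described by info here */
		}

		mutex_unlock(&hisi_ptt->filter_lock);
	}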
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 87600b4aacb3..9cfe8fc509d7 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -157,7 +157,7 @@ config I2C_I801
Emmitsburg (PCH)
Alder Lake (PCH)
Raptor Lake (PCH)
- Meteor Lake (SOC)
+ Meteor Lake (SOC and PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -1025,7 +1025,7 @@ config I2C_RZV2M
depends on ARCH_RENESAS || COMPILE_TEST
help
If you say yes to this option, support will be included for the
- Renesas RZ/V2M I2C interface.
+ Renesas RZ/V2M I2C interface.
This driver can also be built as a module. If so, the module
will be called i2c-rzv2m.
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 50e7f3f670b6..252fbd175fb1 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -465,14 +465,12 @@ static int altr_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int altr_i2c_remove(struct platform_device *pdev)
+static void altr_i2c_remove(struct platform_device *pdev)
{
struct altr_i2c_dev *idev = platform_get_drvdata(pdev);
clk_disable_unprepare(idev->i2c_clk);
i2c_del_adapter(&idev->adapter);
-
- return 0;
}
/* Match table for of_platform binding */
@@ -484,7 +482,7 @@ MODULE_DEVICE_TABLE(of, altr_i2c_of_match);
static struct platform_driver altr_i2c_driver = {
.probe = altr_i2c_probe,
- .remove = altr_i2c_remove,
+ .remove_new = altr_i2c_remove,
.driver = {
.name = "altera-i2c",
.of_match_table = altr_i2c_of_match,
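This and the following i2c hunks make the same mechanical conversion from .remove to .remove_new. In outline it looks like the sketch below; all example_* names are placeholders, not part of this patch.

	#include <linux/i2c.h>
	#include <linux/platform_device.h>

	struct example_priv {
		struct i2c_adapter adap;
	};

	static int example_probe(struct platform_device *pdev)
	{
		/* device setup elided for brevity */
		return 0;
	}

	/*
	 * The remove callback now returns void and is wired up via
	 * .remove_new; previously it returned an int that the driver
	 * core could not meaningfully act on.
	 */
	static void example_remove(struct platform_device *pdev)
	{
		struct example_priv *priv = platform_get_drvdata(pdev);

		i2c_del_adapter(&priv->adap);
	}

	static struct platform_driver example_driver = {
		.probe		= example_probe,
		.remove_new	= example_remove,
		.driver		= {
			.name = "example-i2c",
		},
	};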
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index 423fe0c8a471..112fe2bc5662 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -322,7 +322,7 @@ static int i2c_amd_probe(struct platform_device *pdev)
return ret;
}
-static int i2c_amd_remove(struct platform_device *pdev)
+static void i2c_amd_remove(struct platform_device *pdev)
{
struct amd_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
struct amd_i2c_common *i2c_common = &i2c_dev->common;
@@ -336,7 +336,6 @@ static int i2c_amd_remove(struct platform_device *pdev)
i2c_unlock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
i2c_del_adapter(&i2c_dev->adap);
- return 0;
}
static const struct acpi_device_id i2c_amd_acpi_match[] = {
@@ -347,7 +346,7 @@ MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match);
static struct platform_driver i2c_amd_plat_driver = {
.probe = i2c_amd_probe,
- .remove = i2c_amd_remove,
+ .remove_new = i2c_amd_remove,
.driver = {
.name = "i2c_amd_mp2",
.acpi_match_table = ACPI_PTR(i2c_amd_acpi_match),
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index d3c99c5b3247..2e5acfeb76c8 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -1061,7 +1061,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
return 0;
}
-static int aspeed_i2c_remove_bus(struct platform_device *pdev)
+static void aspeed_i2c_remove_bus(struct platform_device *pdev)
{
struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev);
unsigned long flags;
@@ -1077,13 +1077,11 @@ static int aspeed_i2c_remove_bus(struct platform_device *pdev)
reset_control_assert(bus->rst);
i2c_del_adapter(&bus->adap);
-
- return 0;
}
static struct platform_driver aspeed_i2c_bus_driver = {
.probe = aspeed_i2c_probe_bus,
- .remove = aspeed_i2c_remove_bus,
+ .remove_new = aspeed_i2c_remove_bus,
.driver = {
.name = "aspeed-i2c-bus",
.of_match_table = aspeed_i2c_bus_of_table,
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index 2df9df585131..05ad3bc3578a 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -273,7 +273,7 @@ static int at91_twi_probe(struct platform_device *pdev)
return 0;
}
-static int at91_twi_remove(struct platform_device *pdev)
+static void at91_twi_remove(struct platform_device *pdev)
{
struct at91_twi_dev *dev = platform_get_drvdata(pdev);
@@ -282,8 +282,6 @@ static int at91_twi_remove(struct platform_device *pdev)
pm_runtime_disable(dev->dev);
pm_runtime_set_suspended(dev->dev);
-
- return 0;
}
static int __maybe_unused at91_twi_runtime_suspend(struct device *dev)
@@ -342,7 +340,7 @@ static const struct dev_pm_ops __maybe_unused at91_twi_pm = {
static struct platform_driver at91_twi_driver = {
.probe = at91_twi_probe,
- .remove = at91_twi_remove,
+ .remove_new = at91_twi_remove,
.id_table = at91_twi_devtypes,
.driver = {
.name = "at91_i2c",
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 7b42d35b1294..e66c12ecf270 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -334,13 +334,12 @@ i2c_au1550_probe(struct platform_device *pdev)
return 0;
}
-static int i2c_au1550_remove(struct platform_device *pdev)
+static void i2c_au1550_remove(struct platform_device *pdev)
{
struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
i2c_del_adapter(&priv->adap);
i2c_au1550_disable(priv);
- return 0;
}
#ifdef CONFIG_PM
@@ -379,7 +378,7 @@ static struct platform_driver au1xpsc_smbus_driver = {
.pm = AU1XPSC_SMBUS_PMOPS,
},
.probe = i2c_au1550_probe,
- .remove = i2c_au1550_remove,
+ .remove_new = i2c_au1550_remove,
};
module_platform_driver(au1xpsc_smbus_driver);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index c1c74ce08407..d7f1e98777ac 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -804,14 +804,12 @@ error_disable_clk:
return ret;
}
-static int axxia_i2c_remove(struct platform_device *pdev)
+static void axxia_i2c_remove(struct platform_device *pdev)
{
struct axxia_i2c_dev *idev = platform_get_drvdata(pdev);
clk_disable_unprepare(idev->i2c_clk);
i2c_del_adapter(&idev->adapter);
-
- return 0;
}
/* Match table for of_platform binding */
@@ -824,7 +822,7 @@ MODULE_DEVICE_TABLE(of, axxia_i2c_of_match);
static struct platform_driver axxia_i2c_driver = {
.probe = axxia_i2c_probe,
- .remove = axxia_i2c_remove,
+ .remove_new = axxia_i2c_remove,
.driver = {
.name = "axxia-i2c",
.of_match_table = axxia_i2c_of_match,
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 85d8a6b04885..2d8342fdc25d 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -1107,7 +1107,7 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
return i2c_add_adapter(adap);
}
-static int bcm_iproc_i2c_remove(struct platform_device *pdev)
+static void bcm_iproc_i2c_remove(struct platform_device *pdev)
{
struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev);
@@ -1123,8 +1123,6 @@ static int bcm_iproc_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&iproc_i2c->adapter);
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1260,7 +1258,7 @@ static struct platform_driver bcm_iproc_i2c_driver = {
.pm = BCM_IPROC_I2C_PM_OPS,
},
.probe = bcm_iproc_i2c_probe,
- .remove = bcm_iproc_i2c_remove,
+ .remove_new = bcm_iproc_i2c_remove,
};
module_platform_driver(bcm_iproc_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index f3e369f0fd40..a57088ec2b06 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -859,13 +859,11 @@ probe_disable_clk:
return rc;
}
-static int bcm_kona_i2c_remove(struct platform_device *pdev)
+static void bcm_kona_i2c_remove(struct platform_device *pdev)
{
struct bcm_kona_i2c_dev *dev = platform_get_drvdata(pdev);
i2c_del_adapter(&dev->adapter);
-
- return 0;
}
static const struct of_device_id bcm_kona_i2c_of_match[] = {
@@ -880,7 +878,7 @@ static struct platform_driver bcm_kona_i2c_driver = {
.of_match_table = bcm_kona_i2c_of_match,
},
.probe = bcm_kona_i2c_probe,
- .remove = bcm_kona_i2c_remove,
+ .remove_new = bcm_kona_i2c_remove,
};
module_platform_driver(bcm_kona_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 09a077b31bfe..8ce6d3f49551 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -503,7 +503,7 @@ err_put_exclusive_rate:
return ret;
}
-static int bcm2835_i2c_remove(struct platform_device *pdev)
+static void bcm2835_i2c_remove(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
@@ -512,8 +512,6 @@ static int bcm2835_i2c_remove(struct platform_device *pdev)
free_irq(i2c_dev->irq, i2c_dev);
i2c_del_adapter(&i2c_dev->adapter);
-
- return 0;
}
static const struct of_device_id bcm2835_i2c_of_match[] = {
@@ -525,7 +523,7 @@ MODULE_DEVICE_TABLE(of, bcm2835_i2c_of_match);
static struct platform_driver bcm2835_i2c_driver = {
.probe = bcm2835_i2c_probe,
- .remove = bcm2835_i2c_remove,
+ .remove_new = bcm2835_i2c_remove,
.driver = {
.name = "i2c-bcm2835",
.of_match_table = bcm2835_i2c_of_match,
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index ef942714642a..cf92cbcb8c86 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -690,12 +690,11 @@ probe_errorout:
return rc;
}
-static int brcmstb_i2c_remove(struct platform_device *pdev)
+static void brcmstb_i2c_remove(struct platform_device *pdev)
{
struct brcmstb_i2c_dev *dev = platform_get_drvdata(pdev);
i2c_del_adapter(&dev->adapter);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -736,7 +735,7 @@ static struct platform_driver brcmstb_i2c_driver = {
.pm = &brcmstb_i2c_pm,
},
.probe = brcmstb_i2c_probe,
- .remove = brcmstb_i2c_remove,
+ .remove_new = brcmstb_i2c_remove,
};
module_platform_driver(brcmstb_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 3a4edf7e75f9..9849f4502570 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -1415,7 +1415,7 @@ err_clk_dis:
*
* Return: 0 always
*/
-static int cdns_i2c_remove(struct platform_device *pdev)
+static void cdns_i2c_remove(struct platform_device *pdev)
{
struct cdns_i2c *id = platform_get_drvdata(pdev);
@@ -1427,8 +1427,6 @@ static int cdns_i2c_remove(struct platform_device *pdev)
clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
reset_control_assert(id->reset);
clk_disable_unprepare(id->clk);
-
- return 0;
}
static struct platform_driver cdns_i2c_drv = {
@@ -1438,7 +1436,7 @@ static struct platform_driver cdns_i2c_drv = {
.pm = &cdns_i2c_dev_pm_ops,
},
.probe = cdns_i2c_probe,
- .remove = cdns_i2c_remove,
+ .remove_new = cdns_i2c_remove,
};
module_platform_driver(cdns_i2c_drv);
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index d97c61eec95c..fdc1758a3275 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -200,13 +200,11 @@ static const struct i2c_algorithm cbus_i2c_algo = {
.functionality = cbus_i2c_func,
};
-static int cbus_i2c_remove(struct platform_device *pdev)
+static void cbus_i2c_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
i2c_del_adapter(adapter);
-
- return 0;
}
static int cbus_i2c_probe(struct platform_device *pdev)
@@ -266,7 +264,7 @@ MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids);
static struct platform_driver cbus_i2c_driver = {
.probe = cbus_i2c_probe,
- .remove = cbus_i2c_remove,
+ .remove_new = cbus_i2c_remove,
.driver = {
.name = "i2c-cbus-gpio",
.of_match_table = of_match_ptr(i2c_cbus_dt_ids),
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 2b2c3d090089..0209933b9a84 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -529,15 +529,13 @@ remove_irq_domain:
return ret;
}
-static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
+static void cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
{
struct cht_wc_i2c_adap *adap = platform_get_drvdata(pdev);
i2c_unregister_device(adap->client);
i2c_del_adapter(&adap->adapter);
irq_domain_remove(adap->irq_domain);
-
- return 0;
}
static const struct platform_device_id cht_wc_i2c_adap_id_table[] = {
@@ -548,7 +546,7 @@ MODULE_DEVICE_TABLE(platform, cht_wc_i2c_adap_id_table);
static struct platform_driver cht_wc_i2c_adap_driver = {
.probe = cht_wc_i2c_adap_i2c_probe,
- .remove = cht_wc_i2c_adap_i2c_remove,
+ .remove_new = cht_wc_i2c_adap_i2c_remove,
.driver = {
.name = "cht_wcove_ext_chgr",
},
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 24d584a1c9a7..732daf6a932b 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -676,7 +676,7 @@ out_free:
return result;
}
-static int cpm_i2c_remove(struct platform_device *ofdev)
+static void cpm_i2c_remove(struct platform_device *ofdev)
{
struct cpm_i2c *cpm = platform_get_drvdata(ofdev);
@@ -685,8 +685,6 @@ static int cpm_i2c_remove(struct platform_device *ofdev)
cpm_i2c_shutdown(cpm);
kfree(cpm);
-
- return 0;
}
static const struct of_device_id cpm_i2c_match[] = {
@@ -703,7 +701,7 @@ MODULE_DEVICE_TABLE(of, cpm_i2c_match);
static struct platform_driver cpm_i2c_driver = {
.probe = cpm_i2c_probe,
- .remove = cpm_i2c_remove,
+ .remove_new = cpm_i2c_remove,
.driver = {
.name = "fsl-i2c-cpm",
.of_match_table = cpm_i2c_match,
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 8b3ff5bb14d8..2737fd8abd32 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -283,13 +283,11 @@ static int ec_i2c_probe(struct platform_device *pdev)
return err;
}
-static int ec_i2c_remove(struct platform_device *dev)
+static void ec_i2c_remove(struct platform_device *dev)
{
struct ec_i2c_device *bus = platform_get_drvdata(dev);
i2c_del_adapter(&bus->adap);
-
- return 0;
}
static const struct of_device_id cros_ec_i2c_of_match[] __maybe_unused = {
@@ -306,7 +304,7 @@ MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id);
static struct platform_driver ec_i2c_tunnel_driver = {
.probe = ec_i2c_probe,
- .remove = ec_i2c_remove,
+ .remove_new = ec_i2c_remove,
.driver = {
.name = "cros-ec-i2c-tunnel",
.acpi_match_table = ACPI_PTR(cros_ec_i2c_tunnel_acpi_id),
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 9750310f2c96..71b60778c643 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -767,12 +767,9 @@ static int davinci_i2c_probe(struct platform_device *pdev)
if (irq < 0)
return dev_err_probe(&pdev->dev, irq, "can't get irq resource\n");
- dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_i2c_dev),
- GFP_KERNEL);
- if (!dev) {
- dev_err(&pdev->dev, "Memory allocation failed\n");
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
return -ENOMEM;
- }
init_completion(&dev->cmd_complete);
@@ -885,7 +882,7 @@ err_pm:
return r;
}
-static int davinci_i2c_remove(struct platform_device *pdev)
+static void davinci_i2c_remove(struct platform_device *pdev)
{
struct davinci_i2c_dev *dev = platform_get_drvdata(pdev);
int ret;
@@ -894,17 +891,15 @@ static int davinci_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
- ret = pm_runtime_resume_and_get(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
- return ret;
-
- davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
+ dev_err(&pdev->dev, "Failed to resume device\n");
+ else
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
pm_runtime_dont_use_autosuspend(dev->dev);
pm_runtime_put_sync(dev->dev);
pm_runtime_disable(dev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -940,12 +935,16 @@ static const struct dev_pm_ops davinci_i2c_pm = {
#define davinci_i2c_pm_ops NULL
#endif
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:i2c_davinci");
+static const struct platform_device_id davinci_i2c_driver_ids[] = {
+ { .name = "i2c_davinci", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, davinci_i2c_driver_ids);
static struct platform_driver davinci_i2c_driver = {
.probe = davinci_i2c_probe,
- .remove = davinci_i2c_remove,
+ .remove_new = davinci_i2c_remove,
+ .id_table = davinci_i2c_driver_ids,
.driver = {
.name = "i2c_davinci",
.pm = davinci_i2c_pm_ops,
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 0dc6b1ce663f..cdd8c67d9129 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -575,6 +575,14 @@ int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev)
unsigned int param;
int ret;
+ /* DW_IC_COMP_PARAM_1 is not implemented on this IP due to a hardware issue, so use fixed FIFO depths */
+ if ((dev->flags & MODEL_MASK) == MODEL_WANGXUN_SP) {
+ dev->tx_fifo_depth = TXGBE_TX_FIFO_DEPTH;
+ dev->rx_fifo_depth = TXGBE_RX_FIFO_DEPTH;
+
+ return 0;
+ }
+
/*
* Try to detect the FIFO depth if not set by interface driver,
* the depth could be from 2 to 256 from HW spec.
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index bf23bfb51aea..cf4f684f5356 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -304,6 +304,7 @@ struct dw_i2c_dev {
#define MODEL_MSCC_OCELOT BIT(8)
#define MODEL_BAIKAL_BT1 BIT(9)
#define MODEL_AMD_NAVI_GPU BIT(10)
+#define MODEL_WANGXUN_SP BIT(11)
#define MODEL_MASK GENMASK(11, 8)
/*
@@ -313,6 +314,9 @@ struct dw_i2c_dev {
#define AMD_UCSI_INTR_REG 0x474
#define AMD_UCSI_INTR_EN 0xd
+#define TXGBE_TX_FIFO_DEPTH 4
+#define TXGBE_RX_FIFO_DEPTH 0
+
struct i2c_dw_semaphore_callbacks {
int (*probe)(struct dw_i2c_dev *dev);
void (*remove)(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 55ea91a63382..3bfd7a2232db 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -354,6 +354,68 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
return 0;
}
+static int i2c_dw_poll_tx_empty(struct dw_i2c_dev *dev)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
+ val & DW_IC_INTR_TX_EMPTY,
+ 100, 1000);
+}
+
+static int i2c_dw_poll_rx_full(struct dw_i2c_dev *dev)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val,
+ val & DW_IC_INTR_RX_FULL,
+ 100, 1000);
+}
+
+static int txgbe_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num_msgs)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ int msg_idx, buf_len, data_idx, ret;
+ unsigned int val, stop = 0;
+ u8 *buf;
+
+ dev->msgs = msgs;
+ dev->msgs_num = num_msgs;
+ i2c_dw_xfer_init(dev);
+ regmap_write(dev->map, DW_IC_INTR_MASK, 0);
+
+ for (msg_idx = 0; msg_idx < num_msgs; msg_idx++) {
+ buf = msgs[msg_idx].buf;
+ buf_len = msgs[msg_idx].len;
+
+ for (data_idx = 0; data_idx < buf_len; data_idx++) {
+ if (msg_idx == num_msgs - 1 && data_idx == buf_len - 1)
+ stop |= BIT(9);
+
+ if (msgs[msg_idx].flags & I2C_M_RD) {
+ regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | stop);
+
+ ret = i2c_dw_poll_rx_full(dev);
+ if (ret)
+ return ret;
+
+ regmap_read(dev->map, DW_IC_DATA_CMD, &val);
+ buf[data_idx] = val;
+ } else {
+ ret = i2c_dw_poll_tx_empty(dev);
+ if (ret)
+ return ret;
+
+ regmap_write(dev->map, DW_IC_DATA_CMD,
+ buf[data_idx] | stop);
+ }
+ }
+ }
+
+ return num_msgs;
+}
+
/*
* Initiate (and continue) low level master read/write transaction.
* This function is only called from i2c_dw_isr, and pumping i2c_msg
@@ -559,13 +621,19 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
pm_runtime_get_sync(dev->dev);
/*
- * Initiate I2C message transfer when AMD NAVI GPU card is enabled,
+ * Initiate I2C message transfer when polling mode is enabled,
* As it is polling based transfer mechanism, which does not support
* interrupt based functionalities of existing DesignWare driver.
*/
- if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
+ switch (dev->flags & MODEL_MASK) {
+ case MODEL_AMD_NAVI_GPU:
ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
goto done_nolock;
+ case MODEL_WANGXUN_SP:
+ ret = txgbe_i2c_dw_xfer_quirk(adap, msgs, num);
+ goto done_nolock;
+ default:
+ break;
}
reinit_completion(&dev->cmd_complete);
@@ -848,7 +916,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
return 0;
}
-static int amd_i2c_adap_quirk(struct dw_i2c_dev *dev)
+static int i2c_dw_poll_adap_quirk(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
int ret;
@@ -862,6 +930,17 @@ static int amd_i2c_adap_quirk(struct dw_i2c_dev *dev)
return ret;
}
+static bool i2c_dw_is_model_poll(struct dw_i2c_dev *dev)
+{
+ switch (dev->flags & MODEL_MASK) {
+ case MODEL_AMD_NAVI_GPU:
+ case MODEL_WANGXUN_SP:
+ return true;
+ default:
+ return false;
+ }
+}
+
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
@@ -917,8 +996,8 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
adap->dev.parent = dev->dev;
i2c_set_adapdata(adap, dev);
- if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
- return amd_i2c_adap_quirk(dev);
+ if (i2c_dw_is_model_poll(dev))
+ return i2c_dw_poll_adap_quirk(dev);
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
irq_flags = IRQF_NO_SUSPEND;
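txgbe_i2c_dw_xfer_quirk() above services transfers entirely by polling DW_IC_RAW_INTR_STAT with regmap_read_poll_timeout() instead of taking interrupts. From a client driver's point of view nothing changes; a typical write-then-read transaction that this quirk would end up servicing looks like the following sketch (foo_read_reg and its client are hypothetical):

#include <linux/i2c.h>

static int foo_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
        struct i2c_msg msgs[] = {
                {
                        .addr = client->addr,
                        .flags = 0,             /* write the register offset */
                        .len = 1,
                        .buf = &reg,
                },
                {
                        .addr = client->addr,
                        .flags = I2C_M_RD,      /* read one byte back */
                        .len = 1,
                        .buf = val,
                },
        };
        int ret;

        ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
        if (ret < 0)
                return ret;

        /* The adapter returns the number of messages transferred. */
        return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}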
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 782fe1ef3ca1..61d7a27aa070 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -234,6 +235,16 @@ static const struct dev_pm_ops i2c_dw_pm_ops = {
SET_RUNTIME_PM_OPS(i2c_dw_pci_runtime_suspend, i2c_dw_pci_runtime_resume, NULL)
};
+static const struct property_entry dgpu_properties[] = {
+ /* USB-C doesn't power the system */
+ PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE),
+ {}
+};
+
+static const struct software_node dgpu_node = {
+ .properties = dgpu_properties,
+};
+
static int i2c_dw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -325,7 +336,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
}
if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
- dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, NULL);
+ dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
if (IS_ERR(dev->slave))
return dev_err_probe(dev->dev, PTR_ERR(dev->slave),
"register UCSI failed\n");
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 89ad88c54754..970c1c3b0402 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -168,6 +168,15 @@ static inline int dw_i2c_of_configure(struct platform_device *pdev)
}
#endif
+static int txgbe_i2c_request_regs(struct dw_i2c_dev *dev)
+{
+ dev->map = dev_get_regmap(dev->dev->parent, NULL);
+ if (!dev->map)
+ return -ENODEV;
+
+ return 0;
+}
+
static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev)
{
pm_runtime_disable(dev->dev);
@@ -185,6 +194,9 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev)
case MODEL_BAIKAL_BT1:
ret = bt1_i2c_request_regs(dev);
break;
+ case MODEL_WANGXUN_SP:
+ ret = txgbe_i2c_request_regs(dev);
+ break;
default:
dev->base = devm_platform_ioremap_resource(pdev, 0);
ret = PTR_ERR_OR_ZERO(dev->base);
@@ -277,6 +289,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
return -ENOMEM;
dev->flags = (uintptr_t)device_get_match_data(&pdev->dev);
+ if (device_property_present(&pdev->dev, "wx,i2c-snps-model"))
+ dev->flags = MODEL_WANGXUN_SP;
+
dev->dev = &pdev->dev;
dev->irq = irq;
platform_set_drvdata(pdev, dev);
@@ -384,7 +399,7 @@ exit_reset:
return ret;
}
-static int dw_i2c_plat_remove(struct platform_device *pdev)
+static void dw_i2c_plat_remove(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
@@ -401,8 +416,6 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
i2c_dw_remove_lock_support(dev);
reset_control_assert(dev->rst);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -481,7 +494,7 @@ MODULE_ALIAS("platform:i2c_designware");
static struct platform_driver dw_i2c_driver = {
.probe = dw_i2c_plat_probe,
- .remove = dw_i2c_plat_remove,
+ .remove_new = dw_i2c_plat_remove,
.driver = {
.name = "i2c_designware",
.of_match_table = of_match_ptr(dw_i2c_of_match),
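For MODEL_WANGXUN_SP the DesignWare platform device owns no MMIO resource; txgbe_i2c_request_regs() above simply borrows the regmap that its parent registered. A minimal sketch of that parent/child contract, assuming a hypothetical parent driver that maps the registers and creates the regmap on its own device:

#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Parent side (hypothetical): register an MMIO regmap on the parent device,
 * then spawn the "i2c_designware" child with &pdev->dev as its parent. */
static int parent_probe(struct platform_device *pdev)
{
        static const struct regmap_config cfg = {
                .reg_bits = 32,
                .val_bits = 32,
                .reg_stride = 4,
        };
        void __iomem *base;
        struct regmap *map;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        map = devm_regmap_init_mmio(&pdev->dev, base, &cfg);
        if (IS_ERR(map))
                return PTR_ERR(map);

        /* ... register the i2c_designware child platform device here ... */
        return 0;
}

/* Child side: what txgbe_i2c_request_regs() above boils down to. */
static int child_get_regmap(struct device *child_dev, struct regmap **map)
{
        *map = dev_get_regmap(child_dev->parent, NULL);
        return *map ? 0 : -ENODEV;
}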
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 50925d97fa42..3462f2bc0fa8 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -347,14 +347,12 @@ static int dc_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int dc_i2c_remove(struct platform_device *pdev)
+static void dc_i2c_remove(struct platform_device *pdev)
{
struct dc_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
clk_disable_unprepare(i2c->clk);
-
- return 0;
}
static const struct of_device_id dc_i2c_match[] = {
@@ -365,7 +363,7 @@ MODULE_DEVICE_TABLE(of, dc_i2c_match);
static struct platform_driver dc_i2c_driver = {
.probe = dc_i2c_probe,
- .remove = dc_i2c_remove,
+ .remove_new = dc_i2c_remove,
.driver = {
.name = "digicolor-i2c",
.of_match_table = dc_i2c_match,
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index 2a2089db71a5..4f02cc2fb567 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -236,20 +236,18 @@ out_disable:
return ret;
}
-static int dln2_i2c_remove(struct platform_device *pdev)
+static void dln2_i2c_remove(struct platform_device *pdev)
{
struct dln2_i2c *dln2 = platform_get_drvdata(pdev);
i2c_del_adapter(&dln2->adapter);
dln2_i2c_enable(dln2, false);
-
- return 0;
}
static struct platform_driver dln2_i2c_driver = {
.driver.name = "dln2-i2c",
.probe = dln2_i2c_probe,
- .remove = dln2_i2c_remove,
+ .remove_new = dln2_i2c_remove,
};
module_platform_driver(dln2_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index f2e537b137b2..4ba93cd91c0f 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -419,14 +419,12 @@ err_clk:
return ret;
}
-static int em_i2c_remove(struct platform_device *dev)
+static void em_i2c_remove(struct platform_device *dev)
{
struct em_i2c_device *priv = platform_get_drvdata(dev);
i2c_del_adapter(&priv->adap);
clk_disable_unprepare(priv->sclk);
-
- return 0;
}
static const struct of_device_id em_i2c_ids[] = {
@@ -436,7 +434,7 @@ static const struct of_device_id em_i2c_ids[] = {
static struct platform_driver em_i2c_driver = {
.probe = em_i2c_probe,
- .remove = em_i2c_remove,
+ .remove_new = em_i2c_remove,
.driver = {
.name = "em-i2c",
.of_match_table = em_i2c_ids,
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 4a6260d04db2..f378cd479e55 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -882,7 +882,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return ret;
}
-static int exynos5_i2c_remove(struct platform_device *pdev)
+static void exynos5_i2c_remove(struct platform_device *pdev)
{
struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
@@ -890,8 +890,6 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
clk_unprepare(i2c->clk);
clk_unprepare(i2c->pclk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -945,7 +943,7 @@ static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = {
static struct platform_driver exynos5_i2c_driver = {
.probe = exynos5_i2c_probe,
- .remove = exynos5_i2c_remove,
+ .remove_new = exynos5_i2c_remove,
.driver = {
.name = "exynos5-hsi2c",
.pm = &exynos5_i2c_dev_pm_ops,
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 1794c0399f22..e5a5b9e8bf2c 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -475,7 +475,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int i2c_gpio_remove(struct platform_device *pdev)
+static void i2c_gpio_remove(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
struct i2c_adapter *adap;
@@ -486,8 +486,6 @@ static int i2c_gpio_remove(struct platform_device *pdev)
adap = &priv->adap;
i2c_del_adapter(adap);
-
- return 0;
}
static const struct of_device_id i2c_gpio_dt_ids[] = {
@@ -510,7 +508,7 @@ static struct platform_driver i2c_gpio_driver = {
.acpi_match_table = i2c_gpio_acpi_match,
},
.probe = i2c_gpio_probe,
- .remove = i2c_gpio_remove,
+ .remove_new = i2c_gpio_remove,
};
static int __init i2c_gpio_init(void)
diff --git a/drivers/i2c/busses/i2c-gxp.c b/drivers/i2c/busses/i2c-gxp.c
index 8ea3fb5e4c7f..70b0de07ed99 100644
--- a/drivers/i2c/busses/i2c-gxp.c
+++ b/drivers/i2c/busses/i2c-gxp.c
@@ -577,15 +577,13 @@ static int gxp_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int gxp_i2c_remove(struct platform_device *pdev)
+static void gxp_i2c_remove(struct platform_device *pdev)
{
struct gxp_i2c_drvdata *drvdata = platform_get_drvdata(pdev);
/* Disable interrupt */
regmap_update_bits(i2cg_map, GXP_I2CINTEN, BIT(drvdata->engine), 0);
i2c_del_adapter(&drvdata->adapter);
-
- return 0;
}
static const struct of_device_id gxp_i2c_of_match[] = {
@@ -596,7 +594,7 @@ MODULE_DEVICE_TABLE(of, gxp_i2c_of_match);
static struct platform_driver gxp_i2c_driver = {
.probe = gxp_i2c_probe,
- .remove = gxp_i2c_remove,
+ .remove_new = gxp_i2c_remove,
.driver = {
.name = "gxp-i2c",
.of_match_table = gxp_i2c_of_match,
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 4374a8677271..7922bc917c33 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -435,7 +435,7 @@ err:
return ret;
}
-static int highlander_i2c_remove(struct platform_device *pdev)
+static void highlander_i2c_remove(struct platform_device *pdev)
{
struct highlander_i2c_dev *dev = platform_get_drvdata(pdev);
@@ -446,8 +446,6 @@ static int highlander_i2c_remove(struct platform_device *pdev)
iounmap(dev->base);
kfree(dev);
-
- return 0;
}
static struct platform_driver highlander_i2c_driver = {
@@ -456,7 +454,7 @@ static struct platform_driver highlander_i2c_driver = {
},
.probe = highlander_i2c_probe,
- .remove = highlander_i2c_remove,
+ .remove_new = highlander_i2c_remove,
};
module_platform_driver(highlander_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 0e34cbaca22d..784a5f56eb76 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -360,7 +360,11 @@ static int hix5hd2_i2c_xfer(struct i2c_adapter *adap,
pm_runtime_get_sync(priv->dev);
for (i = 0; i < num; i++, msgs++) {
- stop = (i == num - 1);
+ if ((i == num - 1) || (msgs->flags & I2C_M_STOP))
+ stop = 1;
+ else
+ stop = 0;
+
ret = hix5hd2_i2c_xfer_msg(priv, msgs, stop);
if (ret < 0)
goto out;
@@ -416,12 +420,11 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- priv->clk = devm_clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "cannot get clock\n");
+ dev_err(&pdev->dev, "cannot enable clock\n");
return PTR_ERR(priv->clk);
}
- clk_prepare_enable(priv->clk);
strscpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name));
priv->dev = &pdev->dev;
@@ -442,7 +445,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv);
if (ret != 0) {
dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq);
- goto err_clk;
+ return ret;
}
pm_runtime_set_autosuspend_delay(priv->dev, MSEC_PER_SEC);
@@ -459,21 +462,17 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
err_runtime:
pm_runtime_disable(priv->dev);
pm_runtime_set_suspended(priv->dev);
-err_clk:
- clk_disable_unprepare(priv->clk);
+
return ret;
}
-static int hix5hd2_i2c_remove(struct platform_device *pdev)
+static void hix5hd2_i2c_remove(struct platform_device *pdev)
{
struct hix5hd2_i2c_priv *priv = platform_get_drvdata(pdev);
i2c_del_adapter(&priv->adap);
pm_runtime_disable(priv->dev);
pm_runtime_set_suspended(priv->dev);
- clk_disable_unprepare(priv->clk);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -511,7 +510,7 @@ MODULE_DEVICE_TABLE(of, hix5hd2_i2c_match);
static struct platform_driver hix5hd2_i2c_driver = {
.probe = hix5hd2_i2c_probe,
- .remove = hix5hd2_i2c_remove,
+ .remove_new = hix5hd2_i2c_remove,
.driver = {
.name = "hix5hd2-i2c",
.pm = &hix5hd2_i2c_pm_ops,
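The hix5hd2 hunks above (and the lpc2k and mt7621 conversions further down) replace the devm_clk_get() + clk_prepare_enable() pair with devm_clk_get_enabled(), which is why the clk_disable_unprepare() calls disappear from the error paths and from ->remove(). A hedged before/after sketch with a hypothetical probe helper:

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Before: acquire, then enable, then remember to disable on every exit path. */
static int foo_probe_old(struct platform_device *pdev, struct clk **out)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        *out = clk;     /* caller must clk_disable_unprepare() later */
        return 0;
}

/* After: one call; the clock is disabled and unprepared automatically on unbind. */
static int foo_probe_new(struct platform_device *pdev, struct clk **out)
{
        struct clk *clk;

        clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "failed to get/enable clock\n");

        *out = clk;
        return 0;
}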
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ac5326747c51..943b8e6d026d 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -77,6 +77,8 @@
* Alder Lake-M (PCH) 0x54a3 32 hard yes yes yes
* Raptor Lake-S (PCH) 0x7a23 32 hard yes yes yes
* Meteor Lake-P (SOC) 0x7e22 32 hard yes yes yes
+ * Meteor Lake SoC-S (SOC) 0xae22 32 hard yes yes yes
+ * Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -233,6 +235,7 @@
#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_P_SMBUS 0x7e22
+#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_PCH_S_SMBUS 0x7f23
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
@@ -250,6 +253,7 @@
#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
#define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3
+#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_SOC_S_SMBUS 0xae22
struct i801_mux_config {
char *gpio_chip;
@@ -977,67 +981,69 @@ static const struct i2c_algorithm smbus_algorithm = {
FEATURE_HOST_NOTIFY)
static const struct pci_device_id i801_ids[] = {
- { PCI_DEVICE_DATA(INTEL, 82801AA_3, 0) },
- { PCI_DEVICE_DATA(INTEL, 82801AB_3, 0) },
- { PCI_DEVICE_DATA(INTEL, 82801BA_2, 0) },
- { PCI_DEVICE_DATA(INTEL, 82801CA_3, FEATURE_HOST_NOTIFY) },
- { PCI_DEVICE_DATA(INTEL, 82801DB_3, FEATURES_ICH4) },
- { PCI_DEVICE_DATA(INTEL, 82801EB_3, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, ESB_4, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, ICH6_16, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, ICH7_17, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, ESB2_17, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, ICH8_5, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, ICH9_6, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, EP80579_1, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, ICH10_4, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, ICH10_5, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, 5_3400_SERIES_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, COUGARPOINT_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF0, FEATURES_ICH5 | FEATURE_IDF) },
- { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF1, FEATURES_ICH5 | FEATURE_IDF) },
- { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF2, FEATURES_ICH5 | FEATURE_IDF) },
- { PCI_DEVICE_DATA(INTEL, DH89XXCC_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, PANTHERPOINT_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, LYNXPOINT_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, LYNXPOINT_LP_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, AVOTON_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS0, FEATURES_ICH5 | FEATURE_IDF) },
- { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS1, FEATURES_ICH5 | FEATURE_IDF) },
- { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS2, FEATURES_ICH5 | FEATURE_IDF) },
- { PCI_DEVICE_DATA(INTEL, COLETOCREEK_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, GEMINILAKE_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_LP_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, BAYTRAIL_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, BRASWELL_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
- { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
- { PCI_DEVICE_DATA(INTEL, CDF_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, DNV_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
- { PCI_DEVICE_DATA(INTEL, EBG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, BROXTON_SMBUS, FEATURES_ICH5) },
- { PCI_DEVICE_DATA(INTEL, LEWISBURG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
- { PCI_DEVICE_DATA(INTEL, LEWISBURG_SSKU_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
- { PCI_DEVICE_DATA(INTEL, KABYLAKE_PCH_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
- { PCI_DEVICE_DATA(INTEL, CANNONLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, CANNONLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, ICELAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, ICELAKE_N_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, COMETLAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, COMETLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, COMETLAKE_V_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
- { PCI_DEVICE_DATA(INTEL, ELKHART_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, TIGERLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, TIGERLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, JASPER_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_M_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, RAPTOR_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, 82801AA_3, 0) },
+ { PCI_DEVICE_DATA(INTEL, 82801AB_3, 0) },
+ { PCI_DEVICE_DATA(INTEL, 82801BA_2, 0) },
+ { PCI_DEVICE_DATA(INTEL, 82801CA_3, FEATURE_HOST_NOTIFY) },
+ { PCI_DEVICE_DATA(INTEL, 82801DB_3, FEATURES_ICH4) },
+ { PCI_DEVICE_DATA(INTEL, 82801EB_3, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ESB_4, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH6_16, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH7_17, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ESB2_17, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH8_5, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH9_6, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, EP80579_1, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH10_4, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH10_5, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, 5_3400_SERIES_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, COUGARPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF0, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF1, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF2, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, DH89XXCC_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, PANTHERPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, LYNXPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, LYNXPOINT_LP_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, AVOTON_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS0, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS1, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS2, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, COLETOCREEK_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, GEMINILAKE_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_LP_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, BAYTRAIL_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, BRASWELL_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, CDF_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, DNV_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, EBG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, BROXTON_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, LEWISBURG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, LEWISBURG_SSKU_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, KABYLAKE_PCH_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, CANNONLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, CANNONLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ICELAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ICELAKE_N_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, COMETLAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, COMETLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, COMETLAKE_V_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, ELKHART_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, TIGERLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, TIGERLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, JASPER_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_M_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, RAPTOR_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
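Adding a new controller to the i801 table is the same two-step change as the Meteor Lake entries above: define the PCI device ID, then append a PCI_DEVICE_DATA() entry carrying the feature mask. The sketch below uses a made-up device ID purely for illustration; FEATURES_ICH5 and FEATURE_TCO_CNL are the driver's existing feature masks:

#include <linux/module.h>
#include <linux/pci.h>

#define PCI_DEVICE_ID_INTEL_EXAMPLE_SMBUS       0x1234  /* hypothetical ID */

static const struct pci_device_id example_ids[] = {
        { PCI_DEVICE_DATA(INTEL, EXAMPLE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);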
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index eeb80e34f9ad..1ad9d3b26dd3 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -694,10 +694,8 @@ static int iic_probe(struct platform_device *ofdev)
int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&ofdev->dev, "failed to allocate device data\n");
+ if (!dev)
return -ENOMEM;
- }
platform_set_drvdata(ofdev, dev);
@@ -769,7 +767,7 @@ error_cleanup:
/*
* Cleanup initialized IIC interface
*/
-static int iic_remove(struct platform_device *ofdev)
+static void iic_remove(struct platform_device *ofdev)
{
struct ibm_iic_private *dev = platform_get_drvdata(ofdev);
@@ -782,8 +780,6 @@ static int iic_remove(struct platform_device *ofdev)
iounmap(dev->vaddr);
kfree(dev);
-
- return 0;
}
static const struct of_device_id ibm_iic_match[] = {
@@ -798,7 +794,7 @@ static struct platform_driver ibm_iic_driver = {
.of_match_table = ibm_iic_match,
},
.probe = iic_probe,
- .remove = iic_remove,
+ .remove_new = iic_remove,
};
module_platform_driver(ibm_iic_driver);
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 39c479f96eb5..66ba36949ab5 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -913,7 +913,7 @@ static unsigned int img_i2c_auto(struct img_i2c *i2c,
static irqreturn_t img_i2c_isr(int irq, void *dev_id)
{
- struct img_i2c *i2c = (struct img_i2c *)dev_id;
+ struct img_i2c *i2c = dev_id;
u32 int_status, line_status;
/* We handle transaction completion AFTER accessing registers */
unsigned int hret;
@@ -1413,7 +1413,7 @@ rpm_disable:
return ret;
}
-static int img_i2c_remove(struct platform_device *dev)
+static void img_i2c_remove(struct platform_device *dev)
{
struct img_i2c *i2c = platform_get_drvdata(dev);
@@ -1421,8 +1421,6 @@ static int img_i2c_remove(struct platform_device *dev)
pm_runtime_disable(&dev->dev);
if (!pm_runtime_status_suspended(&dev->dev))
img_i2c_runtime_suspend(&dev->dev);
-
- return 0;
}
static int img_i2c_runtime_suspend(struct device *dev)
@@ -1506,7 +1504,7 @@ static struct platform_driver img_scb_i2c_driver = {
.pm = &img_i2c_pm,
},
.probe = img_i2c_probe,
- .remove = img_i2c_remove,
+ .remove_new = img_i2c_remove,
};
module_platform_driver(img_scb_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 4d24ceb57ee7..c3287c887c6f 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -217,7 +217,7 @@ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
for (prescale = 0; prescale <= 7; prescale++) {
clk_cycle = clk_rate / ((1 << prescale) * lpi2c_imx->bitrate)
- 3 - (filt >> 1);
- clkhi = (clk_cycle + I2C_CLK_RATIO) / (I2C_CLK_RATIO + 1);
+ clkhi = DIV_ROUND_UP(clk_cycle, I2C_CLK_RATIO + 1);
clklo = clk_cycle - clkhi;
if (clklo < 64)
break;
@@ -623,7 +623,7 @@ rpm_disable:
return ret;
}
-static int lpi2c_imx_remove(struct platform_device *pdev)
+static void lpi2c_imx_remove(struct platform_device *pdev)
{
struct lpi2c_imx_struct *lpi2c_imx = platform_get_drvdata(pdev);
@@ -631,8 +631,6 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
-
- return 0;
}
static int __maybe_unused lpi2c_runtime_suspend(struct device *dev)
@@ -669,7 +667,7 @@ static const struct dev_pm_ops lpi2c_pm_ops = {
static struct platform_driver lpi2c_imx_driver = {
.probe = lpi2c_imx_probe,
- .remove = lpi2c_imx_remove,
+ .remove_new = lpi2c_imx_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = lpi2c_imx_of_match,
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 42189a5f2905..65128a73e8a3 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1561,7 +1561,7 @@ rpm_disable:
return ret;
}
-static int i2c_imx_remove(struct platform_device *pdev)
+static void i2c_imx_remove(struct platform_device *pdev)
{
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
@@ -1592,8 +1592,6 @@ static int i2c_imx_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int __maybe_unused i2c_imx_runtime_suspend(struct device *dev)
@@ -1624,7 +1622,7 @@ static const struct dev_pm_ops i2c_imx_pm_ops = {
static struct platform_driver i2c_imx_driver = {
.probe = i2c_imx_probe,
- .remove = i2c_imx_remove,
+ .remove_new = i2c_imx_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &i2c_imx_pm_ops,
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 4a6ff54d87fe..f2f7ebeeaecb 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -388,7 +388,7 @@ static const struct i2c_algorithm iop3xx_i2c_algo = {
.functionality = iop3xx_i2c_func,
};
-static int
+static void
iop3xx_i2c_remove(struct platform_device *pdev)
{
struct i2c_adapter *padapter = platform_get_drvdata(pdev);
@@ -408,8 +408,6 @@ iop3xx_i2c_remove(struct platform_device *pdev)
release_mem_region(res->start, IOP3XX_I2C_IO_SIZE);
kfree(adapter_data);
kfree(padapter);
-
- return 0;
}
static int
@@ -529,7 +527,7 @@ MODULE_DEVICE_TABLE(of, i2c_iop3xx_match);
static struct platform_driver iop3xx_i2c_driver = {
.probe = iop3xx_i2c_probe,
- .remove = iop3xx_i2c_remove,
+ .remove_new = iop3xx_i2c_remove,
.driver = {
.name = "IOP3xx-I2C",
.of_match_table = i2c_iop3xx_match,
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 2dc7ada06ac5..1dc1ceaa4443 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -286,14 +286,12 @@ static int smbus_sch_probe(struct platform_device *dev)
return retval;
}
-static int smbus_sch_remove(struct platform_device *pdev)
+static void smbus_sch_remove(struct platform_device *pdev)
{
if (sch_smba) {
i2c_del_adapter(&sch_adapter);
sch_smba = 0;
}
-
- return 0;
}
static struct platform_driver smbus_sch_driver = {
@@ -301,7 +299,7 @@ static struct platform_driver smbus_sch_driver = {
.name = "isch_smbus",
},
.probe = smbus_sch_probe,
- .remove = smbus_sch_remove,
+ .remove_new = smbus_sch_remove,
};
module_platform_driver(smbus_sch_driver);
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index baa7319eee53..0dfe60399521 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -845,18 +845,17 @@ err:
return ret;
}
-static int jz4780_i2c_remove(struct platform_device *pdev)
+static void jz4780_i2c_remove(struct platform_device *pdev)
{
struct jz4780_i2c *i2c = platform_get_drvdata(pdev);
clk_disable_unprepare(i2c->clk);
i2c_del_adapter(&i2c->adap);
- return 0;
}
static struct platform_driver jz4780_i2c_driver = {
.probe = jz4780_i2c_probe,
- .remove = jz4780_i2c_remove,
+ .remove_new = jz4780_i2c_remove,
.driver = {
.name = "jz4780-i2c",
.of_match_table = jz4780_i2c_of_matches,
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index cf857cf22507..281058e3ea46 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -329,7 +329,7 @@ static int kempld_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int kempld_i2c_remove(struct platform_device *pdev)
+static void kempld_i2c_remove(struct platform_device *pdev)
{
struct kempld_i2c_data *i2c = platform_get_drvdata(pdev);
struct kempld_device_data *pld = i2c->pld;
@@ -348,8 +348,6 @@ static int kempld_i2c_remove(struct platform_device *pdev)
kempld_release_mutex(pld);
i2c_del_adapter(&i2c->adap);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -389,7 +387,7 @@ static struct platform_driver kempld_i2c_driver = {
.name = "kempld-i2c",
},
.probe = kempld_i2c_probe,
- .remove = kempld_i2c_remove,
+ .remove_new = kempld_i2c_remove,
.suspend = kempld_i2c_suspend,
.resume = kempld_i2c_resume,
};
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 8fff6fbb7065..5c6d96554753 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -365,23 +365,17 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
init_waitqueue_head(&i2c->wait);
- i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "error getting clock\n");
+ dev_err(&pdev->dev, "failed to enable clock.\n");
return PTR_ERR(i2c->clk);
}
- ret = clk_prepare_enable(i2c->clk);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable clock.\n");
- return ret;
- }
-
ret = devm_request_irq(&pdev->dev, i2c->irq, i2c_lpc2k_handler, 0,
dev_name(&pdev->dev), i2c);
if (ret < 0) {
dev_err(&pdev->dev, "can't request interrupt.\n");
- goto fail_clk;
+ return ret;
}
disable_irq_nosync(i2c->irq);
@@ -397,8 +391,7 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
clkrate = clk_get_rate(i2c->clk);
if (clkrate == 0) {
dev_err(&pdev->dev, "can't get I2C base clock\n");
- ret = -EINVAL;
- goto fail_clk;
+ return -EINVAL;
}
/* Setup I2C dividers to generate clock with proper duty cycle */
@@ -424,25 +417,18 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0)
- goto fail_clk;
+ return ret;
dev_info(&pdev->dev, "LPC2K I2C adapter\n");
return 0;
-
-fail_clk:
- clk_disable_unprepare(i2c->clk);
- return ret;
}
-static int i2c_lpc2k_remove(struct platform_device *dev)
+static void i2c_lpc2k_remove(struct platform_device *dev)
{
struct lpc2k_i2c *i2c = platform_get_drvdata(dev);
i2c_del_adapter(&i2c->adap);
- clk_disable_unprepare(i2c->clk);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -483,7 +469,7 @@ MODULE_DEVICE_TABLE(of, lpc2k_i2c_match);
static struct platform_driver i2c_lpc2k_driver = {
.probe = i2c_lpc2k_probe,
- .remove = i2c_lpc2k_remove,
+ .remove_new = i2c_lpc2k_remove,
.driver = {
.name = "lpc2k-i2c",
.pm = I2C_LPC2K_DEV_PM_OPS,
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 889eff06b78f..16026c895bb6 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -535,14 +535,12 @@ static int meson_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int meson_i2c_remove(struct platform_device *pdev)
+static void meson_i2c_remove(struct platform_device *pdev)
{
struct meson_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
clk_disable_unprepare(i2c->clk);
-
- return 0;
}
static const struct meson_i2c_data i2c_meson6_data = {
@@ -568,7 +566,7 @@ MODULE_DEVICE_TABLE(of, meson_i2c_match);
static struct platform_driver meson_i2c_driver = {
.probe = meson_i2c_probe,
- .remove = meson_i2c_remove,
+ .remove_new = meson_i2c_remove,
.driver = {
.name = "meson-i2c",
.of_match_table = meson_i2c_match,
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 4d7e9b25f018..7f58f7eaabb6 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -446,14 +446,12 @@ static int mchp_corei2c_probe(struct platform_device *pdev)
return 0;
}
-static int mchp_corei2c_remove(struct platform_device *pdev)
+static void mchp_corei2c_remove(struct platform_device *pdev)
{
struct mchp_corei2c_dev *idev = platform_get_drvdata(pdev);
clk_disable_unprepare(idev->i2c_clk);
i2c_del_adapter(&idev->adapter);
-
- return 0;
}
static const struct of_device_id mchp_corei2c_of_match[] = {
@@ -465,7 +463,7 @@ MODULE_DEVICE_TABLE(of, mchp_corei2c_of_match);
static struct platform_driver mchp_corei2c_driver = {
.probe = mchp_corei2c_probe,
- .remove = mchp_corei2c_remove,
+ .remove_new = mchp_corei2c_remove,
.driver = {
.name = "microchip-corei2c",
.of_match_table = mchp_corei2c_of_match,
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 1810d5791b3d..ae66bdd1b737 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -2433,7 +2433,7 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int mlxbf_i2c_remove(struct platform_device *pdev)
+static void mlxbf_i2c_remove(struct platform_device *pdev)
{
struct mlxbf_i2c_priv *priv = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -2474,13 +2474,11 @@ static int mlxbf_i2c_remove(struct platform_device *pdev)
devm_free_irq(dev, priv->irq, priv);
i2c_del_adapter(&priv->adap);
-
- return 0;
}
static struct platform_driver mlxbf_i2c_driver = {
.probe = mlxbf_i2c_probe,
- .remove = mlxbf_i2c_remove,
+ .remove_new = mlxbf_i2c_remove,
.driver = {
.name = "i2c-mlxbf",
.acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids),
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 081f51ef0551..c42fd4b329e4 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -571,19 +571,17 @@ mlxcpld_i2_probe_failed:
return err;
}
-static int mlxcpld_i2c_remove(struct platform_device *pdev)
+static void mlxcpld_i2c_remove(struct platform_device *pdev)
{
struct mlxcpld_i2c_priv *priv = platform_get_drvdata(pdev);
i2c_del_adapter(&priv->adap);
mutex_destroy(&priv->lock);
-
- return 0;
}
static struct platform_driver mlxcpld_i2c_driver = {
.probe = mlxcpld_i2c_probe,
- .remove = mlxcpld_i2c_remove,
+ .remove_new = mlxcpld_i2c_remove,
.driver = {
.name = MLXCPLD_I2C_DEVICE_NAME,
},
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index cfd074ee6d54..f460a7fb4eae 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -307,7 +307,6 @@ static void mpc_i2c_setup_512x(struct device_node *node,
{
struct device_node *node_ctrl;
void __iomem *ctrl;
- const u32 *pval;
u32 idx;
/* Enable I2C interrupts for mpc5121 */
@@ -316,9 +315,10 @@ static void mpc_i2c_setup_512x(struct device_node *node,
if (node_ctrl) {
ctrl = of_iomap(node_ctrl, 0);
if (ctrl) {
+ u64 addr;
/* Interrupt enable bits for i2c-0/1/2: bit 24/26/28 */
- pval = of_get_property(node, "reg", NULL);
- idx = (*pval & 0xff) / 0x20;
+ of_property_read_reg(node, 0, &addr, NULL);
+ idx = (addr & 0xff) / 0x20;
setbits32(ctrl, 1 << (24 + idx * 2));
iounmap(ctrl);
}
@@ -890,15 +890,13 @@ static int fsl_i2c_probe(struct platform_device *op)
return result;
};
-static int fsl_i2c_remove(struct platform_device *op)
+static void fsl_i2c_remove(struct platform_device *op)
{
struct mpc_i2c *i2c = platform_get_drvdata(op);
i2c_del_adapter(&i2c->adap);
clk_disable_unprepare(i2c->clk_per);
-
- return 0;
};
static int __maybe_unused mpc_i2c_suspend(struct device *dev)
@@ -959,7 +957,7 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
static struct platform_driver mpc_i2c_driver = {
.probe = fsl_i2c_probe,
- .remove = fsl_i2c_remove,
+ .remove_new = fsl_i2c_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = mpc_i2c_of_match,
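The mpc5121 setup hunk above swaps direct parsing of the raw "reg" property bytes for of_property_read_reg(), which hands back the address of the requested reg entry as a u64. A sketch of that lookup, assuming the same 0x20 spacing between the i2c-0/1/2 register blocks:

#include <linux/of.h>
#include <linux/of_address.h>

/* Hypothetical helper: derive the controller index (0..2) from the node's
 * first "reg" entry, as mpc_i2c_setup_512x() does above. */
static u32 example_i2c_index(struct device_node *node)
{
        u64 addr;

        if (of_property_read_reg(node, 0, &addr, NULL))
                return 0;       /* fall back to index 0 on parse failure */

        return (addr & 0xff) / 0x20;    /* controllers sit 0x20 apart */
}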
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index a43c4d77739a..7ca3f2221ba6 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1505,15 +1505,13 @@ err_bulk_unprepare:
return ret;
}
-static int mtk_i2c_remove(struct platform_device *pdev)
+static void mtk_i2c_remove(struct platform_device *pdev)
{
struct mtk_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1555,7 +1553,7 @@ static const struct dev_pm_ops mtk_i2c_pm = {
static struct platform_driver mtk_i2c_driver = {
.probe = mtk_i2c_probe,
- .remove = mtk_i2c_remove,
+ .remove_new = mtk_i2c_remove,
.driver = {
.name = I2C_DRV_NAME,
.pm = &mtk_i2c_pm,
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 20eda5738ac4..104bb194e990 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -282,16 +282,11 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (IS_ERR(i2c->base))
return PTR_ERR(i2c->base);
- i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "no clock defined\n");
+ dev_err(&pdev->dev, "Failed to enable clock\n");
return PTR_ERR(i2c->clk);
}
- ret = clk_prepare_enable(i2c->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock\n");
- return ret;
- }
i2c->dev = &pdev->dev;
@@ -301,8 +296,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
- ret = -EINVAL;
- goto err_disable_clk;
+ return -EINVAL;
}
adap = &i2c->adap;
@@ -320,31 +314,23 @@ static int mtk_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(adap);
if (ret < 0)
- goto err_disable_clk;
+ return ret;
dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);
return 0;
-
-err_disable_clk:
- clk_disable_unprepare(i2c->clk);
-
- return ret;
}
-static int mtk_i2c_remove(struct platform_device *pdev)
+static void mtk_i2c_remove(struct platform_device *pdev)
{
struct mtk_i2c *i2c = platform_get_drvdata(pdev);
- clk_disable_unprepare(i2c->clk);
i2c_del_adapter(&i2c->adap);
-
- return 0;
}
static struct platform_driver mtk_i2c_driver = {
.probe = mtk_i2c_probe,
- .remove = mtk_i2c_remove,
+ .remove_new = mtk_i2c_remove,
.driver = {
.name = "i2c-mt7621",
.of_match_table = i2c_mtk_dt_ids,
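The mt7621 probe above also shows the clock simplification used by several drivers in this series: devm_clk_get_enabled() combines devm_clk_get() with clk_prepare_enable() and registers the matching disable/unprepare as a devres action, so probe error paths and the remove callback no longer undo it by hand; dev_err_probe() is used where the drivers also want deferred-probe friendly error reporting. A sketch of the probe-side pattern, assuming a hypothetical device with a single unnamed clock:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_clk_setup(struct platform_device *pdev)
{
	struct clk *clk;

	/* Gets, prepares and enables the clock; undone automatically on detach. */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get and enable clock\n");

	dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk));
	return 0;
}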
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 878c076ebdc6..fd8403b07fa6 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -1084,7 +1084,7 @@ exit_disable_pm:
return rc;
}
-static int
+static void
mv64xxx_i2c_remove(struct platform_device *pd)
{
struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(pd);
@@ -1094,8 +1094,6 @@ mv64xxx_i2c_remove(struct platform_device *pd)
pm_runtime_disable(&pd->dev);
if (!pm_runtime_status_suspended(&pd->dev))
mv64xxx_i2c_runtime_suspend(&pd->dev);
-
- return 0;
}
static const struct dev_pm_ops mv64xxx_i2c_pm_ops = {
@@ -1107,7 +1105,7 @@ static const struct dev_pm_ops mv64xxx_i2c_pm_ops = {
static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
- .remove = mv64xxx_i2c_remove,
+ .remove_new = mv64xxx_i2c_remove,
.driver = {
.name = MV64XXX_I2C_CTLR_NAME,
.pm = &mv64xxx_i2c_pm_ops,
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index e0f3b3545cfe..1d76f1c4dc06 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -864,7 +864,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int mxs_i2c_remove(struct platform_device *pdev)
+static void mxs_i2c_remove(struct platform_device *pdev)
{
struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev);
@@ -874,8 +874,6 @@ static int mxs_i2c_remove(struct platform_device *pdev)
dma_release_channel(i2c->dmach);
writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
-
- return 0;
}
static struct platform_driver mxs_i2c_driver = {
@@ -884,7 +882,7 @@ static struct platform_driver mxs_i2c_driver = {
.of_match_table = mxs_i2c_dt_ids,
},
.probe = mxs_i2c_probe,
- .remove = mxs_i2c_remove,
+ .remove_new = mxs_i2c_remove,
};
static int __init mxs_i2c_init(void)
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index a2d12a5b1c34..212f412f1c74 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -970,12 +970,10 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
struct i2c_vendor_data *vendor = id->data;
u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
- dev = devm_kzalloc(&adev->dev, sizeof(struct nmk_i2c_dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&adev->dev, "cannot allocate memory\n");
- ret = -ENOMEM;
- goto err_no_mem;
- }
+ dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
dev->vendor = vendor;
dev->adev = adev;
nmk_i2c_of_probe(np, dev);
@@ -996,31 +994,20 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
resource_size(&adev->res));
- if (!dev->virtbase) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
+ if (!dev->virtbase)
+ return -ENOMEM;
dev->irq = adev->irq[0];
ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0,
DRIVER_NAME, dev);
- if (ret) {
- dev_err(&adev->dev, "cannot claim the irq %d\n", dev->irq);
- goto err_no_mem;
- }
-
- dev->clk = devm_clk_get(&adev->dev, NULL);
- if (IS_ERR(dev->clk)) {
- dev_err(&adev->dev, "could not get i2c clock\n");
- ret = PTR_ERR(dev->clk);
- goto err_no_mem;
- }
+ if (ret)
+ return dev_err_probe(&adev->dev, ret,
+ "cannot claim the irq %d\n", dev->irq);
- ret = clk_prepare_enable(dev->clk);
- if (ret) {
- dev_err(&adev->dev, "can't prepare_enable clock\n");
- goto err_no_mem;
- }
+ dev->clk = devm_clk_get_enabled(&adev->dev, NULL);
+ if (IS_ERR(dev->clk))
+ return dev_err_probe(&adev->dev, PTR_ERR(dev->clk),
+ "could enable i2c clock\n");
init_hw(dev);
@@ -1042,22 +1029,15 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
ret = i2c_add_adapter(adap);
if (ret)
- goto err_no_adap;
+ return ret;
pm_runtime_put(&adev->dev);
return 0;
-
- err_no_adap:
- clk_disable_unprepare(dev->clk);
- err_no_mem:
-
- return ret;
}
static void nmk_i2c_remove(struct amba_device *adev)
{
- struct resource *res = &adev->res;
struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
i2c_del_adapter(&dev->adap);
@@ -1066,8 +1046,6 @@ static void nmk_i2c_remove(struct amba_device *adev)
clear_all_interrupts(dev);
/* disable the controller */
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
- clk_disable_unprepare(dev->clk);
- release_mem_region(res->start, resource_size(res));
}
static struct i2c_vendor_data vendor_stn8815 = {
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 38d5864d0cb5..53b65ffb6a64 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2361,7 +2361,7 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
return 0;
}
-static int npcm_i2c_remove_bus(struct platform_device *pdev)
+static void npcm_i2c_remove_bus(struct platform_device *pdev)
{
unsigned long lock_flags;
struct npcm_i2c *bus = platform_get_drvdata(pdev);
@@ -2371,7 +2371,6 @@ static int npcm_i2c_remove_bus(struct platform_device *pdev)
npcm_i2c_disable(bus);
spin_unlock_irqrestore(&bus->lock, lock_flags);
i2c_del_adapter(&bus->adap);
- return 0;
}
static const struct of_device_id npcm_i2c_bus_of_table[] = {
@@ -2383,7 +2382,7 @@ MODULE_DEVICE_TABLE(of, npcm_i2c_bus_of_table);
static struct platform_driver npcm_i2c_bus_driver = {
.probe = npcm_i2c_probe_bus,
- .remove = npcm_i2c_remove_bus,
+ .remove_new = npcm_i2c_remove_bus,
.driver = {
.name = "nuvoton-i2c",
.of_match_table = npcm_i2c_bus_of_table,
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index a8b99e7f6262..26622d24bb1b 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
#include <asm/unaligned.h>
@@ -261,6 +262,8 @@ MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
static const struct property_entry ccgx_props[] = {
/* Use FW built for NVIDIA GPU only */
PROPERTY_ENTRY_STRING("firmware-name", "nvidia,gpu"),
+ /* USB-C doesn't power the system */
+ PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE),
{ }
};
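The property added above lands in a property_entry array; such arrays are typically exposed to a client device through a software node, which the client then reads back with the generic device_property_*() helpers. A generic sketch of that wiring, not this driver's exact code:

#include <linux/power_supply.h>
#include <linux/property.h>

/* Hypothetical build-time properties attached to an instantiated device. */
static const struct property_entry foo_props[] = {
	PROPERTY_ENTRY_STRING("firmware-name", "vendor,foo"),
	PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE),
	{ }
};

static const struct software_node foo_node = {
	.properties = foo_props,
};

/*
 * A client driver would read the value back with, e.g.:
 *	u8 scope;
 *	device_property_read_u8(dev, "scope", &scope);
 */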
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 2e575856c5cd..4ac77e57bbbf 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -552,28 +552,20 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
&clock_frequency);
i2c->bus_clock_khz = 100;
- i2c->clk = devm_clk_get(&pdev->dev, NULL);
-
- if (!IS_ERR(i2c->clk)) {
- int ret = clk_prepare_enable(i2c->clk);
-
- if (ret) {
- dev_err(&pdev->dev,
- "clk_prepare_enable failed: %d\n", ret);
- return ret;
- }
- i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000;
- if (clock_frequency_present)
- i2c->bus_clock_khz = clock_frequency / 1000;
- }
-
+ i2c->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(i2c->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
+ "devm_clk_get_optional_enabled failed\n");
+
+ i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000;
+ if (clock_frequency_present)
+ i2c->bus_clock_khz = clock_frequency / 1000;
if (i2c->ip_clock_khz == 0) {
if (of_property_read_u32(np, "opencores,ip-clock-frequency",
&val)) {
if (!clock_frequency_present) {
dev_err(&pdev->dev,
"Missing required parameter 'opencores,ip-clock-frequency'\n");
- clk_disable_unprepare(i2c->clk);
return -ENODEV;
}
i2c->ip_clock_khz = clock_frequency / 1000;
@@ -678,8 +670,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
i2c->reg_io_width);
- ret = -EINVAL;
- goto err_clk;
+ return -EINVAL;
}
}
@@ -710,13 +701,13 @@ static int ocores_i2c_probe(struct platform_device *pdev)
pdev->name, i2c);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_clk;
+ return ret;
}
}
ret = ocores_init(&pdev->dev, i2c);
if (ret)
- goto err_clk;
+ return ret;
/* hook up driver to tree */
platform_set_drvdata(pdev, i2c);
@@ -728,7 +719,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
if (ret)
- goto err_clk;
+ return ret;
/* add in known devices to the bus */
if (pdata) {
@@ -737,13 +728,9 @@ static int ocores_i2c_probe(struct platform_device *pdev)
}
return 0;
-
-err_clk:
- clk_disable_unprepare(i2c->clk);
- return ret;
}
-static int ocores_i2c_remove(struct platform_device *pdev)
+static void ocores_i2c_remove(struct platform_device *pdev)
{
struct ocores_i2c *i2c = platform_get_drvdata(pdev);
u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL);
@@ -754,11 +741,6 @@ static int ocores_i2c_remove(struct platform_device *pdev)
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
-
- if (!IS_ERR(i2c->clk))
- clk_disable_unprepare(i2c->clk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -771,28 +753,22 @@ static int ocores_i2c_suspend(struct device *dev)
ctrl &= ~(OCI2C_CTRL_EN | OCI2C_CTRL_IEN);
oc_setreg(i2c, OCI2C_CONTROL, ctrl);
- if (!IS_ERR(i2c->clk))
- clk_disable_unprepare(i2c->clk);
+ clk_disable_unprepare(i2c->clk);
return 0;
}
static int ocores_i2c_resume(struct device *dev)
{
struct ocores_i2c *i2c = dev_get_drvdata(dev);
+ unsigned long rate;
+ int ret;
- if (!IS_ERR(i2c->clk)) {
- unsigned long rate;
- int ret = clk_prepare_enable(i2c->clk);
-
- if (ret) {
- dev_err(dev,
- "clk_prepare_enable failed: %d\n", ret);
- return ret;
- }
- rate = clk_get_rate(i2c->clk) / 1000;
- if (rate)
- i2c->ip_clock_khz = rate;
- }
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "clk_prepare_enable failed\n");
+ rate = clk_get_rate(i2c->clk) / 1000;
+ if (rate)
+ i2c->ip_clock_khz = rate;
return ocores_init(dev, i2c);
}
@@ -804,7 +780,7 @@ static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
- .remove = ocores_i2c_remove,
+ .remove_new = ocores_i2c_remove,
.driver = {
.name = "ocores-i2c",
.of_match_table = ocores_i2c_match,
diff --git a/drivers/i2c/busses/i2c-octeon-platdrv.c b/drivers/i2c/busses/i2c-octeon-platdrv.c
index 0c227963c8d6..7d54b3203f71 100644
--- a/drivers/i2c/busses/i2c-octeon-platdrv.c
+++ b/drivers/i2c/busses/i2c-octeon-platdrv.c
@@ -253,12 +253,11 @@ out:
return result;
};
-static int octeon_i2c_remove(struct platform_device *pdev)
+static void octeon_i2c_remove(struct platform_device *pdev)
{
struct octeon_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
- return 0;
};
static const struct of_device_id octeon_i2c_match[] = {
@@ -270,7 +269,7 @@ MODULE_DEVICE_TABLE(of, octeon_i2c_match);
static struct platform_driver octeon_i2c_driver = {
.probe = octeon_i2c_probe,
- .remove = octeon_i2c_remove,
+ .remove_new = octeon_i2c_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = octeon_i2c_match,
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 4199f57a6bf2..58fd6fa3edf1 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1519,7 +1519,7 @@ err_disable_pm:
return r;
}
-static int omap_i2c_remove(struct platform_device *pdev)
+static void omap_i2c_remove(struct platform_device *pdev)
{
struct omap_i2c_dev *omap = platform_get_drvdata(pdev);
int ret;
@@ -1535,8 +1535,6 @@ static int omap_i2c_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
@@ -1588,7 +1586,7 @@ static const struct dev_pm_ops omap_i2c_pm_ops = {
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
- .remove = omap_i2c_remove,
+ .remove_new = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.pm = &omap_i2c_pm_ops,
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index 9f773b4f5ed8..17ef87d50f7c 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -232,13 +232,11 @@ static int i2c_opal_probe(struct platform_device *pdev)
return rc;
}
-static int i2c_opal_remove(struct platform_device *pdev)
+static void i2c_opal_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
i2c_del_adapter(adapter);
-
- return 0;
}
static const struct of_device_id i2c_opal_of_match[] = {
@@ -251,7 +249,7 @@ MODULE_DEVICE_TABLE(of, i2c_opal_of_match);
static struct platform_driver i2c_opal_driver = {
.probe = i2c_opal_probe,
- .remove = i2c_opal_remove,
+ .remove_new = i2c_opal_remove,
.driver = {
.name = "i2c-opal",
.of_match_table = i2c_opal_of_match,
diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c
index 99ddd8894964..5f0ef8c35141 100644
--- a/drivers/i2c/busses/i2c-owl.c
+++ b/drivers/i2c/busses/i2c-owl.c
@@ -461,21 +461,16 @@ static int owl_i2c_probe(struct platform_device *pdev)
return -EINVAL;
}
- i2c_dev->clk = devm_clk_get(dev, NULL);
+ i2c_dev->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(i2c_dev->clk)) {
- dev_err(dev, "failed to get clock\n");
+ dev_err(dev, "failed to enable clock\n");
return PTR_ERR(i2c_dev->clk);
}
- ret = clk_prepare_enable(i2c_dev->clk);
- if (ret)
- return ret;
-
i2c_dev->clk_rate = clk_get_rate(i2c_dev->clk);
if (!i2c_dev->clk_rate) {
dev_err(dev, "input clock rate should not be zero\n");
- ret = -EINVAL;
- goto disable_clk;
+ return -EINVAL;
}
init_completion(&i2c_dev->msg_complete);
@@ -496,15 +491,10 @@ static int owl_i2c_probe(struct platform_device *pdev)
i2c_dev);
if (ret) {
dev_err(dev, "failed to request irq %d\n", irq);
- goto disable_clk;
+ return ret;
}
return i2c_add_adapter(&i2c_dev->adap);
-
-disable_clk:
- clk_disable_unprepare(i2c_dev->clk);
-
- return ret;
}
static const struct of_device_id owl_i2c_of_match[] = {
diff --git a/drivers/i2c/busses/i2c-pasemi-platform.c b/drivers/i2c/busses/i2c-pasemi-platform.c
index e35945a91dbe..5fbfb9b41744 100644
--- a/drivers/i2c/busses/i2c-pasemi-platform.c
+++ b/drivers/i2c/busses/i2c-pasemi-platform.c
@@ -66,22 +66,18 @@ static int pasemi_platform_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency", &frequency))
frequency = I2C_MAX_STANDARD_MODE_FREQ;
- data->clk_ref = devm_clk_get(dev, NULL);
+ data->clk_ref = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(data->clk_ref))
return PTR_ERR(data->clk_ref);
- error = clk_prepare_enable(data->clk_ref);
- if (error)
- return error;
-
error = pasemi_platform_i2c_calc_clk_div(data, frequency);
if (error)
- goto out_clk_disable;
+ return error;
smbus->adapter.dev.of_node = pdev->dev.of_node;
error = pasemi_i2c_common_probe(smbus);
if (error)
- goto out_clk_disable;
+ return error;
irq_num = platform_get_irq(pdev, 0);
error = devm_request_irq(smbus->dev, irq_num, pasemi_irq_handler, 0, "pasemi_apple_i2c", (void *)smbus);
@@ -91,20 +87,9 @@ static int pasemi_platform_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
return 0;
-
-out_clk_disable:
- clk_disable_unprepare(data->clk_ref);
-
- return error;
}
-static int pasemi_platform_i2c_remove(struct platform_device *pdev)
-{
- struct pasemi_platform_i2c_data *data = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(data->clk_ref);
- return 0;
-}
+static void pasemi_platform_i2c_remove(struct platform_device *pdev) { }
static const struct of_device_id pasemi_platform_i2c_of_match[] = {
{ .compatible = "apple,t8103-i2c" },
@@ -119,7 +104,7 @@ static struct platform_driver pasemi_platform_i2c_driver = {
.of_match_table = pasemi_platform_i2c_of_match,
},
.probe = pasemi_platform_i2c_probe,
- .remove = pasemi_platform_i2c_remove,
+ .remove_new = pasemi_platform_i2c_remove,
};
module_platform_driver(pasemi_platform_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 86d4f75ef8d3..d2a9e7b61c1a 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -221,13 +221,11 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
return 0;
}
-static int i2c_pca_pf_remove(struct platform_device *pdev)
+static void i2c_pca_pf_remove(struct platform_device *pdev)
{
struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -241,7 +239,7 @@ MODULE_DEVICE_TABLE(of, i2c_pca_of_match_table);
static struct platform_driver i2c_pca_pf_driver = {
.probe = i2c_pca_pf_probe,
- .remove = i2c_pca_pf_remove,
+ .remove_new = i2c_pca_pf_remove,
.driver = {
.name = "i2c-pca-platform",
.of_match_table = of_match_ptr(i2c_pca_of_match_table),
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 50f21cdbe90d..82400057f810 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -743,14 +743,12 @@ out_clock:
return ret;
}
-static int i2c_pnx_remove(struct platform_device *pdev)
+static void i2c_pnx_remove(struct platform_device *pdev)
{
struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
i2c_del_adapter(&alg_data->adapter);
clk_disable_unprepare(alg_data->clk);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -768,7 +766,7 @@ static struct platform_driver i2c_pnx_driver = {
.pm = PNX_I2C_PM,
},
.probe = i2c_pnx_probe,
- .remove = i2c_pnx_remove,
+ .remove_new = i2c_pnx_remove,
};
static int __init i2c_adap_pnx_init(void)
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index ec706a3aba26..4996a628fdae 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -188,14 +188,12 @@ static const struct i2c_adapter_quirks i2c_powermac_quirks = {
.max_num_msgs = 1,
};
-static int i2c_powermac_remove(struct platform_device *dev)
+static void i2c_powermac_remove(struct platform_device *dev)
{
struct i2c_adapter *adapter = platform_get_drvdata(dev);
i2c_del_adapter(adapter);
memset(adapter, 0, sizeof(*adapter));
-
- return 0;
}
static u32 i2c_powermac_get_addr(struct i2c_adapter *adap,
@@ -439,7 +437,7 @@ static int i2c_powermac_probe(struct platform_device *dev)
static struct platform_driver i2c_powermac_driver = {
.probe = i2c_powermac_probe,
- .remove = i2c_powermac_remove,
+ .remove_new = i2c_powermac_remove,
.driver = {
.name = "i2c-powermac",
.bus = &platform_bus_type,
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f9fa5308556b..937f7eebe906 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1482,15 +1482,13 @@ ereqirq:
return ret;
}
-static int i2c_pxa_remove(struct platform_device *dev)
+static void i2c_pxa_remove(struct platform_device *dev)
{
struct pxa_i2c *i2c = platform_get_drvdata(dev);
i2c_del_adapter(&i2c->adap);
clk_disable_unprepare(i2c->clk);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1525,7 +1523,7 @@ static const struct dev_pm_ops i2c_pxa_dev_pm_ops = {
static struct platform_driver i2c_pxa_driver = {
.probe = i2c_pxa_probe,
- .remove = i2c_pxa_remove,
+ .remove_new = i2c_pxa_remove,
.driver = {
.name = "pxa2xx-i2c",
.pm = I2C_PXA_DEV_PM_OPS,
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index 01358472680c..622dc14add9d 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -581,8 +581,7 @@ static int cci_probe(struct platform_device *pdev)
/* Memory */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cci->base = devm_ioremap_resource(dev, r);
+ cci->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(cci->base))
return PTR_ERR(cci->base);
@@ -675,7 +674,7 @@ disable_clocks:
return ret;
}
-static int cci_remove(struct platform_device *pdev)
+static void cci_remove(struct platform_device *pdev)
{
struct cci *cci = platform_get_drvdata(pdev);
int i;
@@ -691,8 +690,6 @@ static int cci_remove(struct platform_device *pdev)
disable_irq(cci->irq);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
-
- return 0;
}
static const struct cci_data cci_v1_data = {
@@ -829,7 +826,7 @@ MODULE_DEVICE_TABLE(of, cci_dt_match);
static struct platform_driver qcom_cci_driver = {
.probe = cci_probe,
- .remove = cci_remove,
+ .remove_new = cci_remove,
.driver = {
.name = "i2c-qcom-cci",
.of_match_table = cci_dt_match,
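The CCI probe above also picks up devm_platform_get_and_ioremap_resource(), which folds platform_get_resource() and devm_ioremap_resource() into one call and can optionally hand back the struct resource. A minimal sketch with a hypothetical caller:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_regs(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* Pass NULL as the last argument if the resource itself is not needed. */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		dev_err(&pdev->dev, "failed to map registers\n");

	return base;
}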
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 83909b02a03e..b670a67c4fdd 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -936,14 +936,13 @@ err_dma:
return ret;
}
-static int geni_i2c_remove(struct platform_device *pdev)
+static void geni_i2c_remove(struct platform_device *pdev)
{
struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
i2c_del_adapter(&gi2c->adap);
release_gpi_dma(gi2c);
pm_runtime_disable(gi2c->se.dev);
- return 0;
}
static void geni_i2c_shutdown(struct platform_device *pdev)
@@ -1041,7 +1040,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
static struct platform_driver geni_i2c_driver = {
.probe = geni_i2c_probe,
- .remove = geni_i2c_remove,
+ .remove_new = geni_i2c_remove,
.shutdown = geni_i2c_shutdown,
.driver = {
.name = "geni_i2c",
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 78682388e02e..ae90170023b0 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1911,7 +1911,7 @@ fail_dma:
return ret;
}
-static int qup_i2c_remove(struct platform_device *pdev)
+static void qup_i2c_remove(struct platform_device *pdev)
{
struct qup_i2c_dev *qup = platform_get_drvdata(pdev);
@@ -1925,7 +1925,6 @@ static int qup_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&qup->adap);
pm_runtime_disable(qup->dev);
pm_runtime_set_suspended(qup->dev);
- return 0;
}
#ifdef CONFIG_PM
@@ -1985,7 +1984,7 @@ MODULE_DEVICE_TABLE(of, qup_i2c_dt_match);
static struct platform_driver qup_i2c_driver = {
.probe = qup_i2c_probe,
- .remove = qup_i2c_remove,
+ .remove_new = qup_i2c_remove,
.driver = {
.name = "i2c_qup",
.pm = &qup_i2c_qup_pm_ops,
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index cef82b205c26..2d9c37410ebd 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1155,7 +1155,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
return ret;
}
-static int rcar_i2c_remove(struct platform_device *pdev)
+static void rcar_i2c_remove(struct platform_device *pdev)
{
struct rcar_i2c_priv *priv = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -1167,8 +1167,6 @@ static int rcar_i2c_remove(struct platform_device *pdev)
if (priv->flags & ID_P_PM_BLOCKED)
pm_runtime_put(dev);
pm_runtime_disable(dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1204,7 +1202,7 @@ static struct platform_driver rcar_i2c_driver = {
.pm = DEV_PM_OPS,
},
.probe = rcar_i2c_probe,
- .remove = rcar_i2c_remove,
+ .remove_new = rcar_i2c_remove,
};
module_platform_driver(rcar_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 849848ccb080..5f8c0bd508d2 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -477,7 +477,7 @@ out:
return ret;
}
-static int riic_i2c_remove(struct platform_device *pdev)
+static void riic_i2c_remove(struct platform_device *pdev)
{
struct riic_dev *riic = platform_get_drvdata(pdev);
@@ -486,8 +486,6 @@ static int riic_i2c_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
i2c_del_adapter(&riic->adapter);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id riic_i2c_dt_ids[] = {
@@ -497,7 +495,7 @@ static const struct of_device_id riic_i2c_dt_ids[] = {
static struct platform_driver riic_i2c_driver = {
.probe = riic_i2c_probe,
- .remove = riic_i2c_remove,
+ .remove_new = riic_i2c_remove,
.driver = {
.name = "i2c-riic",
.of_match_table = riic_i2c_dt_ids,
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index b31cf4f18f85..a044ca0c35a1 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1372,7 +1372,7 @@ err_clk:
return ret;
}
-static int rk3x_i2c_remove(struct platform_device *pdev)
+static void rk3x_i2c_remove(struct platform_device *pdev)
{
struct rk3x_i2c *i2c = platform_get_drvdata(pdev);
@@ -1381,15 +1381,13 @@ static int rk3x_i2c_remove(struct platform_device *pdev)
clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb);
clk_unprepare(i2c->pclk);
clk_unprepare(i2c->clk);
-
- return 0;
}
static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume);
static struct platform_driver rk3x_i2c_driver = {
.probe = rk3x_i2c_probe,
- .remove = rk3x_i2c_remove,
+ .remove_new = rk3x_i2c_remove,
.driver = {
.name = "rk3x-i2c",
.of_match_table = rk3x_i2c_match,
diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c
index 56d0faee5c46..b0bfc96b9ede 100644
--- a/drivers/i2c/busses/i2c-rzv2m.c
+++ b/drivers/i2c/busses/i2c-rzv2m.c
@@ -50,9 +50,6 @@
#define IICB0MDSC BIT(7) /* Bus Mode */
#define IICB0SLSE BIT(1) /* Start condition output */
-#define bit_setl(addr, val) writel(readl(addr) | (val), (addr))
-#define bit_clrl(addr, val) writel(readl(addr) & ~(val), (addr))
-
struct rzv2m_i2c_priv {
void __iomem *base;
struct i2c_adapter adap;
@@ -78,6 +75,16 @@ static const struct bitrate_config bitrate_configs[] = {
[RZV2M_I2C_400K] = { 52, 900 },
};
+static inline void bit_setl(void __iomem *addr, u32 val)
+{
+ writel(readl(addr) | val, addr);
+}
+
+static inline void bit_clrl(void __iomem *addr, u32 val)
+{
+ writel(readl(addr) & ~val, addr);
+}
+
static irqreturn_t rzv2m_i2c_tia_irq_handler(int this_irq, void *dev_id)
{
struct rzv2m_i2c_priv *priv = dev_id;
@@ -382,6 +389,20 @@ static u32 rzv2m_i2c_func(struct i2c_adapter *adap)
I2C_FUNC_10BIT_ADDR;
}
+static int rzv2m_i2c_disable(struct device *dev, struct rzv2m_i2c_priv *priv)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ bit_clrl(priv->base + IICB0CTL0, IICB0IICE);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
static const struct i2c_adapter_quirks rzv2m_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
@@ -454,37 +475,29 @@ static int rzv2m_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
ret = i2c_add_numbered_adapter(adap);
- if (ret < 0)
+ if (ret < 0) {
+ rzv2m_i2c_disable(dev, priv);
pm_runtime_disable(dev);
+ }
return ret;
}
-static int rzv2m_i2c_remove(struct platform_device *pdev)
+static void rzv2m_i2c_remove(struct platform_device *pdev)
{
struct rzv2m_i2c_priv *priv = platform_get_drvdata(pdev);
struct device *dev = priv->adap.dev.parent;
i2c_del_adapter(&priv->adap);
- bit_clrl(priv->base + IICB0CTL0, IICB0IICE);
+ rzv2m_i2c_disable(dev, priv);
pm_runtime_disable(dev);
-
- return 0;
}
static int rzv2m_i2c_suspend(struct device *dev)
{
struct rzv2m_i2c_priv *priv = dev_get_drvdata(dev);
- int ret;
- ret = pm_runtime_resume_and_get(dev);
- if (ret < 0)
- return ret;
-
- bit_clrl(priv->base + IICB0CTL0, IICB0IICE);
- pm_runtime_put(dev);
-
- return 0;
+ return rzv2m_i2c_disable(dev, priv);
}
static int rzv2m_i2c_resume(struct device *dev)
@@ -523,7 +536,7 @@ static struct platform_driver rzv2m_i2c_driver = {
.pm = pm_sleep_ptr(&rzv2m_i2c_pm_ops),
},
.probe = rzv2m_i2c_probe,
- .remove = rzv2m_i2c_remove,
+ .remove_new = rzv2m_i2c_remove,
};
module_platform_driver(rzv2m_i2c_driver);
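The new rzv2m_i2c_disable() helper above wraps the usual runtime-PM bracket around a register write so that remove() and suspend() share one implementation. The underlying pairing is worth spelling out, since pm_runtime_resume_and_get() already drops the usage count when it fails; a generic sketch, not specific to this driver:

#include <linux/pm_runtime.h>

static int foo_touch_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count was already dropped on failure */

	/* ... access the hardware while it is guaranteed to be powered ... */

	pm_runtime_put(dev);
	return 0;
}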
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 45e9df81345a..28f0e5c64f32 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1114,7 +1114,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int s3c24xx_i2c_remove(struct platform_device *pdev)
+static void s3c24xx_i2c_remove(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
@@ -1123,8 +1123,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
i2c_del_adapter(&i2c->adap);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1172,7 +1170,7 @@ static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
- .remove = s3c24xx_i2c_remove,
+ .remove_new = s3c24xx_i2c_remove,
.id_table = s3c24xx_driver_ids,
.driver = {
.name = "s3c-i2c",
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 0239e134b90f..421735acfa14 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -13,9 +13,6 @@
#include <linux/i2c.h>
#include <linux/acpi.h>
-/* SMBUS HID definition as supported by Microsoft Windows */
-#define ACPI_SMBUS_MS_HID "SMB0001"
-
struct smbus_methods_t {
char *mt_info;
char *mt_sbr;
@@ -404,19 +401,17 @@ err:
return ret;
}
-static int smbus_cmi_remove(struct platform_device *device)
+static void smbus_cmi_remove(struct platform_device *device)
{
struct acpi_smbus_cmi *smbus_cmi = platform_get_drvdata(device);
i2c_del_adapter(&smbus_cmi->adapter);
kfree(smbus_cmi);
-
- return 0;
}
static struct platform_driver smbus_cmi_driver = {
.probe = smbus_cmi_probe,
- .remove = smbus_cmi_remove,
+ .remove_new = smbus_cmi_remove,
.driver = {
.name = "smbus_cmi",
.acpi_match_table = acpi_smbus_cmi_ids,
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 319d1fa617c8..1ad2a26156d1 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -443,9 +443,8 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
goto out0;
}
- id = kzalloc(sizeof(struct cami2c), GFP_KERNEL);
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
- dev_err(&pdev->dev, "no mem for private data\n");
ret = -ENOMEM;
goto out0;
}
@@ -536,7 +535,7 @@ out0:
return ret;
}
-static int sh7760_i2c_remove(struct platform_device *pdev)
+static void sh7760_i2c_remove(struct platform_device *pdev)
{
struct cami2c *id = platform_get_drvdata(pdev);
@@ -546,8 +545,6 @@ static int sh7760_i2c_remove(struct platform_device *pdev)
release_resource(id->ioarea);
kfree(id->ioarea);
kfree(id);
-
- return 0;
}
static struct platform_driver sh7760_i2c_drv = {
@@ -555,7 +552,7 @@ static struct platform_driver sh7760_i2c_drv = {
.name = SH7760_I2C_DEVNAME,
},
.probe = sh7760_i2c_probe,
- .remove = sh7760_i2c_remove,
+ .remove_new = sh7760_i2c_remove,
};
module_platform_driver(sh7760_i2c_drv);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 29330ee64c9c..21717b943a9e 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -956,14 +956,13 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
return 0;
}
-static int sh_mobile_i2c_remove(struct platform_device *dev)
+static void sh_mobile_i2c_remove(struct platform_device *dev)
{
struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);
i2c_del_adapter(&pd->adap);
sh_mobile_i2c_release_dma(pd);
pm_runtime_disable(&dev->dev);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1000,7 +999,7 @@ static struct platform_driver sh_mobile_i2c_driver = {
.pm = DEV_PM_OPS,
},
.probe = sh_mobile_i2c_probe,
- .remove = sh_mobile_i2c_remove,
+ .remove_new = sh_mobile_i2c_remove,
};
static int __init sh_mobile_i2c_adap_init(void)
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 87701744752f..18516bc64e04 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -126,7 +126,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
return ret;
}
-static int simtec_i2c_remove(struct platform_device *dev)
+static void simtec_i2c_remove(struct platform_device *dev)
{
struct simtec_i2c_data *pd = platform_get_drvdata(dev);
@@ -135,8 +135,6 @@ static int simtec_i2c_remove(struct platform_device *dev)
iounmap(pd->reg);
release_mem_region(pd->ioarea->start, resource_size(pd->ioarea));
kfree(pd);
-
- return 0;
}
/* device driver */
@@ -146,7 +144,7 @@ static struct platform_driver simtec_i2c_driver = {
.name = "simtec-i2c",
},
.probe = simtec_i2c_probe,
- .remove = simtec_i2c_remove,
+ .remove_new = simtec_i2c_remove,
};
module_platform_driver(simtec_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index f823913b75a6..25c3521cae0e 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -876,13 +876,11 @@ static int st_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int st_i2c_remove(struct platform_device *pdev)
+static void st_i2c_remove(struct platform_device *pdev)
{
struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adap);
-
- return 0;
}
static const struct of_device_id st_i2c_match[] = {
@@ -899,7 +897,7 @@ static struct platform_driver st_i2c_driver = {
.pm = pm_sleep_ptr(&st_i2c_pm),
},
.probe = st_i2c_probe,
- .remove = st_i2c_remove,
+ .remove_new = st_i2c_remove,
};
module_platform_driver(st_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index eebce7ecef25..6ad06a5a22b4 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -861,15 +861,13 @@ clk_free:
return ret;
}
-static int stm32f4_i2c_remove(struct platform_device *pdev)
+static void stm32f4_i2c_remove(struct platform_device *pdev)
{
struct stm32f4_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adap);
clk_unprepare(i2c_dev->clk);
-
- return 0;
}
static const struct of_device_id stm32f4_i2c_match[] = {
@@ -884,7 +882,7 @@ static struct platform_driver stm32f4_i2c_driver = {
.of_match_table = stm32f4_i2c_match,
},
.probe = stm32f4_i2c_probe,
- .remove = stm32f4_i2c_remove,
+ .remove_new = stm32f4_i2c_remove,
};
module_platform_driver(stm32f4_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index d1c59d83a65b..e897d9101434 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -2309,7 +2309,7 @@ clk_free:
return ret;
}
-static int stm32f7_i2c_remove(struct platform_device *pdev)
+static void stm32f7_i2c_remove(struct platform_device *pdev)
{
struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
@@ -2341,8 +2341,6 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
clk_disable_unprepare(i2c_dev->clk);
-
- return 0;
}
static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev)
@@ -2486,7 +2484,7 @@ static struct platform_driver stm32f7_i2c_driver = {
.pm = &stm32f7_i2c_pm_ops,
},
.probe = stm32f7_i2c_probe,
- .remove = stm32f7_i2c_remove,
+ .remove_new = stm32f7_i2c_remove,
};
module_platform_driver(stm32f7_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
index 9e3483f507ff..ad8270cdbd3e 100644
--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -239,15 +239,9 @@ static int p2wi_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- p2wi->clk = devm_clk_get(dev, NULL);
+ p2wi->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(p2wi->clk)) {
ret = PTR_ERR(p2wi->clk);
- dev_err(dev, "failed to retrieve clk: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(p2wi->clk);
- if (ret) {
dev_err(dev, "failed to enable clk: %d\n", ret);
return ret;
}
@@ -256,15 +250,14 @@ static int p2wi_probe(struct platform_device *pdev)
p2wi->rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(p2wi->rstc)) {
- ret = PTR_ERR(p2wi->rstc);
dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
- goto err_clk_disable;
+ return PTR_ERR(p2wi->rstc);
}
ret = reset_control_deassert(p2wi->rstc);
if (ret) {
dev_err(dev, "failed to deassert reset line: %d\n", ret);
- goto err_clk_disable;
+ return ret;
}
init_completion(&p2wi->complete);
@@ -307,26 +300,20 @@ static int p2wi_probe(struct platform_device *pdev)
err_reset_assert:
reset_control_assert(p2wi->rstc);
-err_clk_disable:
- clk_disable_unprepare(p2wi->clk);
-
return ret;
}
-static int p2wi_remove(struct platform_device *dev)
+static void p2wi_remove(struct platform_device *dev)
{
struct p2wi *p2wi = platform_get_drvdata(dev);
reset_control_assert(p2wi->rstc);
- clk_disable_unprepare(p2wi->clk);
i2c_del_adapter(&p2wi->adapter);
-
- return 0;
}
static struct platform_driver p2wi_driver = {
.probe = p2wi_probe,
- .remove = p2wi_remove,
+ .remove_new = p2wi_remove,
.driver = {
.name = "i2c-sunxi-p2wi",
.of_match_table = p2wi_of_match_table,
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 50d19cf99a03..4cc196ca8f6d 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -618,15 +618,13 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int synquacer_i2c_remove(struct platform_device *pdev)
+static void synquacer_i2c_remove(struct platform_device *pdev)
{
struct synquacer_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adapter);
if (!IS_ERR(i2c->pclk))
clk_disable_unprepare(i2c->pclk);
-
- return 0;
};
static const struct of_device_id synquacer_i2c_dt_ids[] __maybe_unused = {
@@ -645,7 +643,7 @@ MODULE_DEVICE_TABLE(acpi, synquacer_i2c_acpi_ids);
static struct platform_driver synquacer_i2c_driver = {
.probe = synquacer_i2c_probe,
- .remove = synquacer_i2c_remove,
+ .remove_new = synquacer_i2c_remove,
.driver = {
.name = "synquacer_i2c",
.of_match_table = of_match_ptr(synquacer_i2c_dt_ids),
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index 95139985b2d5..bc3f94561746 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -316,13 +316,11 @@ static int tegra_bpmp_i2c_probe(struct platform_device *pdev)
return i2c_add_adapter(&i2c->adapter);
}
-static int tegra_bpmp_i2c_remove(struct platform_device *pdev)
+static void tegra_bpmp_i2c_remove(struct platform_device *pdev)
{
struct tegra_bpmp_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adapter);
-
- return 0;
}
static const struct of_device_id tegra_bpmp_i2c_of_match[] = {
@@ -337,7 +335,7 @@ static struct platform_driver tegra_bpmp_i2c_driver = {
.of_match_table = tegra_bpmp_i2c_of_match,
},
.probe = tegra_bpmp_i2c_probe,
- .remove = tegra_bpmp_i2c_remove,
+ .remove_new = tegra_bpmp_i2c_remove,
};
module_platform_driver(tegra_bpmp_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 157066f06a32..bcbbf23aa530 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -249,8 +249,7 @@ struct tegra_i2c_hw_feature {
* @msg_read: indicates that the transfer is a read access
* @timings: i2c timings information like bus frequency
* @multimaster_mode: indicates that I2C controller is in multi-master mode
- * @tx_dma_chan: DMA transmit channel
- * @rx_dma_chan: DMA receive channel
+ * @dma_chan: DMA channel
* @dma_phys: handle to DMA resources
* @dma_buf: pointer to allocated DMA buffer
* @dma_buf_size: DMA buffer size
@@ -283,8 +282,7 @@ struct tegra_i2c_dev {
u8 *msg_buf;
struct completion dma_complete;
- struct dma_chan *tx_dma_chan;
- struct dma_chan *rx_dma_chan;
+ struct dma_chan *dma_chan;
unsigned int dma_buf_size;
struct device *dma_dev;
dma_addr_t dma_phys;
@@ -298,6 +296,9 @@ struct tegra_i2c_dev {
bool is_vi;
};
+#define IS_DVC(dev) (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && (dev)->is_dvc)
+#define IS_VI(dev) (IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) && (dev)->is_vi)
+
static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
unsigned int reg)
{
@@ -315,9 +316,9 @@ static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
*/
static u32 tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
{
- if (i2c_dev->is_dvc)
+ if (IS_DVC(i2c_dev))
reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
- else if (i2c_dev->is_vi)
+ else if (IS_VI(i2c_dev))
reg = 0xc00 + (reg << 2);
return reg;
@@ -330,7 +331,7 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
/* read back register to make sure that register writes completed */
if (reg != I2C_TX_FIFO)
readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
- else if (i2c_dev->is_vi)
+ else if (IS_VI(i2c_dev))
readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
}
@@ -393,16 +394,14 @@ static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len)
{
struct dma_async_tx_descriptor *dma_desc;
enum dma_transfer_direction dir;
- struct dma_chan *chan;
dev_dbg(i2c_dev->dev, "starting DMA for length: %zu\n", len);
reinit_completion(&i2c_dev->dma_complete);
dir = i2c_dev->msg_read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
- chan = i2c_dev->msg_read ? i2c_dev->rx_dma_chan : i2c_dev->tx_dma_chan;
- dma_desc = dmaengine_prep_slave_single(chan, i2c_dev->dma_phys,
+ dma_desc = dmaengine_prep_slave_single(i2c_dev->dma_chan, i2c_dev->dma_phys,
len, dir, DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
if (!dma_desc) {
@@ -415,7 +414,7 @@ static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len)
dma_desc->callback_param = i2c_dev;
dmaengine_submit(dma_desc);
- dma_async_issue_pending(chan);
+ dma_async_issue_pending(i2c_dev->dma_chan);
return 0;
}
@@ -428,25 +427,19 @@ static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev)
i2c_dev->dma_buf = NULL;
}
- if (i2c_dev->tx_dma_chan) {
- dma_release_channel(i2c_dev->tx_dma_chan);
- i2c_dev->tx_dma_chan = NULL;
- }
-
- if (i2c_dev->rx_dma_chan) {
- dma_release_channel(i2c_dev->rx_dma_chan);
- i2c_dev->rx_dma_chan = NULL;
+ if (i2c_dev->dma_chan) {
+ dma_release_channel(i2c_dev->dma_chan);
+ i2c_dev->dma_chan = NULL;
}
}
static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
{
- struct dma_chan *chan;
dma_addr_t dma_phys;
u32 *dma_buf;
int err;
- if (i2c_dev->is_vi)
+ if (IS_VI(i2c_dev))
return 0;
if (!i2c_dev->hw->has_apb_dma) {
@@ -459,25 +452,18 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
return 0;
}
- chan = dma_request_chan(i2c_dev->dev, "rx");
- if (IS_ERR(chan)) {
- err = PTR_ERR(chan);
- goto err_out;
- }
-
- i2c_dev->rx_dma_chan = chan;
-
- chan = dma_request_chan(i2c_dev->dev, "tx");
- if (IS_ERR(chan)) {
- err = PTR_ERR(chan);
+ /*
+ * The same channel will be used for both RX and TX.
+ * Keeping the name as "tx" for backward compatibility
+ * with existing devicetrees.
+ */
+ i2c_dev->dma_chan = dma_request_chan(i2c_dev->dev, "tx");
+ if (IS_ERR(i2c_dev->dma_chan)) {
+ err = PTR_ERR(i2c_dev->dma_chan);
goto err_out;
}
- i2c_dev->tx_dma_chan = chan;
-
- WARN_ON(i2c_dev->tx_dma_chan->device != i2c_dev->rx_dma_chan->device);
- i2c_dev->dma_dev = chan->device->dev;
-
+ i2c_dev->dma_dev = i2c_dev->dma_chan->device->dev;
i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len +
I2C_PACKET_HEADER_SIZE;
@@ -639,7 +625,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
WARN_ON_ONCE(err);
- if (i2c_dev->is_dvc)
+ if (IS_DVC(i2c_dev))
tegra_dvc_init(i2c_dev);
val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN |
@@ -651,7 +637,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
i2c_writel(i2c_dev, val, I2C_CNFG);
i2c_writel(i2c_dev, 0, I2C_INT_MASK);
- if (i2c_dev->is_vi)
+ if (IS_VI(i2c_dev))
tegra_i2c_vi_init(i2c_dev);
switch (t->bus_freq_hz) {
@@ -703,7 +689,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
return err;
}
- if (!i2c_dev->is_dvc && !i2c_dev->is_vi) {
+ if (!IS_DVC(i2c_dev) && !IS_VI(i2c_dev)) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL;
@@ -846,7 +832,7 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
i2c_dev->msg_buf_remaining = buf_remaining;
i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
- if (i2c_dev->is_vi)
+ if (IS_VI(i2c_dev))
i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
else
i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
@@ -933,7 +919,7 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
}
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
- if (i2c_dev->is_dvc)
+ if (IS_DVC(i2c_dev))
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
/*
@@ -972,15 +958,11 @@ err:
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
- if (i2c_dev->is_dvc)
+ if (IS_DVC(i2c_dev))
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
if (i2c_dev->dma_mode) {
- if (i2c_dev->msg_read)
- dmaengine_terminate_async(i2c_dev->rx_dma_chan);
- else
- dmaengine_terminate_async(i2c_dev->tx_dma_chan);
-
+ dmaengine_terminate_async(i2c_dev->dma_chan);
complete(&i2c_dev->dma_complete);
}
@@ -994,7 +976,6 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
{
struct dma_slave_config slv_config = {0};
u32 val, reg, dma_burst, reg_offset;
- struct dma_chan *chan;
int err;
if (i2c_dev->hw->has_mst_fifo)
@@ -1011,7 +992,6 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
dma_burst = 8;
if (i2c_dev->msg_read) {
- chan = i2c_dev->rx_dma_chan;
reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_RX_FIFO);
slv_config.src_addr = i2c_dev->base_phys + reg_offset;
@@ -1023,7 +1003,6 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
else
val = I2C_FIFO_CONTROL_RX_TRIG(dma_burst);
} else {
- chan = i2c_dev->tx_dma_chan;
reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_TX_FIFO);
slv_config.dst_addr = i2c_dev->base_phys + reg_offset;
@@ -1037,7 +1016,7 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
}
slv_config.device_fc = true;
- err = dmaengine_slave_config(chan, &slv_config);
+ err = dmaengine_slave_config(i2c_dev->dma_chan, &slv_config);
if (err) {
dev_err(i2c_dev->dev, "DMA config failed: %d\n", err);
dev_err(i2c_dev->dev, "falling back to PIO\n");
@@ -1347,13 +1326,8 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
* performs synchronization after the transfer's termination
* and we want to get a completion if transfer succeeded.
*/
- dmaengine_synchronize(i2c_dev->msg_read ?
- i2c_dev->rx_dma_chan :
- i2c_dev->tx_dma_chan);
-
- dmaengine_terminate_sync(i2c_dev->msg_read ?
- i2c_dev->rx_dma_chan :
- i2c_dev->tx_dma_chan);
+ dmaengine_synchronize(i2c_dev->dma_chan);
+ dmaengine_terminate_sync(i2c_dev->dma_chan);
if (!time_left && !completion_done(&i2c_dev->dma_complete)) {
dev_err(i2c_dev->dev, "DMA transfer timed out\n");
@@ -1654,13 +1628,17 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
static const struct of_device_id tegra_i2c_of_match[] = {
{ .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, },
{ .compatible = "nvidia,tegra186-i2c", .data = &tegra186_i2c_hw, },
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
{ .compatible = "nvidia,tegra210-i2c-vi", .data = &tegra210_i2c_hw, },
+#endif
{ .compatible = "nvidia,tegra210-i2c", .data = &tegra210_i2c_hw, },
{ .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, },
{ .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, },
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC)
{ .compatible = "nvidia,tegra20-i2c-dvc", .data = &tegra20_i2c_hw, },
+#endif
{},
};
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
@@ -1675,10 +1653,12 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
multi_mode = device_property_read_bool(i2c_dev->dev, "multi-master");
i2c_dev->multimaster_mode = multi_mode;
- if (of_device_is_compatible(np, "nvidia,tegra20-i2c-dvc"))
+ if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) &&
+ of_device_is_compatible(np, "nvidia,tegra20-i2c-dvc"))
i2c_dev->is_dvc = true;
- if (of_device_is_compatible(np, "nvidia,tegra210-i2c-vi"))
+ if (IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) &&
+ of_device_is_compatible(np, "nvidia,tegra210-i2c-vi"))
i2c_dev->is_vi = true;
}
@@ -1707,7 +1687,7 @@ static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev)
if (i2c_dev->hw == &tegra20_i2c_hw || i2c_dev->hw == &tegra30_i2c_hw)
i2c_dev->clocks[i2c_dev->nclocks++].id = "fast-clk";
- if (i2c_dev->is_vi)
+ if (IS_VI(i2c_dev))
i2c_dev->clocks[i2c_dev->nclocks++].id = "slow";
err = devm_clk_bulk_get(i2c_dev->dev, i2c_dev->nclocks,
@@ -1825,7 +1805,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
* VI I2C device shouldn't be marked as IRQ-safe because VI I2C won't
* be used for atomic transfers.
*/
- if (!i2c_dev->is_vi)
+ if (!IS_VI(i2c_dev))
pm_runtime_irq_safe(i2c_dev->dev);
pm_runtime_enable(i2c_dev->dev);
@@ -1868,7 +1848,7 @@ release_clocks:
return err;
}
-static int tegra_i2c_remove(struct platform_device *pdev)
+static void tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
@@ -1877,8 +1857,6 @@ static int tegra_i2c_remove(struct platform_device *pdev)
tegra_i2c_release_dma(i2c_dev);
tegra_i2c_release_clocks(i2c_dev);
-
- return 0;
}
static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev)
@@ -1899,7 +1877,7 @@ static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev)
* power ON/OFF during runtime PM resume/suspend, meaning that
* controller needs to be re-initialized after power ON.
*/
- if (i2c_dev->is_vi) {
+ if (IS_VI(i2c_dev)) {
err = tegra_i2c_init(i2c_dev);
if (err)
goto disable_clocks;
@@ -1987,7 +1965,7 @@ MODULE_DEVICE_TABLE(acpi, tegra_i2c_acpi_match);
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
- .remove = tegra_i2c_remove,
+ .remove_new = tegra_i2c_remove,
.driver = {
.name = "tegra-i2c",
.of_match_table = tegra_i2c_of_match,
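Besides collapsing the separate RX/TX DMA channels into one, the Tegra patch gates the DVC- and VI-specific code behind IS_ENABLED() checks, so kernels built without those SoC families can discard the branches at compile time while the source still compiles everywhere. A reduced sketch of that pattern, with hypothetical names:

#include <linux/kconfig.h>
#include <linux/types.h>

struct foo_dev {
	bool is_vi;
};

/* Constant-false when the SoC support is compiled out, so the branch folds away. */
#define FOO_IS_VI(d)	(IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) && (d)->is_vi)

static unsigned int foo_reg_addr(struct foo_dev *d, unsigned int reg)
{
	if (FOO_IS_VI(d))
		return 0xc00 + (reg << 2);

	return reg;
}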
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 7279ca0eaa2d..d1fa9ff5aeab 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -226,10 +226,8 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (!dev)
goto error;
- }
dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index d7b622891e52..dbc91c7c3788 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -540,21 +540,16 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv->clk = devm_clk_get(dev, NULL);
+ priv->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to get clock\n");
+ dev_err(dev, "failed to enable clock\n");
return PTR_ERR(priv->clk);
}
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
clk_rate = clk_get_rate(priv->clk);
if (!clk_rate) {
dev_err(dev, "input clock rate should not be zero\n");
- ret = -EINVAL;
- goto disable_clk;
+ return -EINVAL;
}
priv->clk_cycle = clk_rate / bus_speed;
@@ -575,25 +570,17 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
pdev->name, priv);
if (ret) {
dev_err(dev, "failed to request irq %d\n", irq);
- goto disable_clk;
+ return ret;
}
- ret = i2c_add_adapter(&priv->adap);
-disable_clk:
- if (ret)
- clk_disable_unprepare(priv->clk);
-
- return ret;
+ return i2c_add_adapter(&priv->adap);
}
-static int uniphier_fi2c_remove(struct platform_device *pdev)
+static void uniphier_fi2c_remove(struct platform_device *pdev)
{
struct uniphier_fi2c_priv *priv = platform_get_drvdata(pdev);
i2c_del_adapter(&priv->adap);
- clk_disable_unprepare(priv->clk);
-
- return 0;
}
static int __maybe_unused uniphier_fi2c_suspend(struct device *dev)
@@ -631,7 +618,7 @@ MODULE_DEVICE_TABLE(of, uniphier_fi2c_match);
static struct platform_driver uniphier_fi2c_drv = {
.probe = uniphier_fi2c_probe,
- .remove = uniphier_fi2c_remove,
+ .remove_new = uniphier_fi2c_remove,
.driver = {
.name = "uniphier-fi2c",
.of_match_table = uniphier_fi2c_match,
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index e3ebae381f08..854ac25b5862 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -335,21 +335,16 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv->clk = devm_clk_get(dev, NULL);
+ priv->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to get clock\n");
+ dev_err(dev, "failed to enable clock\n");
return PTR_ERR(priv->clk);
}
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
clk_rate = clk_get_rate(priv->clk);
if (!clk_rate) {
dev_err(dev, "input clock rate should not be zero\n");
- ret = -EINVAL;
- goto disable_clk;
+ return -EINVAL;
}
priv->clk_cycle = clk_rate / bus_speed;
@@ -369,25 +364,17 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
priv);
if (ret) {
dev_err(dev, "failed to request irq %d\n", irq);
- goto disable_clk;
+ return ret;
}
- ret = i2c_add_adapter(&priv->adap);
-disable_clk:
- if (ret)
- clk_disable_unprepare(priv->clk);
-
- return ret;
+ return i2c_add_adapter(&priv->adap);
}
-static int uniphier_i2c_remove(struct platform_device *pdev)
+static void uniphier_i2c_remove(struct platform_device *pdev)
{
struct uniphier_i2c_priv *priv = platform_get_drvdata(pdev);
i2c_del_adapter(&priv->adap);
- clk_disable_unprepare(priv->clk);
-
- return 0;
}
static int __maybe_unused uniphier_i2c_suspend(struct device *dev)
@@ -425,7 +412,7 @@ MODULE_DEVICE_TABLE(of, uniphier_i2c_match);
static struct platform_driver uniphier_i2c_drv = {
.probe = uniphier_i2c_probe,
- .remove = uniphier_i2c_remove,
+ .remove_new = uniphier_i2c_remove,
.driver = {
.name = "uniphier-i2c",
.of_match_table = uniphier_i2c_match,
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 1ab419f8fa52..76abfa77e200 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -64,15 +64,13 @@ static const struct i2c_algo_bit_data i2c_versatile_algo = {
static int i2c_versatile_probe(struct platform_device *dev)
{
struct i2c_versatile *i2c;
- struct resource *r;
int ret;
i2c = devm_kzalloc(&dev->dev, sizeof(struct i2c_versatile), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- i2c->base = devm_ioremap_resource(&dev->dev, r);
+ i2c->base = devm_platform_get_and_ioremap_resource(dev, 0, NULL);
if (IS_ERR(i2c->base))
return PTR_ERR(i2c->base);
@@ -96,12 +94,11 @@ static int i2c_versatile_probe(struct platform_device *dev)
return 0;
}
-static int i2c_versatile_remove(struct platform_device *dev)
+static void i2c_versatile_remove(struct platform_device *dev)
{
struct i2c_versatile *i2c = platform_get_drvdata(dev);
i2c_del_adapter(&i2c->adap);
- return 0;
}
static const struct of_device_id i2c_versatile_match[] = {
@@ -112,7 +109,7 @@ MODULE_DEVICE_TABLE(of, i2c_versatile_match);
static struct platform_driver i2c_versatile_driver = {
.probe = i2c_versatile_probe,
- .remove = i2c_versatile_remove,
+ .remove_new = i2c_versatile_remove,
.driver = {
.name = "versatile-i2c",
.of_match_table = i2c_versatile_match,
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 8b5322c3bce0..9e153b5b0e8e 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -407,20 +407,18 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int vprbrd_i2c_remove(struct platform_device *pdev)
+static void vprbrd_i2c_remove(struct platform_device *pdev)
{
struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&vb_i2c->i2c);
-
- return 0;
}
static struct platform_driver vprbrd_i2c_driver = {
.driver.name = "viperboard-i2c",
.driver.owner = THIS_MODULE,
.probe = vprbrd_i2c_probe,
- .remove = vprbrd_i2c_remove,
+ .remove_new = vprbrd_i2c_remove,
};
static int __init vprbrd_i2c_init(void)
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index 7d4bc8736079..76118abc6e10 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -372,7 +372,6 @@ static int wmt_i2c_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct wmt_i2c_dev *i2c_dev;
struct i2c_adapter *adap;
- struct resource *res;
int err;
u32 clk_rate;
@@ -380,8 +379,7 @@ static int wmt_i2c_probe(struct platform_device *pdev)
if (!i2c_dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c_dev->base = devm_ioremap_resource(&pdev->dev, res);
+ i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(i2c_dev->base))
return PTR_ERR(i2c_dev->base);
@@ -436,7 +434,7 @@ static int wmt_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int wmt_i2c_remove(struct platform_device *pdev)
+static void wmt_i2c_remove(struct platform_device *pdev)
{
struct wmt_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
@@ -444,8 +442,6 @@ static int wmt_i2c_remove(struct platform_device *pdev)
writew(0, i2c_dev->base + REG_IMR);
clk_disable_unprepare(i2c_dev->clk);
i2c_del_adapter(&i2c_dev->adapter);
-
- return 0;
}
static const struct of_device_id wmt_i2c_dt_ids[] = {
@@ -455,7 +451,7 @@ static const struct of_device_id wmt_i2c_dt_ids[] = {
static struct platform_driver wmt_i2c_driver = {
.probe = wmt_i2c_probe,
- .remove = wmt_i2c_remove,
+ .remove_new = wmt_i2c_remove,
.driver = {
.name = "wmt-i2c",
.of_match_table = wmt_i2c_dt_ids,
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 3538d36368a9..fbc1ffbd2fa7 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -560,7 +560,7 @@ mbox_err:
return rc;
}
-static int xgene_slimpro_i2c_remove(struct platform_device *pdev)
+static void xgene_slimpro_i2c_remove(struct platform_device *pdev)
{
struct slimpro_i2c_dev *ctx = platform_get_drvdata(pdev);
@@ -570,8 +570,6 @@ static int xgene_slimpro_i2c_remove(struct platform_device *pdev)
mbox_free_channel(ctx->mbox_chan);
else
pcc_mbox_free_channel(ctx->pcc_chan);
-
- return 0;
}
static const struct of_device_id xgene_slimpro_i2c_dt_ids[] = {
@@ -591,7 +589,7 @@ MODULE_DEVICE_TABLE(acpi, xgene_slimpro_i2c_acpi_ids);
static struct platform_driver xgene_slimpro_i2c_driver = {
.probe = xgene_slimpro_i2c_probe,
- .remove = xgene_slimpro_i2c_remove,
+ .remove_new = xgene_slimpro_i2c_remove,
.driver = {
.name = "xgene-slimpro-i2c",
.of_match_table = of_match_ptr(xgene_slimpro_i2c_dt_ids),
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 8a3d9817cb41..b3bb97762c85 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -721,6 +721,8 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
wakeup_req = 1;
wakeup_code = STATE_ERROR;
}
+ /* don't try to handle other events */
+ goto out;
}
if (pend & XIIC_INTR_RX_FULL_MASK) {
/* Receive register/FIFO is full */
@@ -1256,16 +1258,11 @@ static int xiic_i2c_probe(struct platform_device *pdev)
mutex_init(&i2c->lock);
- i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(i2c->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
- "input clock not found.\n");
+ "failed to enable input clock.\n");
- ret = clk_prepare_enable(i2c->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock.\n");
- return ret;
- }
i2c->dev = &pdev->dev;
pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
pm_runtime_use_autosuspend(i2c->dev);
@@ -1286,7 +1283,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_clk_dis;
+ goto err_pm_disable;
}
i2c->singlemaster =
@@ -1307,14 +1304,14 @@ static int xiic_i2c_probe(struct platform_device *pdev)
ret = xiic_reinit(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot xiic_reinit\n");
- goto err_clk_dis;
+ goto err_pm_disable;
}
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
if (ret) {
xiic_deinit(i2c);
- goto err_clk_dis;
+ goto err_pm_disable;
}
if (pdata) {
@@ -1328,14 +1325,14 @@ static int xiic_i2c_probe(struct platform_device *pdev)
return 0;
-err_clk_dis:
+err_pm_disable:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(i2c->clk);
+
return ret;
}
-static int xiic_i2c_remove(struct platform_device *pdev)
+static void xiic_i2c_remove(struct platform_device *pdev)
{
struct xiic_i2c *i2c = platform_get_drvdata(pdev);
int ret;
@@ -1352,12 +1349,9 @@ static int xiic_i2c_remove(struct platform_device *pdev)
xiic_deinit(i2c);
pm_runtime_put_sync(i2c->dev);
- clk_disable_unprepare(i2c->clk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
-
- return 0;
}
static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev)
@@ -1390,7 +1384,7 @@ static const struct dev_pm_ops xiic_dev_pm_ops = {
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
- .remove = xiic_i2c_remove,
+ .remove_new = xiic_i2c_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(xiic_of_match),
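
The xiic change above swaps devm_clk_get() plus clk_prepare_enable() for devm_clk_get_enabled(), which keeps the clock enabled until the device is unbound; that is why clk_disable_unprepare() disappears from the error path and from remove(). A hedged sketch with a hypothetical foo_init_clk() helper:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_init_clk(struct device *dev)
{
	struct clk *clk;

	/* acquired, prepared and enabled; all undone automatically on unbind */
	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get and enable clock\n");

	return 0;
}
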
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 4e3b11c0f732..f59e8c544f36 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -559,7 +559,7 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int xlp9xx_i2c_remove(struct platform_device *pdev)
+static void xlp9xx_i2c_remove(struct platform_device *pdev)
{
struct xlp9xx_i2c_dev *priv;
@@ -568,8 +568,6 @@ static int xlp9xx_i2c_remove(struct platform_device *pdev)
synchronize_irq(priv->irq);
i2c_del_adapter(&priv->adapter);
xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, 0);
-
- return 0;
}
#ifdef CONFIG_ACPI
@@ -583,7 +581,7 @@ MODULE_DEVICE_TABLE(acpi, xlp9xx_i2c_acpi_ids);
static struct platform_driver xlp9xx_i2c_driver = {
.probe = xlp9xx_i2c_probe,
- .remove = xlp9xx_i2c_remove,
+ .remove_new = xlp9xx_i2c_remove,
.driver = {
.name = "xlp9xx-i2c",
.acpi_match_table = ACPI_PTR(xlp9xx_i2c_acpi_ids),
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 7b42a18bd05c..83c1db610f54 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -523,14 +523,12 @@ static void scx200_cleanup_iface(struct scx200_acb_iface *iface)
kfree(iface);
}
-static int scx200_remove(struct platform_device *pdev)
+static void scx200_remove(struct platform_device *pdev)
{
struct scx200_acb_iface *iface;
iface = platform_get_drvdata(pdev);
scx200_cleanup_iface(iface);
-
- return 0;
}
static struct platform_driver scx200_pci_driver = {
@@ -538,7 +536,7 @@ static struct platform_driver scx200_pci_driver = {
.name = "cs5535-smb",
},
.probe = scx200_probe,
- .remove = scx200_remove,
+ .remove_new = scx200_remove,
};
static const struct pci_device_id scx200_isa[] = {
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index ae3af738b03f..60746652fd52 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -114,6 +114,25 @@ const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
}
EXPORT_SYMBOL_GPL(i2c_match_id);
+const void *i2c_get_match_data(const struct i2c_client *client)
+{
+ struct i2c_driver *driver = to_i2c_driver(client->dev.driver);
+ const struct i2c_device_id *match;
+ const void *data;
+
+ data = device_get_match_data(&client->dev);
+ if (!data) {
+ match = i2c_match_id(driver->id_table, client);
+ if (!match)
+ return NULL;
+
+ data = (const void *)match->driver_data;
+ }
+
+ return data;
+}
+EXPORT_SYMBOL(i2c_get_match_data);
+
static int i2c_device_match(struct device *dev, struct device_driver *drv)
{
struct i2c_client *client = i2c_verify_client(dev);
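
i2c_get_match_data() is new in this series: it tries device_get_match_data() (OF/ACPI) first and falls back to the driver's i2c_device_id table. A sketch of how a client driver might use it; the foo names and the chip-info structure are illustrative only:

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

struct foo_chip_info {
	unsigned int resolution;
};

static int foo_probe(struct i2c_client *client)
{
	const struct foo_chip_info *info;

	/* OF/ACPI match data first, then the i2c_device_id table */
	info = i2c_get_match_data(client);
	if (!info)
		return -ENODEV;

	/* ... use info->resolution ... */
	return 0;
}
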
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 1c78657631f4..24168e9f7df4 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -174,13 +174,12 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
return ret;
}
-static int i2c_arbitrator_remove(struct platform_device *pdev)
+static void i2c_arbitrator_remove(struct platform_device *pdev)
{
struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
i2c_mux_del_adapters(muxc);
i2c_put_adapter(muxc->parent);
- return 0;
}
static const struct of_device_id i2c_arbitrator_of_match[] = {
@@ -191,7 +190,7 @@ MODULE_DEVICE_TABLE(of, i2c_arbitrator_of_match);
static struct platform_driver i2c_arbitrator_driver = {
.probe = i2c_arbitrator_probe,
- .remove = i2c_arbitrator_remove,
+ .remove_new = i2c_arbitrator_remove,
.driver = {
.name = "i2c-arb-gpio-challenge",
.of_match_table = i2c_arbitrator_of_match,
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index f7a7405d4350..a3a122fae71e 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -282,7 +282,7 @@ err_rollback:
return err;
}
-static int i2c_demux_pinctrl_remove(struct platform_device *pdev)
+static void i2c_demux_pinctrl_remove(struct platform_device *pdev)
{
struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev);
int i;
@@ -296,8 +296,6 @@ static int i2c_demux_pinctrl_remove(struct platform_device *pdev)
of_node_put(priv->chan[i].parent_np);
of_changeset_destroy(&priv->chan[i].chgset);
}
-
- return 0;
}
static const struct of_device_id i2c_demux_pinctrl_of_match[] = {
@@ -312,7 +310,7 @@ static struct platform_driver i2c_demux_pinctrl_driver = {
.of_match_table = i2c_demux_pinctrl_of_match,
},
.probe = i2c_demux_pinctrl_probe,
- .remove = i2c_demux_pinctrl_remove,
+ .remove_new = i2c_demux_pinctrl_remove,
};
module_platform_driver(i2c_demux_pinctrl_driver);
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 73a23e117ebe..5d5cbe0130cd 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -225,14 +225,12 @@ alloc_failed:
return ret;
}
-static int i2c_mux_gpio_remove(struct platform_device *pdev)
+static void i2c_mux_gpio_remove(struct platform_device *pdev)
{
struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
i2c_mux_del_adapters(muxc);
i2c_put_adapter(muxc->parent);
-
- return 0;
}
static const struct of_device_id i2c_mux_gpio_of_match[] = {
@@ -243,7 +241,7 @@ MODULE_DEVICE_TABLE(of, i2c_mux_gpio_of_match);
static struct platform_driver i2c_mux_gpio_driver = {
.probe = i2c_mux_gpio_probe,
- .remove = i2c_mux_gpio_remove,
+ .remove_new = i2c_mux_gpio_remove,
.driver = {
.name = "i2c-mux-gpio",
.of_match_table = i2c_mux_gpio_of_match,
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index 33024acaac02..0405af0e1510 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -142,19 +142,17 @@ err_parent:
return ret;
}
-static int i2c_mux_remove(struct platform_device *pdev)
+static void i2c_mux_remove(struct platform_device *pdev)
{
struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
i2c_mux_del_adapters(muxc);
i2c_put_adapter(muxc->parent);
-
- return 0;
}
static struct platform_driver i2c_mux_driver = {
.probe = i2c_mux_probe,
- .remove = i2c_mux_remove,
+ .remove_new = i2c_mux_remove,
.driver = {
.name = "i2c-mux-gpmux",
.of_match_table = i2c_mux_of_match,
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
index 1a879f6a31ef..3dda00f1df78 100644
--- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -170,12 +170,11 @@ virt_reg_failed:
return err;
}
-static int mlxcpld_mux_remove(struct platform_device *pdev)
+static void mlxcpld_mux_remove(struct platform_device *pdev)
{
struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
i2c_mux_del_adapters(muxc);
- return 0;
}
static struct platform_driver mlxcpld_mux_driver = {
@@ -183,7 +182,7 @@ static struct platform_driver mlxcpld_mux_driver = {
.name = "i2c-mux-mlxcpld",
},
.probe = mlxcpld_mux_probe,
- .remove = mlxcpld_mux_remove,
+ .remove_new = mlxcpld_mux_remove,
};
module_platform_driver(mlxcpld_mux_driver);
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index d5ad904756fd..18236b9fa14a 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -166,14 +166,12 @@ err_put_parent:
return ret;
}
-static int i2c_mux_pinctrl_remove(struct platform_device *pdev)
+static void i2c_mux_pinctrl_remove(struct platform_device *pdev)
{
struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
i2c_mux_del_adapters(muxc);
i2c_put_adapter(muxc->parent);
-
- return 0;
}
static const struct of_device_id i2c_mux_pinctrl_of_match[] = {
@@ -188,7 +186,7 @@ static struct platform_driver i2c_mux_pinctrl_driver = {
.of_match_table = i2c_mux_pinctrl_of_match,
},
.probe = i2c_mux_pinctrl_probe,
- .remove = i2c_mux_pinctrl_remove,
+ .remove_new = i2c_mux_pinctrl_remove,
};
module_platform_driver(i2c_mux_pinctrl_driver);
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 30a6de1694e0..9efc1ed01577 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -233,14 +233,12 @@ err_put_parent:
return ret;
}
-static int i2c_mux_reg_remove(struct platform_device *pdev)
+static void i2c_mux_reg_remove(struct platform_device *pdev)
{
struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
i2c_mux_del_adapters(muxc);
i2c_put_adapter(muxc->parent);
-
- return 0;
}
static const struct of_device_id i2c_mux_reg_of_match[] = {
@@ -251,7 +249,7 @@ MODULE_DEVICE_TABLE(of, i2c_mux_reg_of_match);
static struct platform_driver i2c_mux_reg_driver = {
.probe = i2c_mux_reg_probe,
- .remove = i2c_mux_reg_remove,
+ .remove_new = i2c_mux_reg_remove,
.driver = {
.name = "i2c-mux-reg",
.of_match_table = of_match_ptr(i2c_mux_reg_of_match),
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index e3f454123805..0d63b732ef0c 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -92,6 +92,7 @@
#define SVC_I3C_MINTCLR 0x094
#define SVC_I3C_MINTMASKED 0x098
#define SVC_I3C_MERRWARN 0x09C
+#define SVC_I3C_MERRWARN_NACK BIT(2)
#define SVC_I3C_MDMACTRL 0x0A0
#define SVC_I3C_MDATACTRL 0x0AC
#define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
@@ -145,6 +146,11 @@ struct svc_i3c_xfer {
struct svc_i3c_cmd cmds[];
};
+struct svc_i3c_regs_save {
+ u32 mconfig;
+ u32 mdynaddr;
+};
+
/**
* struct svc_i3c_master - Silvaco I3C Master structure
* @base: I3C master controller
@@ -173,6 +179,7 @@ struct svc_i3c_master {
struct i3c_master_controller base;
struct device *dev;
void __iomem *regs;
+ struct svc_i3c_regs_save saved_regs;
u32 free_slots;
u8 addrs[SVC_I3C_MAX_DEVS];
struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
@@ -1008,6 +1015,11 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
if (ret)
goto emit_stop;
+ if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
+ ret = -ENXIO;
+ goto emit_stop;
+ }
+
if (rnw)
ret = svc_i3c_master_read(master, in, xfer_len);
else
@@ -1090,12 +1102,6 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
if (!xfer)
return;
- ret = pm_runtime_resume_and_get(master->dev);
- if (ret < 0) {
- dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
- return;
- }
-
svc_i3c_master_clear_merrwarn(master);
svc_i3c_master_flush_fifo(master);
@@ -1110,9 +1116,6 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
break;
}
- pm_runtime_mark_last_busy(master->dev);
- pm_runtime_put_autosuspend(master->dev);
-
xfer->ret = ret;
complete(&xfer->comp);
@@ -1133,6 +1136,13 @@ static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
struct svc_i3c_xfer *xfer)
{
unsigned long flags;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return;
+ }
init_completion(&xfer->comp);
spin_lock_irqsave(&master->xferqueue.lock, flags);
@@ -1143,6 +1153,9 @@ static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
svc_i3c_master_start_xfer_locked(master);
}
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
}
static bool
@@ -1579,10 +1592,28 @@ static void svc_i3c_master_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
+static void svc_i3c_save_regs(struct svc_i3c_master *master)
+{
+ master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
+ master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
+}
+
+static void svc_i3c_restore_regs(struct svc_i3c_master *master)
+{
+ if (readl(master->regs + SVC_I3C_MDYNADDR) !=
+ master->saved_regs.mdynaddr) {
+ writel(master->saved_regs.mconfig,
+ master->regs + SVC_I3C_MCONFIG);
+ writel(master->saved_regs.mdynaddr,
+ master->regs + SVC_I3C_MDYNADDR);
+ }
+}
+
static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
+ svc_i3c_save_regs(master);
svc_i3c_master_unprepare_clks(master);
pinctrl_pm_select_sleep_state(dev);
@@ -1596,6 +1627,8 @@ static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
svc_i3c_master_prepare_clks(master);
+ svc_i3c_restore_regs(master);
+
return 0;
}
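
The svc-i3c change moves the runtime-PM get/put out of svc_i3c_master_start_xfer_locked(), which runs under the xferqueue spinlock, and into svc_i3c_master_enqueue_xfer(), apparently because pm_runtime_resume_and_get() may sleep and must not be called with the spinlock held. A reduced sketch of the resulting pattern, with hypothetical foo types:

#include <linux/list.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

struct foo_xfer {
	struct list_head node;
};

struct foo_master {
	struct device *dev;
	spinlock_t lock;
	struct list_head queue;
};

static void foo_enqueue_xfer(struct foo_master *master, struct foo_xfer *xfer)
{
	unsigned long flags;

	/* the (possibly sleeping) resume happens before the spinlock is taken */
	if (pm_runtime_resume_and_get(master->dev) < 0)
		return;

	spin_lock_irqsave(&master->lock, flags);
	list_add_tail(&xfer->node, &master->queue);
	/* ... kick the hardware if it is idle ... */
	spin_unlock_irqrestore(&master->lock, flags);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
}
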
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 34201d7ef33e..b930036edbbe 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -2147,7 +2147,7 @@ static void __init intel_idle_cpuidle_devices_uninit(void)
* All our short idle states are dominated by vmexit/vmenter latencies,
* not the underlying hardware latencies so we keep our values for these.
*/
-static void matchup_vm_state_with_baremetal(void)
+static void __init matchup_vm_state_with_baremetal(void)
{
int cstate;
diff --git a/drivers/iio/accel/adxl313_i2c.c b/drivers/iio/accel/adxl313_i2c.c
index 99cc7fc29488..524327ea3663 100644
--- a/drivers/iio/accel/adxl313_i2c.c
+++ b/drivers/iio/accel/adxl313_i2c.c
@@ -85,7 +85,7 @@ static struct i2c_driver adxl313_i2c_driver = {
.name = "adxl313_i2c",
.of_match_table = adxl313_of_match,
},
- .probe_new = adxl313_i2c_probe,
+ .probe = adxl313_i2c_probe,
.id_table = adxl313_i2c_id,
};
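
From here on, many I2C client drivers switch from .probe_new back to .probe, which now uses the single-argument prototype taking only the client. A minimal, hypothetical driver in the new style:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static int foo_probe(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe = foo_probe,
	.id_table = foo_id,
};
module_i2c_driver(foo_driver);

MODULE_DESCRIPTION("Hypothetical single-argument .probe example");
MODULE_LICENSE("GPL");
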
diff --git a/drivers/iio/accel/adxl345_i2c.c b/drivers/iio/accel/adxl345_i2c.c
index 098cd83f95b2..e47d12f19602 100644
--- a/drivers/iio/accel/adxl345_i2c.c
+++ b/drivers/iio/accel/adxl345_i2c.c
@@ -56,7 +56,7 @@ static struct i2c_driver adxl345_i2c_driver = {
.of_match_table = adxl345_of_match,
.acpi_match_table = adxl345_acpi_match,
},
- .probe_new = adxl345_i2c_probe,
+ .probe = adxl345_i2c_probe,
.id_table = adxl345_i2c_id,
};
module_i2c_driver(adxl345_i2c_driver);
diff --git a/drivers/iio/accel/adxl355_i2c.c b/drivers/iio/accel/adxl355_i2c.c
index 6cde5ccac06b..d5beea61479d 100644
--- a/drivers/iio/accel/adxl355_i2c.c
+++ b/drivers/iio/accel/adxl355_i2c.c
@@ -68,7 +68,7 @@ static struct i2c_driver adxl355_i2c_driver = {
.name = "adxl355_i2c",
.of_match_table = adxl355_of_match,
},
- .probe_new = adxl355_i2c_probe,
+ .probe = adxl355_i2c_probe,
.id_table = adxl355_i2c_id,
};
module_i2c_driver(adxl355_i2c_driver);
diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c
index 070aad724abd..b595fe94f3a3 100644
--- a/drivers/iio/accel/adxl367_i2c.c
+++ b/drivers/iio/accel/adxl367_i2c.c
@@ -77,7 +77,7 @@ static struct i2c_driver adxl367_i2c_driver = {
.name = "adxl367_i2c",
.of_match_table = adxl367_of_match,
},
- .probe_new = adxl367_i2c_probe,
+ .probe = adxl367_i2c_probe,
.id_table = adxl367_i2c_id,
};
diff --git a/drivers/iio/accel/adxl372_i2c.c b/drivers/iio/accel/adxl372_i2c.c
index e5f310ea65ff..d0690417fd36 100644
--- a/drivers/iio/accel/adxl372_i2c.c
+++ b/drivers/iio/accel/adxl372_i2c.c
@@ -58,7 +58,7 @@ static struct i2c_driver adxl372_i2c_driver = {
.name = "adxl372_i2c",
.of_match_table = adxl372_of_match,
},
- .probe_new = adxl372_i2c_probe,
+ .probe = adxl372_i2c_probe,
.id_table = adxl372_i2c_id,
};
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index eb697eeb4301..e8ab0d249351 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -1134,7 +1134,7 @@ static struct i2c_driver bma180_driver = {
.pm = pm_sleep_ptr(&bma180_pm_ops),
.of_match_table = bma180_of_match,
},
- .probe_new = bma180_probe,
+ .probe = bma180_probe,
.remove = bma180_remove,
.id_table = bma180_ids,
};
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index a68b845f5b4f..e90e2f01550a 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -868,8 +868,7 @@ static int bma400_init(struct bma400_data *data)
ARRAY_SIZE(regulator_names),
regulator_names);
if (ret)
- return dev_err_probe(data->dev, ret, "Failed to get regulators: %d\n",
- ret);
+ return dev_err_probe(data->dev, ret, "Failed to get regulators\n");
/* Try to read chip_id register. It must return 0x90. */
ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
diff --git a/drivers/iio/accel/bma400_i2c.c b/drivers/iio/accel/bma400_i2c.c
index 688b06dae669..adf4e3fd2e1d 100644
--- a/drivers/iio/accel/bma400_i2c.c
+++ b/drivers/iio/accel/bma400_i2c.c
@@ -44,7 +44,7 @@ static struct i2c_driver bma400_i2c_driver = {
.name = "bma400",
.of_match_table = bma400_of_i2c_match,
},
- .probe_new = bma400_i2c_probe,
+ .probe = bma400_i2c_probe,
.id_table = bma400_i2c_ids,
};
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index 509cab2bd694..ee1ba134ad42 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -269,7 +269,7 @@ static struct i2c_driver bmc150_accel_driver = {
.acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match),
.pm = &bmc150_accel_pm_ops,
},
- .probe_new = bmc150_accel_probe,
+ .probe = bmc150_accel_probe,
.remove = bmc150_accel_remove,
.id_table = bmc150_accel_id,
};
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
index 38a7d811610e..2f27a5ded94c 100644
--- a/drivers/iio/accel/da280.c
+++ b/drivers/iio/accel/da280.c
@@ -184,7 +184,7 @@ static struct i2c_driver da280_driver = {
.acpi_match_table = ACPI_PTR(da280_acpi_match),
.pm = pm_sleep_ptr(&da280_pm_ops),
},
- .probe_new = da280_probe,
+ .probe = da280_probe,
.id_table = da280_i2c_id,
};
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
index 080335fa2ad6..8f919920ced5 100644
--- a/drivers/iio/accel/da311.c
+++ b/drivers/iio/accel/da311.c
@@ -278,7 +278,7 @@ static struct i2c_driver da311_driver = {
.name = "da311",
.pm = pm_sleep_ptr(&da311_pm_ops),
},
- .probe_new = da311_probe,
+ .probe = da311_probe,
.id_table = da311_i2c_id,
};
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
index 7390509aaac0..2e719d60fff8 100644
--- a/drivers/iio/accel/dmard06.c
+++ b/drivers/iio/accel/dmard06.c
@@ -217,7 +217,7 @@ static const struct of_device_id dmard06_of_match[] = {
MODULE_DEVICE_TABLE(of, dmard06_of_match);
static struct i2c_driver dmard06_driver = {
- .probe_new = dmard06_probe,
+ .probe = dmard06_probe,
.id_table = dmard06_id,
.driver = {
.name = DMARD06_DRV_NAME,
diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c
index 4b7a537f617d..fa98623de579 100644
--- a/drivers/iio/accel/dmard09.c
+++ b/drivers/iio/accel/dmard09.c
@@ -135,7 +135,7 @@ static struct i2c_driver dmard09_driver = {
.driver = {
.name = DMARD09_DRV_NAME
},
- .probe_new = dmard09_probe,
+ .probe = dmard09_probe,
.id_table = dmard09_id,
};
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
index 07e68aed8a13..7745b6ffd1ad 100644
--- a/drivers/iio/accel/dmard10.c
+++ b/drivers/iio/accel/dmard10.c
@@ -241,7 +241,7 @@ static struct i2c_driver dmard10_driver = {
.name = "dmard10",
.pm = pm_sleep_ptr(&dmard10_pm_ops),
},
- .probe_new = dmard10_probe,
+ .probe = dmard10_probe,
.id_table = dmard10_i2c_id,
};
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 0d672b1469e8..be8a15cb945f 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -724,8 +724,7 @@ static const struct iio_event_spec fxls8962af_event[] = {
.sign = 's', \
.realbits = 12, \
.storagebits = 16, \
- .shift = 4, \
- .endianness = IIO_BE, \
+ .endianness = IIO_LE, \
}, \
.event_spec = fxls8962af_event, \
.num_event_specs = ARRAY_SIZE(fxls8962af_event), \
@@ -904,9 +903,10 @@ static int fxls8962af_fifo_transfer(struct fxls8962af_data *data,
int total_length = samples * sample_length;
int ret;
- if (i2c_verify_client(dev))
+ if (i2c_verify_client(dev) &&
+ data->chip_info->chip_id == FXLS8962AF_DEVICE_ID)
/*
- * Due to errata bug:
+ * Due to errata bug (only applicable on fxls8962af):
* E3: FIFO burst read operation error using I2C interface
* We have to avoid burst reads on I2C..
*/
diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
index 22640eaebac7..160124673308 100644
--- a/drivers/iio/accel/fxls8962af-i2c.c
+++ b/drivers/iio/accel/fxls8962af-i2c.c
@@ -47,7 +47,7 @@ static struct i2c_driver fxls8962af_driver = {
.of_match_table = fxls8962af_of_match,
.pm = pm_ptr(&fxls8962af_pm_ops),
},
- .probe_new = fxls8962af_probe,
+ .probe = fxls8962af_probe,
.id_table = fxls8962af_id,
};
module_i2c_driver(fxls8962af_driver);
diff --git a/drivers/iio/accel/kionix-kx022a-i2c.c b/drivers/iio/accel/kionix-kx022a-i2c.c
index e6fd02d931b6..b0ac78e85dad 100644
--- a/drivers/iio/accel/kionix-kx022a-i2c.c
+++ b/drivers/iio/accel/kionix-kx022a-i2c.c
@@ -40,8 +40,9 @@ static struct i2c_driver kx022a_i2c_driver = {
.driver = {
.name = "kx022a-i2c",
.of_match_table = kx022a_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = kx022a_i2c_probe,
+ .probe = kx022a_i2c_probe,
};
module_i2c_driver(kx022a_i2c_driver);
diff --git a/drivers/iio/accel/kionix-kx022a-spi.c b/drivers/iio/accel/kionix-kx022a-spi.c
index 9cd047f7b346..f45a46899a5f 100644
--- a/drivers/iio/accel/kionix-kx022a-spi.c
+++ b/drivers/iio/accel/kionix-kx022a-spi.c
@@ -46,6 +46,7 @@ static struct spi_driver kx022a_spi_driver = {
.driver = {
.name = "kx022a-spi",
.of_match_table = kx022a_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = kx022a_spi_probe,
.id_table = kx022a_id,
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index b8636fa8eaeb..4ea3c6718ed4 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -516,17 +516,6 @@ static int kx022a_read_raw(struct iio_dev *idev,
return -EINVAL;
};
-static int kx022a_validate_trigger(struct iio_dev *idev,
- struct iio_trigger *trig)
-{
- struct kx022a_data *data = iio_priv(idev);
-
- if (data->trig != trig)
- return -EINVAL;
-
- return 0;
-}
-
static int kx022a_set_watermark(struct iio_dev *idev, unsigned int val)
{
struct kx022a_data *data = iio_priv(idev);
@@ -725,7 +714,7 @@ static const struct iio_info kx022a_info = {
.write_raw = &kx022a_write_raw,
.read_avail = &kx022a_read_avail,
- .validate_trigger = kx022a_validate_trigger,
+ .validate_trigger = iio_validate_own_trigger,
.hwfifo_set_watermark = kx022a_set_watermark,
.hwfifo_flush_to_buffer = kx022a_fifo_flush,
};
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 98da4bda22df..894709286b0c 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1732,7 +1732,7 @@ static struct i2c_driver kxcjk1013_driver = {
.of_match_table = kxcjk1013_of_match,
.pm = &kxcjk1013_pm_ops,
},
- .probe_new = kxcjk1013_probe,
+ .probe = kxcjk1013_probe,
.remove = kxcjk1013_remove,
.id_table = kxcjk1013_id,
};
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index 6b3683ddce36..3bc9ee1f9db3 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -54,7 +54,7 @@ static struct i2c_driver kxsd9_i2c_driver = {
.of_match_table = kxsd9_of_match,
.pm = pm_ptr(&kxsd9_dev_pm_ops),
},
- .probe_new = kxsd9_i2c_probe,
+ .probe = kxsd9_i2c_probe,
.remove = kxsd9_i2c_remove,
.id_table = kxsd9_i2c_id,
};
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index efc21871de42..6b87c2c9945c 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -190,7 +190,7 @@ static struct i2c_driver mc3230_driver = {
.name = "mc3230",
.pm = pm_sleep_ptr(&mc3230_pm_ops),
},
- .probe_new = mc3230_probe,
+ .probe = mc3230_probe,
.remove = mc3230_remove,
.id_table = mc3230_i2c_id,
};
diff --git a/drivers/iio/accel/mma7455_i2c.c b/drivers/iio/accel/mma7455_i2c.c
index a3864dbe2761..14f7850a22f0 100644
--- a/drivers/iio/accel/mma7455_i2c.c
+++ b/drivers/iio/accel/mma7455_i2c.c
@@ -46,7 +46,7 @@ static const struct of_device_id mma7455_of_match[] = {
MODULE_DEVICE_TABLE(of, mma7455_of_match);
static struct i2c_driver mma7455_i2c_driver = {
- .probe_new = mma7455_i2c_probe,
+ .probe = mma7455_i2c_probe,
.remove = mma7455_i2c_remove,
.id_table = mma7455_i2c_ids,
.driver = {
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index b279ca4dcdc0..260cbceaa151 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -266,7 +266,7 @@ static struct i2c_driver mma7660_driver = {
.of_match_table = mma7660_of_match,
.acpi_match_table = mma7660_acpi_id,
},
- .probe_new = mma7660_probe,
+ .probe = mma7660_probe,
.remove = mma7660_remove,
.id_table = mma7660_i2c_id,
};
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index ea14e3aaa30a..6e7399e72221 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1846,7 +1846,7 @@ static struct i2c_driver mma8452_driver = {
.of_match_table = mma8452_dt_ids,
.pm = &mma8452_pm_ops,
},
- .probe_new = mma8452_probe,
+ .probe = mma8452_probe,
.remove = mma8452_remove,
.id_table = mma8452_id,
};
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index aa4f5842859e..d823f2edc6d4 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -607,7 +607,7 @@ static struct i2c_driver mma9551_driver = {
.acpi_match_table = ACPI_PTR(mma9551_acpi_match),
.pm = pm_ptr(&mma9551_pm_ops),
},
- .probe_new = mma9551_probe,
+ .probe = mma9551_probe,
.remove = mma9551_remove,
.id_table = mma9551_id,
};
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 0af578ef9d3d..d01aba4aecba 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1246,7 +1246,7 @@ static struct i2c_driver mma9553_driver = {
.acpi_match_table = ACPI_PTR(mma9553_acpi_match),
.pm = pm_ptr(&mma9553_pm_ops),
},
- .probe_new = mma9553_probe,
+ .probe = mma9553_probe,
.remove = mma9553_remove,
.id_table = mma9553_id,
};
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index 6690fa37da8f..6ddcc3c2f840 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -1294,7 +1294,7 @@ static struct i2c_driver msa311_driver = {
.of_match_table = msa311_of_match,
.pm = pm_ptr(&msa311_pm_ops),
},
- .probe_new = msa311_probe,
+ .probe = msa311_probe,
.id_table = msa311_i2c_id,
};
module_i2c_driver(msa311_driver);
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index b146fc82738f..75d142bc14b4 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -488,7 +488,7 @@ static struct i2c_driver mxc4005_driver = {
.name = MXC4005_DRV_NAME,
.acpi_match_table = ACPI_PTR(mxc4005_acpi_match),
},
- .probe_new = mxc4005_probe,
+ .probe = mxc4005_probe,
.id_table = mxc4005_id,
};
diff --git a/drivers/iio/accel/mxc6255.c b/drivers/iio/accel/mxc6255.c
index aa2e660545f8..33c2253561e6 100644
--- a/drivers/iio/accel/mxc6255.c
+++ b/drivers/iio/accel/mxc6255.c
@@ -183,7 +183,7 @@ static struct i2c_driver mxc6255_driver = {
.name = MXC6255_DRV_NAME,
.acpi_match_table = ACPI_PTR(mxc6255_acpi_match),
},
- .probe_new = mxc6255_probe,
+ .probe = mxc6255_probe,
.id_table = mxc6255_id,
};
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 282e539157f8..d2104e14e255 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -1007,6 +1007,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM9DS0_IMU_DEV_NAME,
+ [1] = LSM303D_IMU_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_16bit_channels,
.odr = {
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index fb9e2d6f4210..71ee861b2980 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -206,7 +206,7 @@ static struct i2c_driver st_accel_driver = {
.of_match_table = st_accel_of_match,
.acpi_match_table = ACPI_PTR(st_accel_acpi_match),
},
- .probe_new = st_accel_i2c_probe,
+ .probe = st_accel_i2c_probe,
.id_table = st_accel_id_table,
};
module_i2c_driver(st_accel_driver);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 68f680db7505..ef0ae7672253 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -644,7 +644,7 @@ static struct i2c_driver stk8312_driver = {
.name = STK8312_DRIVER_NAME,
.pm = pm_sleep_ptr(&stk8312_pm_ops),
},
- .probe_new = stk8312_probe,
+ .probe = stk8312_probe,
.remove = stk8312_remove,
.id_table = stk8312_i2c_id,
};
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 44f6e0fbdfcc..3415ac1b4495 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -543,7 +543,7 @@ static struct i2c_driver stk8ba50_driver = {
.pm = pm_sleep_ptr(&stk8ba50_pm_ops),
.acpi_match_table = ACPI_PTR(stk8ba50_acpi_id),
},
- .probe_new = stk8ba50_probe,
+ .probe = stk8ba50_probe,
.remove = stk8ba50_remove,
.id_table = stk8ba50_i2c_id,
};
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index eb2b09ef5d5b..dc14bde31ac1 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -145,7 +145,7 @@ config AD7606
config AD7606_IFACE_PARALLEL
tristate "Analog Devices AD7606 ADC driver with parallel interface support"
- depends on HAS_IOMEM
+ depends on HAS_IOPORT
select AD7606
help
Say yes here to build parallel interface support for Analog Devices:
@@ -735,6 +735,17 @@ config MAX1363
To compile this driver as a module, choose M here: the module will be
called max1363.
+config MAX77541_ADC
+ tristate "Analog Devices MAX77541 ADC driver"
+ depends on MFD_MAX77541
+ help
+	  This driver controls an Analog Devices MAX77541 ADC
+	  via I2C bus. This device has one ADC. Say yes here to build
+ support for Analog Devices MAX77541 ADC interface.
+
+ To compile this driver as a module, choose M here:
+ the module will be called max77541-adc.
+
config MAX9611
tristate "Maxim max9611/max9612 ADC driver"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index e07e4a3e6237..eb6e891790fb 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_MAX11205) += max11205.o
obj-$(CONFIG_MAX11410) += max11410.o
obj-$(CONFIG_MAX1241) += max1241.o
obj-$(CONFIG_MAX1363) += max1363.o
+obj-$(CONFIG_MAX77541_ADC) += max77541-adc.o
obj-$(CONFIG_MAX9611) += max9611.o
obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
index 7d6709da1005..2f048527b7b7 100644
--- a/drivers/iio/adc/ad7091r5.c
+++ b/drivers/iio/adc/ad7091r5.c
@@ -103,7 +103,7 @@ static struct i2c_driver ad7091r5_driver = {
.name = "ad7091r5",
.of_match_table = ad7091r5_dt_ids,
},
- .probe_new = ad7091r5_i2c_probe,
+ .probe = ad7091r5_i2c_probe,
.id_table = ad7091r5_i2c_ids,
};
module_i2c_driver(ad7091r5_driver);
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 99bb604b78c8..8685e0b58a83 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -367,7 +367,7 @@ static int ad7192_of_clock_select(struct ad7192_state *st)
clock_sel = AD7192_CLK_INT;
/* use internal clock */
- if (st->mclk) {
+ if (!st->mclk) {
if (of_property_read_bool(np, "adi,int-clock-output-enable"))
clock_sel = AD7192_CLK_INT_CO;
} else {
@@ -380,9 +380,9 @@ static int ad7192_of_clock_select(struct ad7192_state *st)
return clock_sel;
}
-static int ad7192_setup(struct ad7192_state *st, struct device_node *np)
+static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
{
- struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
+ struct ad7192_state *st = iio_priv(indio_dev);
bool rej60_en, refin2_en;
bool buf_en, bipolar, burnout_curr_en;
unsigned long long scale_uv;
@@ -1069,7 +1069,7 @@ static int ad7192_probe(struct spi_device *spi)
}
}
- ret = ad7192_setup(st, spi->dev.of_node);
+ ret = ad7192_setup(indio_dev, spi->dev.of_node);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index f9ee189925de..14d02b085d3b 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -553,7 +553,7 @@ static struct i2c_driver ad7291_driver = {
.name = KBUILD_MODNAME,
.of_match_table = ad7291_of_match,
},
- .probe_new = ad7291_probe,
+ .probe = ad7291_probe,
.id_table = ad7291_id,
};
module_i2c_driver(ad7291_driver);
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 8f0a3a35e727..b757cc45c4de 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -968,7 +968,7 @@ static struct i2c_driver ad799x_driver = {
.name = "ad799x",
.pm = pm_sleep_ptr(&ad799x_pm_ops),
},
- .probe_new = ad799x_probe,
+ .probe = ad799x_probe,
.remove = ad799x_remove,
.id_table = ad799x_id,
};
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 38d9d7b2313e..213526c1592f 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -1090,7 +1090,7 @@ static struct i2c_driver ina2xx_driver = {
.name = KBUILD_MODNAME,
.of_match_table = ina2xx_of_match,
},
- .probe_new = ina2xx_probe,
+ .probe = ina2xx_probe,
.remove = ina2xx_remove,
.id_table = ina2xx_id,
};
diff --git a/drivers/iio/adc/ltc2471.c b/drivers/iio/adc/ltc2471.c
index eeb2945829eb..97c417c3a4eb 100644
--- a/drivers/iio/adc/ltc2471.c
+++ b/drivers/iio/adc/ltc2471.c
@@ -146,7 +146,7 @@ static struct i2c_driver ltc2471_i2c_driver = {
.driver = {
.name = "ltc2471",
},
- .probe_new = ltc2471_i2c_probe,
+ .probe = ltc2471_i2c_probe,
.id_table = ltc2471_i2c_id,
};
diff --git a/drivers/iio/adc/ltc2485.c b/drivers/iio/adc/ltc2485.c
index 6a23427344ec..859e4314cfa2 100644
--- a/drivers/iio/adc/ltc2485.c
+++ b/drivers/iio/adc/ltc2485.c
@@ -133,7 +133,7 @@ static struct i2c_driver ltc2485_driver = {
.driver = {
.name = "ltc2485",
},
- .probe_new = ltc2485_probe,
+ .probe = ltc2485_probe,
.id_table = ltc2485_id,
};
module_i2c_driver(ltc2485_driver);
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index ec198c6f13d6..5bdd40729611 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -163,7 +163,7 @@ static struct i2c_driver ltc2497_driver = {
.name = "ltc2497",
.of_match_table = ltc2497_of_match,
},
- .probe_new = ltc2497_probe,
+ .probe = ltc2497_probe,
.remove = ltc2497_remove,
.id_table = ltc2497_id,
};
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 73b783b430d7..b31581616ce3 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1718,7 +1718,7 @@ static struct i2c_driver max1363_driver = {
.name = "max1363",
.of_match_table = max1363_of_match,
},
- .probe_new = max1363_probe,
+ .probe = max1363_probe,
.id_table = max1363_id,
};
module_i2c_driver(max1363_driver);
diff --git a/drivers/iio/adc/max77541-adc.c b/drivers/iio/adc/max77541-adc.c
new file mode 100644
index 000000000000..21d024bde16b
--- /dev/null
+++ b/drivers/iio/adc/max77541-adc.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022 Analog Devices, Inc.
+ * ADI MAX77541 ADC Driver with IIO interface
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <linux/mfd/max77541.h>
+
+enum max77541_adc_range {
+ LOW_RANGE,
+ MID_RANGE,
+ HIGH_RANGE,
+};
+
+enum max77541_adc_channel {
+ MAX77541_ADC_VSYS_V,
+ MAX77541_ADC_VOUT1_V,
+ MAX77541_ADC_VOUT2_V,
+ MAX77541_ADC_TEMP,
+};
+
+static int max77541_adc_offset(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ switch (chan->channel) {
+ case MAX77541_ADC_TEMP:
+ *val = DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS, 1725);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int max77541_adc_scale(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ struct regmap **regmap = iio_priv(indio_dev);
+ unsigned int reg_val;
+ int ret;
+
+ switch (chan->channel) {
+ case MAX77541_ADC_VSYS_V:
+ *val = 25;
+ return IIO_VAL_INT;
+ case MAX77541_ADC_VOUT1_V:
+ case MAX77541_ADC_VOUT2_V:
+ ret = regmap_read(*regmap, MAX77541_REG_M2_CFG1, &reg_val);
+ if (ret)
+ return ret;
+
+ reg_val = FIELD_GET(MAX77541_BITS_MX_CFG1_RNG, reg_val);
+ switch (reg_val) {
+ case LOW_RANGE:
+ *val = 6;
+ *val2 = 250000;
+ break;
+ case MID_RANGE:
+ *val = 12;
+ *val2 = 500000;
+ break;
+ case HIGH_RANGE:
+ *val = 25;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ case MAX77541_ADC_TEMP:
+ *val = 1725;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int max77541_adc_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct regmap **regmap = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_read(*regmap, chan->address, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+#define MAX77541_ADC_CHANNEL_V(_channel, _name, _type, _reg) \
+ { \
+ .type = _type, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .address = _reg, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = _name, \
+ }
+
+#define MAX77541_ADC_CHANNEL_TEMP(_channel, _name, _type, _reg) \
+ { \
+ .type = _type, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .address = _reg, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) |\
+ BIT(IIO_CHAN_INFO_OFFSET),\
+ .datasheet_name = _name, \
+ }
+
+static const struct iio_chan_spec max77541_adc_channels[] = {
+ MAX77541_ADC_CHANNEL_V(MAX77541_ADC_VSYS_V, "vsys_v", IIO_VOLTAGE,
+ MAX77541_REG_ADC_DATA_CH1),
+ MAX77541_ADC_CHANNEL_V(MAX77541_ADC_VOUT1_V, "vout1_v", IIO_VOLTAGE,
+ MAX77541_REG_ADC_DATA_CH2),
+ MAX77541_ADC_CHANNEL_V(MAX77541_ADC_VOUT2_V, "vout2_v", IIO_VOLTAGE,
+ MAX77541_REG_ADC_DATA_CH3),
+ MAX77541_ADC_CHANNEL_TEMP(MAX77541_ADC_TEMP, "temp", IIO_TEMP,
+ MAX77541_REG_ADC_DATA_CH6),
+};
+
+static int max77541_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ return max77541_adc_offset(indio_dev, chan, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return max77541_adc_scale(indio_dev, chan, val, val2);
+ case IIO_CHAN_INFO_RAW:
+ return max77541_adc_raw(indio_dev, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info max77541_adc_info = {
+ .read_raw = max77541_adc_read_raw,
+};
+
+static int max77541_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct regmap **regmap;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*regmap));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ regmap = iio_priv(indio_dev);
+
+ *regmap = dev_get_regmap(dev->parent, NULL);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ indio_dev->name = "max77541";
+ indio_dev->info = &max77541_adc_info;
+ indio_dev->channels = max77541_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max77541_adc_channels);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct platform_device_id max77541_adc_platform_id[] = {
+ { "max77541-adc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, max77541_adc_platform_id);
+
+static struct platform_driver max77541_adc_driver = {
+ .driver = {
+ .name = "max77541-adc",
+ },
+ .probe = max77541_adc_probe,
+ .id_table = max77541_adc_platform_id,
+};
+module_platform_driver(max77541_adc_driver);
+
+MODULE_AUTHOR("Okan Sahin <Okan.Sahin@analog.com>");
+MODULE_DESCRIPTION("MAX77541 ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index cb7f4785423a..76e517b7b1e4 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -556,7 +556,7 @@ static struct i2c_driver max9611_driver = {
.name = DRIVER_NAME,
.of_match_table = max9611_of_table,
},
- .probe_new = max9611_probe,
+ .probe = max9611_probe,
};
module_i2c_driver(max9611_driver);
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index ada844c3f7ec..0778a8fb6866 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -417,7 +417,7 @@ static struct i2c_driver mcp3422_driver = {
.name = "mcp3422",
.of_match_table = mcp3422_of_match,
},
- .probe_new = mcp3422_probe,
+ .probe = mcp3422_probe,
.id_table = mcp3422_id,
};
module_i2c_driver(mcp3422_driver);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 18937a262af6..af6bfcc19075 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -72,7 +72,7 @@
#define MESON_SAR_ADC_REG3_PANEL_DETECT_COUNT_MASK GENMASK(20, 18)
#define MESON_SAR_ADC_REG3_PANEL_DETECT_FILTER_TB_MASK GENMASK(17, 16)
#define MESON_SAR_ADC_REG3_ADC_CLK_DIV_SHIFT 10
- #define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH 5
+ #define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH 6
#define MESON_SAR_ADC_REG3_BLOCK_DLY_SEL_MASK GENMASK(9, 8)
#define MESON_SAR_ADC_REG3_BLOCK_DLY_MASK GENMASK(7, 0)
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index c1261ecd400c..d9e1696df7ae 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -544,7 +544,7 @@ static const struct of_device_id nau7802_dt_ids[] = {
MODULE_DEVICE_TABLE(of, nau7802_dt_ids);
static struct i2c_driver nau7802_driver = {
- .probe_new = nau7802_probe,
+ .probe = nau7802_probe,
.id_table = nau7802_i2c_id,
.driver = {
.name = "nau7802",
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 7dfc9c927a23..27b2632c1037 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -14,7 +14,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/i2c.h>
#include <linux/pm.h>
#include <linux/mfd/palmas.h>
#include <linux/completion.h>
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index c2d5e06f137a..0a4fd3a46113 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -114,7 +114,7 @@ enum adc5_cal_val {
* that is an average of multiple measurements.
* @scale_fn_type: Represents the scaling function to convert voltage
* physical units desired by the client for the channel.
- * @datasheet_name: Channel name used in device tree.
+ * @channel_name: Channel name used in device tree.
*/
struct adc5_channel_prop {
unsigned int channel;
@@ -126,7 +126,7 @@ struct adc5_channel_prop {
unsigned int hw_settle_time;
unsigned int avg_samples;
enum vadc_scale_fn_type scale_fn_type;
- const char *datasheet_name;
+ const char *channel_name;
};
/**
@@ -657,8 +657,7 @@ static int adc5_get_fw_channel_data(struct adc5_chip *adc,
chan = chan & ADC_CHANNEL_MASK;
}
- if (chan > ADC5_PARALLEL_ISENSE_VBAT_IDATA ||
- !data->adc_chans[chan].datasheet_name) {
+ if (chan > ADC5_PARALLEL_ISENSE_VBAT_IDATA) {
dev_err(dev, "%s invalid channel number %d\n", name, chan);
return -EINVAL;
}
@@ -669,9 +668,9 @@ static int adc5_get_fw_channel_data(struct adc5_chip *adc,
ret = fwnode_property_read_string(fwnode, "label", &channel_name);
if (ret)
- channel_name = name;
+ channel_name = data->adc_chans[chan].datasheet_name;
- prop->datasheet_name = channel_name;
+ prop->channel_name = channel_name;
ret = fwnode_property_read_u32(fwnode, "qcom,decimation", &value);
if (!ret) {
@@ -861,8 +860,8 @@ static int adc5_get_fw_data(struct adc5_chip *adc)
adc_chan = &adc->data->adc_chans[prop.channel];
iio_chan->channel = prop.channel;
- iio_chan->datasheet_name = prop.datasheet_name;
- iio_chan->extend_name = prop.datasheet_name;
+ iio_chan->datasheet_name = adc_chan->datasheet_name;
+ iio_chan->extend_name = prop.channel_name;
iio_chan->info_mask_separate = adc_chan->info_mask;
iio_chan->type = adc_chan->type;
iio_chan->address = index;
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index bcff0f62b70e..f5c6f1f27b2c 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -84,6 +84,7 @@
* that is an average of multiple measurements.
* @scale_fn_type: Represents the scaling function to convert voltage
* physical units desired by the client for the channel.
+ * @channel_name: Channel name used in device tree.
*/
struct vadc_channel_prop {
unsigned int channel;
@@ -93,6 +94,7 @@ struct vadc_channel_prop {
unsigned int hw_settle_time;
unsigned int avg_samples;
enum vadc_scale_fn_type scale_fn_type;
+ const char *channel_name;
};
/**
@@ -495,8 +497,18 @@ static int vadc_fwnode_xlate(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int vadc_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ struct vadc_priv *vadc = iio_priv(indio_dev);
+ const char *name = vadc->chan_props[chan->address].channel_name;
+
+ return sysfs_emit(label, "%s\n", name);
+}
+
static const struct iio_info vadc_info = {
.read_raw = vadc_read_raw,
+ .read_label = vadc_read_label,
.fwnode_xlate = vadc_fwnode_xlate,
};
@@ -652,7 +664,7 @@ static int vadc_get_fw_channel_data(struct device *dev,
struct vadc_channel_prop *prop,
struct fwnode_handle *fwnode)
{
- const char *name = fwnode_get_name(fwnode);
+ const char *name = fwnode_get_name(fwnode), *label;
u32 chan, value, varr[2];
int ret;
@@ -667,6 +679,11 @@ static int vadc_get_fw_channel_data(struct device *dev,
return -EINVAL;
}
+ ret = fwnode_property_read_string(fwnode, "label", &label);
+ if (ret)
+ label = vadc_chans[chan].datasheet_name;
+ prop->channel_name = label;
+
/* the channel has DT description */
prop->channel = chan;
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 79448c5ffc2a..4b011f7eddec 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -4,6 +4,7 @@
* Copyright (C) 2014 ROCKCHIP, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -38,10 +39,31 @@
#define SARADC_TIMEOUT msecs_to_jiffies(100)
#define SARADC_MAX_CHANNELS 8
+/* v2 registers */
+#define SARADC2_CONV_CON 0x000
+#define SARADC_T_PD_SOC 0x004
+#define SARADC_T_DAS_SOC 0x00c
+#define SARADC2_END_INT_EN 0x104
+#define SARADC2_ST_CON 0x108
+#define SARADC2_STATUS 0x10c
+#define SARADC2_END_INT_ST 0x110
+#define SARADC2_DATA_BASE 0x120
+
+#define SARADC2_EN_END_INT BIT(0)
+#define SARADC2_START BIT(4)
+#define SARADC2_SINGLE_MODE BIT(5)
+
+#define SARADC2_CONV_CHANNELS GENMASK(15, 0)
+
+struct rockchip_saradc;
+
struct rockchip_saradc_data {
const struct iio_chan_spec *channels;
int num_channels;
unsigned long clk_rate;
+ void (*start)(struct rockchip_saradc *info, int chn);
+ int (*read)(struct rockchip_saradc *info);
+ void (*power_down)(struct rockchip_saradc *info);
};
struct rockchip_saradc {
@@ -60,27 +82,81 @@ struct rockchip_saradc {
struct notifier_block nb;
};
-static void rockchip_saradc_power_down(struct rockchip_saradc *info)
+static void rockchip_saradc_reset_controller(struct reset_control *reset);
+
+static void rockchip_saradc_start_v1(struct rockchip_saradc *info, int chn)
+{
+ /* 8 clock periods as delay between power up and start cmd */
+ writel_relaxed(8, info->regs + SARADC_DLY_PU_SOC);
+ /* Select the channel to be used and trigger conversion */
+ writel(SARADC_CTRL_POWER_CTRL | (chn & SARADC_CTRL_CHN_MASK) |
+ SARADC_CTRL_IRQ_ENABLE, info->regs + SARADC_CTRL);
+}
+
+static void rockchip_saradc_start_v2(struct rockchip_saradc *info, int chn)
+{
+ int val;
+
+ if (info->reset)
+ rockchip_saradc_reset_controller(info->reset);
+
+ writel_relaxed(0xc, info->regs + SARADC_T_DAS_SOC);
+ writel_relaxed(0x20, info->regs + SARADC_T_PD_SOC);
+ val = FIELD_PREP(SARADC2_EN_END_INT, 1);
+ val |= val << 16;
+ writel_relaxed(val, info->regs + SARADC2_END_INT_EN);
+ val = FIELD_PREP(SARADC2_START, 1) |
+ FIELD_PREP(SARADC2_SINGLE_MODE, 1) |
+ FIELD_PREP(SARADC2_CONV_CHANNELS, chn);
+ val |= val << 16;
+ writel(val, info->regs + SARADC2_CONV_CON);
+}
+
+static void rockchip_saradc_start(struct rockchip_saradc *info, int chn)
+{
+ info->data->start(info, chn);
+}
+
+static int rockchip_saradc_read_v1(struct rockchip_saradc *info)
+{
+ return readl_relaxed(info->regs + SARADC_DATA);
+}
+
+static int rockchip_saradc_read_v2(struct rockchip_saradc *info)
+{
+ int offset;
+
+ /* Clear irq */
+ writel_relaxed(0x1, info->regs + SARADC2_END_INT_ST);
+
+ offset = SARADC2_DATA_BASE + info->last_chan->channel * 0x4;
+
+ return readl_relaxed(info->regs + offset);
+}
+
+static int rockchip_saradc_read(struct rockchip_saradc *info)
+{
+ return info->data->read(info);
+}
+
+static void rockchip_saradc_power_down_v1(struct rockchip_saradc *info)
{
- /* Clear irq & power down adc */
writel_relaxed(0, info->regs + SARADC_CTRL);
}
+static void rockchip_saradc_power_down(struct rockchip_saradc *info)
+{
+ if (info->data->power_down)
+ info->data->power_down(info);
+}
+
static int rockchip_saradc_conversion(struct rockchip_saradc *info,
- struct iio_chan_spec const *chan)
+ struct iio_chan_spec const *chan)
{
reinit_completion(&info->completion);
- /* 8 clock periods as delay between power up and start cmd */
- writel_relaxed(8, info->regs + SARADC_DLY_PU_SOC);
-
info->last_chan = chan;
-
- /* Select the channel to be used and trigger conversion */
- writel(SARADC_CTRL_POWER_CTRL
- | (chan->channel & SARADC_CTRL_CHN_MASK)
- | SARADC_CTRL_IRQ_ENABLE,
- info->regs + SARADC_CTRL);
+ rockchip_saradc_start(info, chan->channel);
if (!wait_for_completion_timeout(&info->completion, SARADC_TIMEOUT))
return -ETIMEDOUT;
@@ -123,7 +199,7 @@ static irqreturn_t rockchip_saradc_isr(int irq, void *dev_id)
struct rockchip_saradc *info = dev_id;
/* Read value */
- info->last_val = readl_relaxed(info->regs + SARADC_DATA);
+ info->last_val = rockchip_saradc_read(info);
info->last_val &= GENMASK(info->last_chan->scan_type.realbits - 1, 0);
rockchip_saradc_power_down(info);
@@ -163,6 +239,9 @@ static const struct rockchip_saradc_data saradc_data = {
.channels = rockchip_saradc_iio_channels,
.num_channels = ARRAY_SIZE(rockchip_saradc_iio_channels),
.clk_rate = 1000000,
+ .start = rockchip_saradc_start_v1,
+ .read = rockchip_saradc_read_v1,
+ .power_down = rockchip_saradc_power_down_v1,
};
static const struct iio_chan_spec rockchip_rk3066_tsadc_iio_channels[] = {
@@ -174,6 +253,9 @@ static const struct rockchip_saradc_data rk3066_tsadc_data = {
.channels = rockchip_rk3066_tsadc_iio_channels,
.num_channels = ARRAY_SIZE(rockchip_rk3066_tsadc_iio_channels),
.clk_rate = 50000,
+ .start = rockchip_saradc_start_v1,
+ .read = rockchip_saradc_read_v1,
+ .power_down = rockchip_saradc_power_down_v1,
};
static const struct iio_chan_spec rockchip_rk3399_saradc_iio_channels[] = {
@@ -189,6 +271,9 @@ static const struct rockchip_saradc_data rk3399_saradc_data = {
.channels = rockchip_rk3399_saradc_iio_channels,
.num_channels = ARRAY_SIZE(rockchip_rk3399_saradc_iio_channels),
.clk_rate = 1000000,
+ .start = rockchip_saradc_start_v1,
+ .read = rockchip_saradc_read_v1,
+ .power_down = rockchip_saradc_power_down_v1,
};
static const struct iio_chan_spec rockchip_rk3568_saradc_iio_channels[] = {
@@ -206,6 +291,28 @@ static const struct rockchip_saradc_data rk3568_saradc_data = {
.channels = rockchip_rk3568_saradc_iio_channels,
.num_channels = ARRAY_SIZE(rockchip_rk3568_saradc_iio_channels),
.clk_rate = 1000000,
+ .start = rockchip_saradc_start_v1,
+ .read = rockchip_saradc_read_v1,
+ .power_down = rockchip_saradc_power_down_v1,
+};
+
+static const struct iio_chan_spec rockchip_rk3588_saradc_iio_channels[] = {
+ SARADC_CHANNEL(0, "adc0", 12),
+ SARADC_CHANNEL(1, "adc1", 12),
+ SARADC_CHANNEL(2, "adc2", 12),
+ SARADC_CHANNEL(3, "adc3", 12),
+ SARADC_CHANNEL(4, "adc4", 12),
+ SARADC_CHANNEL(5, "adc5", 12),
+ SARADC_CHANNEL(6, "adc6", 12),
+ SARADC_CHANNEL(7, "adc7", 12),
+};
+
+static const struct rockchip_saradc_data rk3588_saradc_data = {
+ .channels = rockchip_rk3588_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3588_saradc_iio_channels),
+ .clk_rate = 1000000,
+ .start = rockchip_saradc_start_v2,
+ .read = rockchip_saradc_read_v2,
};
static const struct of_device_id rockchip_saradc_match[] = {
@@ -221,6 +328,9 @@ static const struct of_device_id rockchip_saradc_match[] = {
}, {
.compatible = "rockchip,rk3568-saradc",
.data = &rk3568_saradc_data,
+ }, {
+ .compatible = "rockchip,rk3588-saradc",
+ .data = &rk3588_saradc_data,
},
{},
};
@@ -236,20 +346,6 @@ static void rockchip_saradc_reset_controller(struct reset_control *reset)
reset_control_deassert(reset);
}
-static void rockchip_saradc_clk_disable(void *data)
-{
- struct rockchip_saradc *info = data;
-
- clk_disable_unprepare(info->clk);
-}
-
-static void rockchip_saradc_pclk_disable(void *data)
-{
- struct rockchip_saradc *info = data;
-
- clk_disable_unprepare(info->pclk);
-}
-
static void rockchip_saradc_regulator_disable(void *data)
{
struct rockchip_saradc *info = data;
@@ -298,8 +394,7 @@ out:
}
static int rockchip_saradc_volt_notify(struct notifier_block *nb,
- unsigned long event,
- void *data)
+ unsigned long event, void *data)
{
struct rockchip_saradc *info =
container_of(nb, struct rockchip_saradc, nb);
@@ -319,10 +414,10 @@ static void rockchip_saradc_regulator_unreg_notifier(void *data)
static int rockchip_saradc_probe(struct platform_device *pdev)
{
+ const struct rockchip_saradc_data *match_data;
struct rockchip_saradc *info = NULL;
struct device_node *np = pdev->dev.of_node;
struct iio_dev *indio_dev = NULL;
- const struct of_device_id *match;
int ret;
int irq;
@@ -330,25 +425,23 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
return -ENODEV;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
- return -ENOMEM;
- }
+ if (!indio_dev)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "failed allocating iio device\n");
+
info = iio_priv(indio_dev);
- match = of_match_device(rockchip_saradc_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "failed to match device\n");
- return -ENODEV;
- }
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "failed to match device\n");
- info->data = match->data;
+ info->data = match_data;
/* Sanity check for possible later IP variants with more channels */
- if (info->data->num_channels > SARADC_MAX_CHANNELS) {
- dev_err(&pdev->dev, "max channels exceeded");
- return -EINVAL;
- }
+ if (info->data->num_channels > SARADC_MAX_CHANNELS)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "max channels exceeded");
info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
@@ -383,16 +476,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
return ret;
}
- info->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(info->pclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(info->pclk),
- "failed to get pclk\n");
-
- info->clk = devm_clk_get(&pdev->dev, "saradc");
- if (IS_ERR(info->clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
- "failed to get adc clock\n");
-
info->vref = devm_regulator_get(&pdev->dev, "vref");
if (IS_ERR(info->vref))
return dev_err_probe(&pdev->dev, PTR_ERR(info->vref),
@@ -406,23 +489,20 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
* This may become user-configurable in the future.
*/
ret = clk_set_rate(info->clk, info->data->clk_rate);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to set adc clk rate, %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to set adc clk rate\n");
ret = regulator_enable(info->vref);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable vref regulator\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable vref regulator\n");
+
ret = devm_add_action_or_reset(&pdev->dev,
rockchip_saradc_regulator_disable, info);
- if (ret) {
- dev_err(&pdev->dev, "failed to register devm action, %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register devm action\n");
ret = regulator_get_voltage(info->vref);
if (ret < 0)
@@ -430,31 +510,15 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
info->uv_vref = ret;
- ret = clk_prepare_enable(info->pclk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable pclk\n");
- return ret;
- }
- ret = devm_add_action_or_reset(&pdev->dev,
- rockchip_saradc_pclk_disable, info);
- if (ret) {
- dev_err(&pdev->dev, "failed to register devm action, %d\n",
- ret);
- return ret;
- }
+ info->pclk = devm_clk_get_enabled(&pdev->dev, "apb_pclk");
+ if (IS_ERR(info->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->pclk),
+ "failed to get pclk\n");
- ret = clk_prepare_enable(info->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable converter clock\n");
- return ret;
- }
- ret = devm_add_action_or_reset(&pdev->dev,
- rockchip_saradc_clk_disable, info);
- if (ret) {
- dev_err(&pdev->dev, "failed to register devm action, %d\n",
- ret);
- return ret;
- }
+ info->clk = devm_clk_get_enabled(&pdev->dev, "saradc");
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
+ "failed to get adc clock\n");
platform_set_drvdata(pdev, indio_dev);
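The probe() cleanups above replace the open-coded clk_prepare_enable() plus devm_add_action_or_reset() pairs with devm_clk_get_enabled(), which acquires, prepares and enables the clock and registers the matching cleanup in a single call. A minimal self-contained sketch of that pattern, using a hypothetical "bus" clock name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clock(struct device *dev, struct clk **out)
{
	struct clk *clk;

	/*
	 * Gets, prepares and enables the clock; it is automatically
	 * disabled, unprepared and put when the device is unbound.
	 */
	clk = devm_clk_get_enabled(dev, "bus");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get bus clock\n");

	*out = clk;
	return 0;
}

Because the cleanup is devres-managed, the explicit rockchip_saradc_clk_disable()/rockchip_saradc_pclk_disable() callbacks above could be dropped entirely.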
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index c1b2e8dc9a26..ad4cea6839b2 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -652,7 +652,7 @@ static struct i2c_driver rtq6056_driver = {
.of_match_table = rtq6056_device_match,
.pm = pm_ptr(&rtq6056_pm_ops),
},
- .probe_new = rtq6056_probe,
+ .probe = rtq6056_probe,
};
module_i2c_driver(rtq6056_driver);
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index bd7e2408bf28..f7613efb870d 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1993,6 +1993,8 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
int num_channels = 0, ret;
+ dev_dbg(&indio_dev->dev, "using legacy channel config\n");
+
ret = device_property_count_u32(dev, "st,adc-channels");
if (ret > adc_info->max_channels) {
dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index c663dc59d459..50c450e7a55f 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -235,7 +235,7 @@ static struct i2c_driver adc081c_driver = {
.of_match_table = adc081c_of_match,
.acpi_match_table = adc081c_acpi_match,
},
- .probe_new = adc081c_probe,
+ .probe = adc081c_probe,
.id_table = adc081c_id,
};
module_i2c_driver(adc081c_driver);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 56af5e148802..075c75a87544 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -1195,7 +1195,7 @@ static struct i2c_driver ads1015_driver = {
.of_match_table = ads1015_of_match,
.pm = &ads1015_pm_ops,
},
- .probe_new = ads1015_probe,
+ .probe = ads1015_probe,
.remove = ads1015_remove,
.id_table = ads1015_id,
};
diff --git a/drivers/iio/adc/ti-ads1100.c b/drivers/iio/adc/ti-ads1100.c
index 6b5aebb82455..1e46f07a9ca6 100644
--- a/drivers/iio/adc/ti-ads1100.c
+++ b/drivers/iio/adc/ti-ads1100.c
@@ -434,7 +434,7 @@ static struct i2c_driver ads1100_driver = {
.of_match_table = ads1100_of_match,
.pm = pm_ptr(&ads1100_pm_ops),
},
- .probe_new = ads1100_probe,
+ .probe = ads1100_probe,
.id_table = ads1100_id,
};
diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c
index b02abb026966..afdbd04778a8 100644
--- a/drivers/iio/adc/ti-ads7924.c
+++ b/drivers/iio/adc/ti-ads7924.c
@@ -463,7 +463,7 @@ static struct i2c_driver ads7924_driver = {
.name = "ads7924",
.of_match_table = ads7924_of_match,
},
- .probe_new = ads7924_probe,
+ .probe = ads7924_probe,
.id_table = ads7924_id,
};
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index 34cf336b3490..f0b71a1220e0 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1263,7 +1263,7 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
struct device *dev = indio_dev->dev.parent;
struct fwnode_handle *child = NULL;
struct fwnode_handle *fwnode = dev_fwnode(dev);
- size_t ams_size, dev_size;
+ size_t ams_size;
int ret, ch_cnt = 0, i, rising_off, falling_off;
unsigned int num_channels = 0;
@@ -1320,11 +1320,8 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
}
}
- dev_size = array_size(sizeof(*dev_channels), num_channels);
- if (dev_size == SIZE_MAX)
- return -ENOMEM;
-
- dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
+ dev_channels = devm_krealloc_array(dev, ams_channels, num_channels,
+ sizeof(*dev_channels), GFP_KERNEL);
if (!dev_channels)
return -ENOMEM;
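Both conversions to devm_krealloc_array() above drop the hand-rolled overflow guards (array_size() and check_mul_overflow()) because the helper performs the count-times-size multiplication with overflow checking itself and returns NULL on overflow. A minimal sketch, with a hypothetical devres-managed buffer of u32 samples:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>

static u32 *example_resize_samples(struct device *dev, u32 *buf, size_t n)
{
	/*
	 * Equivalent to devm_krealloc(dev, buf, n * sizeof(*buf), GFP_KERNEL),
	 * except the multiplication is overflow-checked: on overflow NULL is
	 * returned instead of a truncated allocation.
	 */
	return devm_krealloc_array(dev, buf, n, sizeof(*buf), GFP_KERNEL);
}

The memory stays devres-managed, so the error paths need no explicit free.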
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 292f2892d223..dba73300f894 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -613,20 +613,17 @@ static int xadc_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *mask)
{
struct xadc *xadc = iio_priv(indio_dev);
- size_t new_size, n;
+ size_t n;
void *data;
n = bitmap_weight(mask, indio_dev->masklength);
- if (check_mul_overflow(n, sizeof(*xadc->data), &new_size))
- return -ENOMEM;
-
- data = devm_krealloc(indio_dev->dev.parent, xadc->data,
- new_size, GFP_KERNEL);
+ data = devm_krealloc_array(indio_dev->dev.parent, xadc->data,
+ n, sizeof(*xadc->data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- memset(data, 0, new_size);
+ memset(data, 0, n * sizeof(*xadc->data));
xadc->data = data;
return 0;
@@ -1281,9 +1278,9 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, unsigned int *conf, int irq)
}
indio_dev->num_channels = num_channels;
- indio_dev->channels = devm_krealloc(dev, channels,
- sizeof(*channels) * num_channels,
- GFP_KERNEL);
+ indio_dev->channels = devm_krealloc_array(dev, channels,
+ num_channels, sizeof(*channels),
+ GFP_KERNEL);
/* If we can't resize the channels array, just use the original */
if (!indio_dev->channels)
indio_dev->channels = channels;
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index e3366cf5eb31..6b0e8218f150 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -1317,13 +1317,14 @@ static int ad74413r_setup_gpios(struct ad74413r_state *st)
}
if (config->func == CH_FUNC_DIGITAL_INPUT_LOGIC ||
- config->func == CH_FUNC_DIGITAL_INPUT_LOOP_POWER)
+ config->func == CH_FUNC_DIGITAL_INPUT_LOOP_POWER) {
st->comp_gpio_offsets[comp_gpio_i++] = i;
- strength = config->drive_strength;
- ret = ad74413r_set_comp_drive_strength(st, i, strength);
- if (ret)
- return ret;
+ strength = config->drive_strength;
+ ret = ad74413r_set_comp_drive_strength(st, i, strength);
+ if (ret)
+ return ret;
+ }
ret = ad74413r_set_gpo_config(st, i, gpo_config);
if (ret)
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index f2c2ea79a07f..8d8c8ea94258 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -281,7 +281,7 @@ static int ad8366_probe(struct spi_device *spi)
indio_dev->info = &ad8366_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = ad8366_write(indio_dev, 0 , 0);
+ ret = ad8366_write(indio_dev, 0, 0);
if (ret < 0)
goto error_disable_reg;
diff --git a/drivers/iio/cdc/ad7150.c b/drivers/iio/cdc/ad7150.c
index 79aeb0aaea67..d656d2f12755 100644
--- a/drivers/iio/cdc/ad7150.c
+++ b/drivers/iio/cdc/ad7150.c
@@ -647,7 +647,7 @@ static struct i2c_driver ad7150_driver = {
.name = "ad7150",
.of_match_table = ad7150_of_match,
},
- .probe_new = ad7150_probe,
+ .probe = ad7150_probe,
.id_table = ad7150_id,
};
module_i2c_driver(ad7150_driver);
diff --git a/drivers/iio/cdc/ad7746.c b/drivers/iio/cdc/ad7746.c
index a1db5469f2d1..d11bc3496dda 100644
--- a/drivers/iio/cdc/ad7746.c
+++ b/drivers/iio/cdc/ad7746.c
@@ -809,7 +809,7 @@ static struct i2c_driver ad7746_driver = {
.name = KBUILD_MODNAME,
.of_match_table = ad7746_of_match,
},
- .probe_new = ad7746_probe,
+ .probe = ad7746_probe,
.id_table = ad7746_id,
};
module_i2c_driver(ad7746_driver);
diff --git a/drivers/iio/chemical/ams-iaq-core.c b/drivers/iio/chemical/ams-iaq-core.c
index 0a0fbcdd4469..164facac5db6 100644
--- a/drivers/iio/chemical/ams-iaq-core.c
+++ b/drivers/iio/chemical/ams-iaq-core.c
@@ -179,7 +179,7 @@ static struct i2c_driver ams_iaqcore_driver = {
.name = "ams-iaq-core",
.of_match_table = ams_iaqcore_dt_ids,
},
- .probe_new = ams_iaqcore_probe,
+ .probe = ams_iaqcore_probe,
.id_table = ams_iaqcore_id,
};
module_i2c_driver(ams_iaqcore_driver);
diff --git a/drivers/iio/chemical/atlas-ezo-sensor.c b/drivers/iio/chemical/atlas-ezo-sensor.c
index 307c3488f4bd..8fc926a2d33b 100644
--- a/drivers/iio/chemical/atlas-ezo-sensor.c
+++ b/drivers/iio/chemical/atlas-ezo-sensor.c
@@ -238,7 +238,7 @@ static struct i2c_driver atlas_ezo_driver = {
.name = ATLAS_EZO_DRV_NAME,
.of_match_table = atlas_ezo_dt_ids,
},
- .probe_new = atlas_ezo_probe,
+ .probe = atlas_ezo_probe,
.id_table = atlas_ezo_id,
};
module_i2c_driver(atlas_ezo_driver);
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 024657bc59e1..fb15bb216019 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -767,7 +767,7 @@ static struct i2c_driver atlas_driver = {
.of_match_table = atlas_dt_ids,
.pm = pm_ptr(&atlas_pm_ops),
},
- .probe_new = atlas_probe,
+ .probe = atlas_probe,
.remove = atlas_remove,
.id_table = atlas_id,
};
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index 61b12079858d..1c7076cf91ca 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -52,7 +52,7 @@ static struct i2c_driver bme680_i2c_driver = {
.name = "bme680_i2c",
.of_match_table = bme680_of_i2c_match,
},
- .probe_new = bme680_i2c_probe,
+ .probe = bme680_i2c_probe,
.id_table = bme680_i2c_id,
};
module_i2c_driver(bme680_i2c_driver);
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 6ead80c08924..87741f155c32 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -567,7 +567,7 @@ static struct i2c_driver ccs811_driver = {
.name = "ccs811",
.of_match_table = ccs811_dt_ids,
},
- .probe_new = ccs811_probe,
+ .probe = ccs811_probe,
.remove = ccs811_remove,
.id_table = ccs811_id,
};
diff --git a/drivers/iio/chemical/scd30_i2c.c b/drivers/iio/chemical/scd30_i2c.c
index bae479a4721f..bd3b01ded246 100644
--- a/drivers/iio/chemical/scd30_i2c.c
+++ b/drivers/iio/chemical/scd30_i2c.c
@@ -130,7 +130,7 @@ static struct i2c_driver scd30_i2c_driver = {
.of_match_table = scd30_i2c_of_match,
.pm = pm_sleep_ptr(&scd30_pm_ops),
},
- .probe_new = scd30_i2c_probe,
+ .probe = scd30_i2c_probe,
};
module_i2c_driver(scd30_i2c_driver);
diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c
index f7ed9455b3c8..a4f22d926400 100644
--- a/drivers/iio/chemical/scd4x.c
+++ b/drivers/iio/chemical/scd4x.c
@@ -690,7 +690,7 @@ static struct i2c_driver scd4x_i2c_driver = {
.of_match_table = scd4x_dt_ids,
.pm = pm_sleep_ptr(&scd4x_pm_ops),
},
- .probe_new = scd4x_probe,
+ .probe = scd4x_probe,
};
module_i2c_driver(scd4x_i2c_driver);
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
index 9d0c68485b63..b509cff9ce37 100644
--- a/drivers/iio/chemical/sgp30.c
+++ b/drivers/iio/chemical/sgp30.c
@@ -575,7 +575,7 @@ static struct i2c_driver sgp_driver = {
.name = "sgp30",
.of_match_table = sgp_dt_ids,
},
- .probe_new = sgp_probe,
+ .probe = sgp_probe,
.remove = sgp_remove,
.id_table = sgp_id,
};
diff --git a/drivers/iio/chemical/sgp40.c b/drivers/iio/chemical/sgp40.c
index c0ea01300908..7f0de14a1956 100644
--- a/drivers/iio/chemical/sgp40.c
+++ b/drivers/iio/chemical/sgp40.c
@@ -368,7 +368,7 @@ static struct i2c_driver sgp40_driver = {
.name = "sgp40",
.of_match_table = sgp40_dt_ids,
},
- .probe_new = sgp40_probe,
+ .probe = sgp40_probe,
.id_table = sgp40_id,
};
module_i2c_driver(sgp40_driver);
diff --git a/drivers/iio/chemical/sps30_i2c.c b/drivers/iio/chemical/sps30_i2c.c
index 0cb5d9b65d62..5c31299813ec 100644
--- a/drivers/iio/chemical/sps30_i2c.c
+++ b/drivers/iio/chemical/sps30_i2c.c
@@ -249,7 +249,7 @@ static struct i2c_driver sps30_i2c_driver = {
.of_match_table = sps30_i2c_of_match,
},
.id_table = sps30_i2c_id,
- .probe_new = sps30_i2c_probe,
+ .probe = sps30_i2c_probe,
};
module_i2c_driver(sps30_i2c_driver);
diff --git a/drivers/iio/chemical/sunrise_co2.c b/drivers/iio/chemical/sunrise_co2.c
index 8440dc0c77cf..cdb8696a4e81 100644
--- a/drivers/iio/chemical/sunrise_co2.c
+++ b/drivers/iio/chemical/sunrise_co2.c
@@ -528,7 +528,7 @@ static struct i2c_driver sunrise_driver = {
.name = DRIVER_NAME,
.of_match_table = sunrise_of_match,
},
- .probe_new = sunrise_probe,
+ .probe = sunrise_probe,
};
module_i2c_driver(sunrise_driver);
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index d4604f7ccd1e..13555f4f401a 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -402,7 +402,7 @@ static struct i2c_driver vz89x_driver = {
.name = "vz89x",
.of_match_table = vz89x_dt_ids,
},
- .probe_new = vz89x_probe,
+ .probe = vz89x_probe,
.id_table = vz89x_id,
};
module_i2c_driver(vz89x_driver);
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index f01249c1ba93..7712dc6be608 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -1056,7 +1056,7 @@ static struct i2c_driver ad5064_i2c_driver = {
.driver = {
.name = "ad5064",
},
- .probe_new = ad5064_i2c_probe,
+ .probe = ad5064_i2c_probe,
.id_table = ad5064_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 64b4519f8f5e..2e3e33f92bc0 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -589,7 +589,7 @@ static struct i2c_driver ad5380_i2c_driver = {
.driver = {
.name = "ad5380",
},
- .probe_new = ad5380_i2c_probe,
+ .probe = ad5380_i2c_probe,
.remove = ad5380_i2c_remove,
.id_table = ad5380_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index aa3130b33456..8103d2cd13f6 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -595,7 +595,7 @@ static struct i2c_driver ad5446_i2c_driver = {
.driver = {
.name = "ad5446",
},
- .probe_new = ad5446_i2c_probe,
+ .probe = ad5446_i2c_probe,
.remove = ad5446_i2c_remove,
.id_table = ad5446_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index d311567ab324..fae5e5a0e72f 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -138,7 +138,7 @@ static struct i2c_driver ad5593r_driver = {
.of_match_table = ad5593r_of_match,
.acpi_match_table = ad5593r_acpi_match,
},
- .probe_new = ad5593r_i2c_probe,
+ .probe = ad5593r_i2c_probe,
.remove = ad5593r_i2c_remove,
.id_table = ad5593r_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index 8a95f0278018..81541f755a3e 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -115,7 +115,7 @@ static struct i2c_driver ad5686_i2c_driver = {
.name = "ad5696",
.of_match_table = ad5686_of_match,
},
- .probe_new = ad5686_i2c_probe,
+ .probe = ad5686_i2c_probe,
.remove = ad5686_i2c_remove,
.id_table = ad5686_i2c_id,
};
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index a16a6a934d9d..e89e4c054653 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -312,7 +312,7 @@ static struct i2c_driver ds4424_driver = {
.of_match_table = ds4424_of_match,
.pm = pm_sleep_ptr(&ds4424_pm_ops),
},
- .probe_new = ds4424_probe,
+ .probe = ds4424_probe,
.remove = ds4424_remove,
.id_table = ds4424_id,
};
diff --git a/drivers/iio/dac/m62332.c b/drivers/iio/dac/m62332.c
index b692459b0f23..ae53baccec91 100644
--- a/drivers/iio/dac/m62332.c
+++ b/drivers/iio/dac/m62332.c
@@ -238,7 +238,7 @@ static struct i2c_driver m62332_driver = {
.name = "m62332",
.pm = pm_sleep_ptr(&m62332_pm_ops),
},
- .probe_new = m62332_probe,
+ .probe = m62332_probe,
.remove = m62332_remove,
.id_table = m62332_id,
};
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index 25967c39365d..685980184d3c 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -203,7 +203,7 @@ static struct i2c_driver max517_driver = {
.name = MAX517_DRV_NAME,
.pm = pm_sleep_ptr(&max517_pm_ops),
},
- .probe_new = max517_probe,
+ .probe = max517_probe,
.id_table = max517_id,
};
module_i2c_driver(max517_driver);
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 23da345b9250..18ba3eaaad75 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -377,7 +377,7 @@ static struct i2c_driver max5821_driver = {
.of_match_table = max5821_of_match,
.pm = pm_sleep_ptr(&max5821_pm_ops),
},
- .probe_new = max5821_probe,
+ .probe = max5821_probe,
.id_table = max5821_id,
};
module_i2c_driver(max5821_driver);
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 3f5661a3718f..f4a3124d29f2 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -536,7 +536,7 @@ static struct i2c_driver mcp4725_driver = {
.of_match_table = mcp4725_of_match,
.pm = pm_sleep_ptr(&mcp4725_pm_ops),
},
- .probe_new = mcp4725_probe,
+ .probe = mcp4725_probe,
.remove = mcp4725_remove,
.id_table = mcp4725_id,
};
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 40191947fea3..bab11b9adc25 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -426,7 +426,7 @@ static struct i2c_driver dac5571_driver = {
.name = "ti-dac5571",
.of_match_table = dac5571_of_id,
},
- .probe_new = dac5571_probe,
+ .probe = dac5571_probe,
.remove = dac5571_remove,
.id_table = dac5571_id,
};
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 2b019ee5b2eb..2f9675596138 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -70,7 +70,7 @@ static struct i2c_driver bmg160_i2c_driver = {
.of_match_table = bmg160_of_match,
.pm = &bmg160_pm_ops,
},
- .probe_new = bmg160_i2c_probe,
+ .probe = bmg160_i2c_probe,
.remove = bmg160_i2c_remove,
.id_table = bmg160_i2c_id,
};
diff --git a/drivers/iio/gyro/fxas21002c_i2c.c b/drivers/iio/gyro/fxas21002c_i2c.c
index 9e2d0f34a672..ee7f21b718e2 100644
--- a/drivers/iio/gyro/fxas21002c_i2c.c
+++ b/drivers/iio/gyro/fxas21002c_i2c.c
@@ -56,7 +56,7 @@ static struct i2c_driver fxas21002c_i2c_driver = {
.pm = pm_ptr(&fxas21002c_pm_ops),
.of_match_table = fxas21002c_i2c_of_match,
},
- .probe_new = fxas21002c_i2c_probe,
+ .probe = fxas21002c_i2c_probe,
.remove = fxas21002c_i2c_remove,
.id_table = fxas21002c_i2c_id,
};
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index ceacd863d3ea..53fb92f0ac7e 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -405,7 +405,7 @@ static struct i2c_driver itg3200_driver = {
.pm = pm_sleep_ptr(&itg3200_pm_ops),
},
.id_table = itg3200_id,
- .probe_new = itg3200_probe,
+ .probe = itg3200_probe,
.remove = itg3200_remove,
};
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 2116798226bf..52b6feed2637 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -108,7 +108,7 @@ static const struct of_device_id mpu3050_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, mpu3050_i2c_of_match);
static struct i2c_driver mpu3050_i2c_driver = {
- .probe_new = mpu3050_i2c_probe,
+ .probe = mpu3050_i2c_probe,
.remove = mpu3050_i2c_remove,
.id_table = mpu3050_i2c_id,
.driver = {
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 797a1c6a0402..5a10a3556ab0 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -111,7 +111,7 @@ static struct i2c_driver st_gyro_driver = {
.name = "st-gyro-i2c",
.of_match_table = st_gyro_of_match,
},
- .probe_new = st_gyro_i2c_probe,
+ .probe = st_gyro_i2c_probe,
.id_table = st_gyro_id_table,
};
module_i2c_driver(st_gyro_driver);
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 21a6378b7052..ede1e8201311 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -609,7 +609,7 @@ static struct i2c_driver afe4404_i2c_driver = {
.of_match_table = afe4404_of_match,
.pm = pm_sleep_ptr(&afe4404_pm_ops),
},
- .probe_new = afe4404_probe,
+ .probe = afe4404_probe,
.remove = afe4404_remove,
.id_table = afe4404_ids,
};
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index a80fa9852c22..6236b4d96137 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -499,7 +499,7 @@ static struct i2c_driver max30100_driver = {
.name = MAX30100_DRV_NAME,
.of_match_table = max30100_dt_ids,
},
- .probe_new = max30100_probe,
+ .probe = max30100_probe,
.remove = max30100_remove,
.id_table = max30100_id,
};
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 7edcf9e05687..37e619827e8a 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -631,7 +631,7 @@ static struct i2c_driver max30102_driver = {
.name = MAX30102_DRV_NAME,
.of_match_table = max30102_dt_ids,
},
- .probe_new = max30102_probe,
+ .probe = max30102_probe,
.remove = max30102_remove,
.id_table = max30102_id,
};
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index f246516bd45e..37a35d1153d5 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -262,7 +262,7 @@ static struct i2c_driver am2315_driver = {
.driver = {
.name = "am2315",
},
- .probe_new = am2315_probe,
+ .probe = am2315_probe,
.id_table = am2315_i2c_id,
};
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 49a950d739e4..202014da2785 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -428,7 +428,7 @@ static struct i2c_driver hdc100x_driver = {
.of_match_table = hdc100x_dt_ids,
.acpi_match_table = hdc100x_acpi_match,
},
- .probe_new = hdc100x_probe,
+ .probe = hdc100x_probe,
.id_table = hdc100x_id,
};
module_i2c_driver(hdc100x_driver);
diff --git a/drivers/iio/humidity/hdc2010.c b/drivers/iio/humidity/hdc2010.c
index c8fddd612e06..f5867659e00f 100644
--- a/drivers/iio/humidity/hdc2010.c
+++ b/drivers/iio/humidity/hdc2010.c
@@ -338,7 +338,7 @@ static struct i2c_driver hdc2010_driver = {
.name = "hdc2010",
.of_match_table = hdc2010_dt_ids,
},
- .probe_new = hdc2010_probe,
+ .probe = hdc2010_probe,
.remove = hdc2010_remove,
.id_table = hdc2010_id,
};
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index d81869423cf0..30f2068ea156 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -65,7 +65,7 @@ static struct i2c_driver hts221_driver = {
.of_match_table = hts221_i2c_of_match,
.acpi_match_table = ACPI_PTR(hts221_acpi_match),
},
- .probe_new = hts221_i2c_probe,
+ .probe = hts221_i2c_probe,
.id_table = hts221_i2c_id_table,
};
module_i2c_driver(hts221_driver);
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index 8411a9f3e828..39e886075299 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -244,7 +244,7 @@ static const struct of_device_id htu21_of_match[] = {
MODULE_DEVICE_TABLE(of, htu21_of_match);
static struct i2c_driver htu21_driver = {
- .probe_new = htu21_probe,
+ .probe = htu21_probe,
.id_table = htu21_id,
.driver = {
.name = "htu21",
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index fa1faf168c8d..ebfb79bc9edc 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -173,7 +173,7 @@ static struct i2c_driver si7005_driver = {
.driver = {
.name = "si7005",
},
- .probe_new = si7005_probe,
+ .probe = si7005_probe,
.id_table = si7005_id,
};
module_i2c_driver(si7005_driver);
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 3e50592e8e68..fb1006649328 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -155,7 +155,7 @@ static struct i2c_driver si7020_driver = {
.name = "si7020",
.of_match_table = si7020_dt_ids,
},
- .probe_new = si7020_probe,
+ .probe = si7020_probe,
.id_table = si7020_id,
};
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index 2ca907d396a0..81652c08e644 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -60,7 +60,7 @@ static struct i2c_driver bmi160_i2c_driver = {
.acpi_match_table = bmi160_acpi_match,
.of_match_table = bmi160_of_match,
},
- .probe_new = bmi160_i2c_probe,
+ .probe = bmi160_i2c_probe,
.id_table = bmi160_i2c_id,
};
module_i2c_driver(bmi160_i2c_driver);
diff --git a/drivers/iio/imu/bno055/bno055_i2c.c b/drivers/iio/imu/bno055/bno055_i2c.c
index c1bbc0fe34f9..6ecd750c6b76 100644
--- a/drivers/iio/imu/bno055/bno055_i2c.c
+++ b/drivers/iio/imu/bno055/bno055_i2c.c
@@ -46,7 +46,7 @@ static struct i2c_driver bno055_driver = {
.name = "bno055-i2c",
.of_match_table = bno055_i2c_of_match,
},
- .probe_new = bno055_i2c_probe,
+ .probe = bno055_i2c_probe,
.id_table = bno055_i2c_id,
};
module_i2c_driver(bno055_driver);
diff --git a/drivers/iio/imu/fxos8700_i2c.c b/drivers/iio/imu/fxos8700_i2c.c
index a74a15fda8cb..2ace306d0f9a 100644
--- a/drivers/iio/imu/fxos8700_i2c.c
+++ b/drivers/iio/imu/fxos8700_i2c.c
@@ -60,7 +60,7 @@ static struct i2c_driver fxos8700_i2c_driver = {
.acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
.of_match_table = fxos8700_of_match,
},
- .probe_new = fxos8700_i2c_probe,
+ .probe = fxos8700_i2c_probe,
.id_table = fxos8700_i2c_id,
};
module_i2c_driver(fxos8700_i2c_driver);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
index eb2681ad375f..1af559403ba6 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
@@ -98,7 +98,7 @@ static struct i2c_driver inv_icm42600_driver = {
.of_match_table = inv_icm42600_of_matches,
.pm = pm_ptr(&inv_icm42600_pm_ops),
},
- .probe_new = inv_icm42600_probe,
+ .probe = inv_icm42600_probe,
};
module_i2c_driver(inv_icm42600_driver);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.c
index 7f2dc41f807b..37cbf08acb3a 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.c
@@ -93,8 +93,8 @@ static bool inv_validate_period(uint32_t period, uint32_t mult)
return false;
}
-static bool inv_compute_chip_period(struct inv_icm42600_timestamp *ts,
- uint32_t mult, uint32_t period)
+static bool inv_update_chip_period(struct inv_icm42600_timestamp *ts,
+ uint32_t mult, uint32_t period)
{
uint32_t new_chip_period;
@@ -104,10 +104,31 @@ static bool inv_compute_chip_period(struct inv_icm42600_timestamp *ts,
/* update chip internal period estimation */
new_chip_period = period / mult;
inv_update_acc(&ts->chip_period, new_chip_period);
+ ts->period = ts->mult * ts->chip_period.val;
return true;
}
+static void inv_align_timestamp_it(struct inv_icm42600_timestamp *ts)
+{
+ int64_t delta, jitter;
+ int64_t adjust;
+
+ /* delta time between last sample and last interrupt */
+ delta = ts->it.lo - ts->timestamp;
+
+ /* adjust timestamp while respecting jitter */
+ jitter = div_s64((int64_t)ts->period * INV_ICM42600_TIMESTAMP_JITTER, 100);
+ if (delta > jitter)
+ adjust = jitter;
+ else if (delta < -jitter)
+ adjust = -jitter;
+ else
+ adjust = 0;
+
+ ts->timestamp += adjust;
+}
+
void inv_icm42600_timestamp_interrupt(struct inv_icm42600_timestamp *ts,
uint32_t fifo_period, size_t fifo_nb,
size_t sensor_nb, int64_t timestamp)
@@ -116,7 +137,6 @@ void inv_icm42600_timestamp_interrupt(struct inv_icm42600_timestamp *ts,
int64_t delta, interval;
const uint32_t fifo_mult = fifo_period / INV_ICM42600_TIMESTAMP_PERIOD;
uint32_t period = ts->period;
- int32_t m;
bool valid = false;
if (fifo_nb == 0)
@@ -130,10 +150,7 @@ void inv_icm42600_timestamp_interrupt(struct inv_icm42600_timestamp *ts,
if (it->lo != 0) {
/* compute period: delta time divided by number of samples */
period = div_s64(delta, fifo_nb);
- valid = inv_compute_chip_period(ts, fifo_mult, period);
- /* update sensor period if chip internal period is updated */
- if (valid)
- ts->period = ts->mult * ts->chip_period.val;
+ valid = inv_update_chip_period(ts, fifo_mult, period);
}
	/* no previous data, compute theoretical value from interrupt */
@@ -145,22 +162,8 @@ void inv_icm42600_timestamp_interrupt(struct inv_icm42600_timestamp *ts,
}
/* if interrupt interval is valid, sync with interrupt timestamp */
- if (valid) {
- /* compute measured fifo_period */
- fifo_period = fifo_mult * ts->chip_period.val;
- /* delta time between last sample and last interrupt */
- delta = it->lo - ts->timestamp;
- /* if there are multiple samples, go back to first one */
- while (delta >= (fifo_period * 3 / 2))
- delta -= fifo_period;
- /* compute maximal adjustment value */
- m = INV_ICM42600_TIMESTAMP_MAX_PERIOD(ts->period) - ts->period;
- if (delta > m)
- delta = m;
- else if (delta < -m)
- delta = -m;
- ts->timestamp += delta;
- }
+ if (valid)
+ inv_align_timestamp_it(ts);
}
void inv_icm42600_timestamp_apply_odr(struct inv_icm42600_timestamp *ts,
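The new inv_align_timestamp_it() helper above limits how far the estimated sample timestamp may be pulled toward the interrupt timestamp: when the discrepancy is within INV_ICM42600_TIMESTAMP_JITTER percent of the current period the timestamp is left untouched, otherwise it is nudged by exactly that allowance, replacing the earlier scheme that clamped against INV_ICM42600_TIMESTAMP_MAX_PERIOD(). A standalone sketch of the clamp; the 2% figure is an assumed value for illustration, not taken from this patch:

#include <linux/math64.h>
#include <linux/types.h>

#define EXAMPLE_JITTER_PCT	2	/* assumed percentage, for illustration */

static s64 example_timestamp_adjust(s64 delta, u32 period_ns)
{
	/* Allowance: EXAMPLE_JITTER_PCT percent of one sampling period. */
	s64 jitter = div_s64((s64)period_ns * EXAMPLE_JITTER_PCT, 100);

	/*
	 * Nudge by at most one jitter allowance per interrupt; leave the
	 * timestamp untouched when the discrepancy is within the allowance.
	 */
	if (delta > jitter)
		return jitter;
	if (delta < -jitter)
		return -jitter;
	return 0;
}

With the assumed 2% value and a 1,000,000 ns period (1 kHz ODR), the correction applied per interrupt would be at most 20,000 ns.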
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 3636b1bc90f1..64dd73dcc4ba 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -16,7 +16,7 @@ config INV_MPU6050_I2C
select REGMAP_I2C
help
This driver supports the Invensense MPU6050/9150,
- MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690
+ MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20600/20602/20690
and IAM20680 motion tracking devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -28,7 +28,7 @@ config INV_MPU6050_SPI
select REGMAP_SPI
help
This driver supports the Invensense MPU6000,
- MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690
+ MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20600/20602/20690
and IAM20680 motion tracking devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 8a129120b73d..592a6e60b413 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -245,6 +245,15 @@ static const struct inv_mpu6050_hw hw_info[] = {
.startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME},
},
{
+ .whoami = INV_ICM20600_WHOAMI_VALUE,
+ .name = "ICM20600",
+ .reg = &reg_set_icm20602,
+ .config = &chip_config_6500,
+ .fifo_size = 1008,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+ .startup_time = {INV_ICM20602_GYRO_STARTUP_TIME, INV_ICM20602_ACCEL_STARTUP_TIME},
+ },
+ {
.whoami = INV_ICM20602_WHOAMI_VALUE,
.name = "ICM20602",
.reg = &reg_set_icm20602,
@@ -1597,6 +1606,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
break;
+ case INV_ICM20600:
case INV_ICM20602:
indio_dev->channels = inv_mpu_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 2f2da4cb7321..410ea39fd495 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -32,6 +32,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
case INV_ICM20608D:
case INV_ICM20609:
case INV_ICM20689:
+ case INV_ICM20600:
case INV_ICM20602:
case INV_IAM20680:
/* no i2c auxiliary bus on the chip */
@@ -183,6 +184,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"icm20608d", INV_ICM20608D},
{"icm20609", INV_ICM20609},
{"icm20689", INV_ICM20689},
+ {"icm20600", INV_ICM20600},
{"icm20602", INV_ICM20602},
{"icm20690", INV_ICM20690},
{"iam20680", INV_IAM20680},
@@ -237,6 +239,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20689
},
{
+ .compatible = "invensense,icm20600",
+ .data = (void *)INV_ICM20600
+ },
+ {
.compatible = "invensense,icm20602",
.data = (void *)INV_ICM20602
},
@@ -259,7 +265,7 @@ static const struct acpi_device_id inv_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
static struct i2c_driver inv_mpu_driver = {
- .probe_new = inv_mpu_probe,
+ .probe = inv_mpu_probe,
.remove = inv_mpu_remove,
.id_table = inv_mpu_id,
.driver = {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 94b54c501ec0..b4ab2c397d0f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -79,6 +79,7 @@ enum inv_devices {
INV_ICM20608D,
INV_ICM20609,
INV_ICM20689,
+ INV_ICM20600,
INV_ICM20602,
INV_ICM20690,
INV_IAM20680,
@@ -398,6 +399,7 @@ struct inv_mpu6050_state {
#define INV_ICM20608D_WHOAMI_VALUE 0xAE
#define INV_ICM20609_WHOAMI_VALUE 0xA6
#define INV_ICM20689_WHOAMI_VALUE 0x98
+#define INV_ICM20600_WHOAMI_VALUE 0x11
#define INV_ICM20602_WHOAMI_VALUE 0x12
#define INV_ICM20690_WHOAMI_VALUE 0x20
#define INV_IAM20680_WHOAMI_VALUE 0xA9
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 89f46c2f213d..05451ca1580b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -76,6 +76,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"icm20608d", INV_ICM20608D},
{"icm20609", INV_ICM20609},
{"icm20689", INV_ICM20689},
+ {"icm20600", INV_ICM20600},
{"icm20602", INV_ICM20602},
{"icm20690", INV_ICM20690},
{"iam20680", INV_IAM20680},
@@ -126,6 +127,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20689
},
{
+ .compatible = "invensense,icm20600",
+ .data = (void *)INV_ICM20600
+ },
+ {
.compatible = "invensense,icm20602",
.data = (void *)INV_ICM20602
},
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 53ba020fa5d0..958167b31241 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1517,7 +1517,7 @@ static struct i2c_driver kmx61_driver = {
.acpi_match_table = ACPI_PTR(kmx61_acpi_match),
.pm = pm_ptr(&kmx61_pm_ops),
},
- .probe_new = kmx61_probe,
+ .probe = kmx61_probe,
.remove = kmx61_remove,
.id_table = kmx61_id,
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 020717f92363..911444ec57c0 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -179,7 +179,7 @@ static struct i2c_driver st_lsm6dsx_driver = {
.of_match_table = st_lsm6dsx_i2c_of_match,
.acpi_match_table = st_lsm6dsx_i2c_acpi_match,
},
- .probe_new = st_lsm6dsx_i2c_probe,
+ .probe = st_lsm6dsx_i2c_probe,
.id_table = st_lsm6dsx_i2c_id_table,
};
module_i2c_driver(st_lsm6dsx_driver);
diff --git a/drivers/iio/imu/st_lsm9ds0/Kconfig b/drivers/iio/imu/st_lsm9ds0/Kconfig
index d29558edee60..7aef714b6ecb 100644
--- a/drivers/iio/imu/st_lsm9ds0/Kconfig
+++ b/drivers/iio/imu/st_lsm9ds0/Kconfig
@@ -10,7 +10,8 @@ config IIO_ST_LSM9DS0
help
Say yes here to build support for STMicroelectronics LSM9DS0 IMU
- sensor. Supported devices: accelerometer/magnetometer of lsm9ds0.
+ sensor. Supported devices: accelerometer/magnetometer of lsm9ds0
+ and lsm303d.
To compile this driver as a module, choose M here: the module
will be called st_lsm9ds0.
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
index a90138d8b06a..61d855083aa0 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
@@ -19,6 +19,10 @@
static const struct of_device_id st_lsm9ds0_of_match[] = {
{
+ .compatible = "st,lsm303d-imu",
+ .data = LSM303D_IMU_DEV_NAME,
+ },
+ {
.compatible = "st,lsm9ds0-imu",
.data = LSM9DS0_IMU_DEV_NAME,
},
@@ -27,11 +31,18 @@ static const struct of_device_id st_lsm9ds0_of_match[] = {
MODULE_DEVICE_TABLE(of, st_lsm9ds0_of_match);
static const struct i2c_device_id st_lsm9ds0_id_table[] = {
+ { LSM303D_IMU_DEV_NAME },
{ LSM9DS0_IMU_DEV_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, st_lsm9ds0_id_table);
+static const struct acpi_device_id st_lsm9ds0_acpi_match[] = {
+ {"ACCL0001", (kernel_ulong_t)LSM303D_IMU_DEV_NAME},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, st_lsm9ds0_acpi_match);
+
static const struct regmap_config st_lsm9ds0_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -68,8 +79,9 @@ static struct i2c_driver st_lsm9ds0_driver = {
.driver = {
.name = "st-lsm9ds0-i2c",
.of_match_table = st_lsm9ds0_of_match,
+ .acpi_match_table = st_lsm9ds0_acpi_match,
},
- .probe_new = st_lsm9ds0_i2c_probe,
+ .probe = st_lsm9ds0_i2c_probe,
.id_table = st_lsm9ds0_id_table,
};
module_i2c_driver(st_lsm9ds0_driver);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
index b743bf3546a7..8cc041d56cf7 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
@@ -19,6 +19,10 @@
static const struct of_device_id st_lsm9ds0_of_match[] = {
{
+ .compatible = "st,lsm303d-imu",
+ .data = LSM303D_IMU_DEV_NAME,
+ },
+ {
.compatible = "st,lsm9ds0-imu",
.data = LSM9DS0_IMU_DEV_NAME,
},
@@ -27,6 +31,7 @@ static const struct of_device_id st_lsm9ds0_of_match[] = {
MODULE_DEVICE_TABLE(of, st_lsm9ds0_of_match);
static const struct spi_device_id st_lsm9ds0_id_table[] = {
+ { LSM303D_IMU_DEV_NAME },
{ LSM9DS0_IMU_DEV_NAME },
{}
};
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index a7a080bed180..176d31d9f9d8 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -194,7 +194,7 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
written = 0;
add_wait_queue(&rb->pollq, &wait);
do {
- if (indio_dev->info == NULL)
+ if (!indio_dev->info)
return -ENODEV;
if (!iio_buffer_space_available(rb)) {
@@ -210,7 +210,7 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
}
wait_woken(&wait, TASK_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
+ MAX_SCHEDULE_TIMEOUT);
continue;
}
@@ -242,7 +242,7 @@ static __poll_t iio_buffer_poll(struct file *filp,
struct iio_buffer *rb = ib->buffer;
struct iio_dev *indio_dev = ib->indio_dev;
- if (!indio_dev->info || rb == NULL)
+ if (!indio_dev->info || !rb)
return 0;
poll_wait(filp, &rb->pollq, wait);
@@ -407,9 +407,9 @@ static ssize_t iio_scan_el_show(struct device *dev,
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
- unsigned int masklength,
- const unsigned long *mask,
- bool strict)
+ unsigned int masklength,
+ const unsigned long *mask,
+ bool strict)
{
if (bitmap_empty(mask, masklength))
return NULL;
@@ -427,7 +427,7 @@ static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
}
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
- const unsigned long *mask)
+ const unsigned long *mask)
{
if (!indio_dev->setup_ops->validate_scan_mask)
return true;
@@ -446,7 +446,7 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
* individual buffers request is plausible.
*/
static int iio_scan_mask_set(struct iio_dev *indio_dev,
- struct iio_buffer *buffer, int bit)
+ struct iio_buffer *buffer, int bit)
{
const unsigned long *mask;
unsigned long *trialmask;
@@ -539,7 +539,6 @@ error_ret:
mutex_unlock(&iio_dev_opaque->mlock);
return ret < 0 ? ret : len;
-
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
@@ -706,7 +705,7 @@ static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
- const unsigned long *mask, bool timestamp)
+ const unsigned long *mask, bool timestamp)
{
unsigned int bytes = 0;
int length, i, largest = 0;
@@ -732,7 +731,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
}
static void iio_buffer_activate(struct iio_dev *indio_dev,
- struct iio_buffer *buffer)
+ struct iio_buffer *buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
@@ -753,12 +752,12 @@ static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
struct iio_buffer *buffer, *_buffer;
list_for_each_entry_safe(buffer, _buffer,
- &iio_dev_opaque->buffer_list, buffer_list)
+ &iio_dev_opaque->buffer_list, buffer_list)
iio_buffer_deactivate(buffer);
}
static int iio_buffer_enable(struct iio_buffer *buffer,
- struct iio_dev *indio_dev)
+ struct iio_dev *indio_dev)
{
if (!buffer->access->enable)
return 0;
@@ -766,7 +765,7 @@ static int iio_buffer_enable(struct iio_buffer *buffer,
}
static int iio_buffer_disable(struct iio_buffer *buffer,
- struct iio_dev *indio_dev)
+ struct iio_dev *indio_dev)
{
if (!buffer->access->disable)
return 0;
@@ -774,7 +773,7 @@ static int iio_buffer_disable(struct iio_buffer *buffer,
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
- struct iio_buffer *buffer)
+ struct iio_buffer *buffer)
{
unsigned int bytes;
@@ -782,13 +781,13 @@ static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
return;
bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
- buffer->scan_timestamp);
+ buffer->scan_timestamp);
buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int iio_buffer_request_update(struct iio_dev *indio_dev,
- struct iio_buffer *buffer)
+ struct iio_buffer *buffer)
{
int ret;
@@ -797,7 +796,7 @@ static int iio_buffer_request_update(struct iio_dev *indio_dev,
ret = buffer->access->request_update(buffer);
if (ret) {
dev_dbg(&indio_dev->dev,
- "Buffer not started: buffer parameter update failed (%d)\n",
+ "Buffer not started: buffer parameter update failed (%d)\n",
ret);
return ret;
}
@@ -807,7 +806,7 @@ static int iio_buffer_request_update(struct iio_dev *indio_dev,
}
static void iio_free_scan_mask(struct iio_dev *indio_dev,
- const unsigned long *mask)
+ const unsigned long *mask)
{
	/* If the mask is dynamically allocated, free it; otherwise do nothing */
if (!indio_dev->available_scan_masks)
@@ -823,8 +822,9 @@ struct iio_device_config {
};
static int iio_verify_update(struct iio_dev *indio_dev,
- struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
- struct iio_device_config *config)
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer,
+ struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
unsigned long *compound_mask;
@@ -864,7 +864,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
if (insert_buffer) {
modes &= insert_buffer->access->modes;
config->watermark = min(config->watermark,
- insert_buffer->watermark);
+ insert_buffer->watermark);
}
/* Definitely possible for devices to support both of these. */
@@ -890,7 +890,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
/* What scan mask do we actually have? */
compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
- if (compound_mask == NULL)
+ if (!compound_mask)
return -ENOMEM;
scan_timestamp = false;
@@ -911,18 +911,18 @@ static int iio_verify_update(struct iio_dev *indio_dev,
if (indio_dev->available_scan_masks) {
scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- compound_mask,
- strict_scanmask);
+ indio_dev->masklength,
+ compound_mask,
+ strict_scanmask);
bitmap_free(compound_mask);
- if (scan_mask == NULL)
+ if (!scan_mask)
return -EINVAL;
} else {
scan_mask = compound_mask;
}
config->scan_bytes = iio_compute_scan_bytes(indio_dev,
- scan_mask, scan_timestamp);
+ scan_mask, scan_timestamp);
config->scan_mask = scan_mask;
config->scan_timestamp = scan_timestamp;
@@ -954,16 +954,16 @@ static void iio_buffer_demux_free(struct iio_buffer *buffer)
}
static int iio_buffer_add_demux(struct iio_buffer *buffer,
- struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
- unsigned int length)
+ struct iio_demux_table **p, unsigned int in_loc,
+ unsigned int out_loc,
+ unsigned int length)
{
-
if (*p && (*p)->from + (*p)->length == in_loc &&
- (*p)->to + (*p)->length == out_loc) {
+ (*p)->to + (*p)->length == out_loc) {
(*p)->length += length;
} else {
*p = kmalloc(sizeof(**p), GFP_KERNEL);
- if (*p == NULL)
+ if (!(*p))
return -ENOMEM;
(*p)->from = in_loc;
(*p)->to = out_loc;
@@ -1027,7 +1027,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
out_loc += length;
}
buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
- if (buffer->demux_bounce == NULL) {
+ if (!buffer->demux_bounce) {
ret = -ENOMEM;
goto error_clear_mux_table;
}
@@ -1060,7 +1060,7 @@ error_clear_mux_table:
}
static int iio_enable_buffers(struct iio_dev *indio_dev,
- struct iio_device_config *config)
+ struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer, *tmp = NULL;
@@ -1078,7 +1078,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
ret = indio_dev->setup_ops->preenable(indio_dev);
if (ret) {
dev_dbg(&indio_dev->dev,
- "Buffer not started: buffer preenable failed (%d)\n", ret);
+ "Buffer not started: buffer preenable failed (%d)\n", ret);
goto err_undo_config;
}
}
@@ -1118,7 +1118,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
dev_dbg(&indio_dev->dev,
- "Buffer not started: postenable failed (%d)\n", ret);
+ "Buffer not started: postenable failed (%d)\n", ret);
goto err_detach_pollfunc;
}
}
@@ -1194,15 +1194,15 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
- struct iio_buffer *insert_buffer,
- struct iio_buffer *remove_buffer)
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_device_config new_config;
int ret;
ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
- &new_config);
+ &new_config);
if (ret)
return ret;
@@ -1258,7 +1258,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
return 0;
if (insert_buffer &&
- (insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT))
+ insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
return -EINVAL;
mutex_lock(&iio_dev_opaque->info_exist_lock);
@@ -1275,7 +1275,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
goto out_unlock;
}
- if (indio_dev->info == NULL) {
+ if (!indio_dev->info) {
ret = -ENODEV;
goto out_unlock;
}
@@ -1615,7 +1615,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
buffer_attrcount = 0;
if (buffer->attrs) {
- while (buffer->attrs[buffer_attrcount] != NULL)
+ while (buffer->attrs[buffer_attrcount])
buffer_attrcount++;
}
buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
@@ -1643,7 +1643,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
}
ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
- &channels[i]);
+ &channels[i]);
if (ret < 0)
goto error_cleanup_dynamic;
scan_el_attrcount += ret;
@@ -1651,10 +1651,10 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
iio_dev_opaque->scan_index_timestamp =
channels[i].scan_index;
}
- if (indio_dev->masklength && buffer->scan_mask == NULL) {
+ if (indio_dev->masklength && !buffer->scan_mask) {
buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
GFP_KERNEL);
- if (buffer->scan_mask == NULL) {
+ if (!buffer->scan_mask) {
ret = -ENOMEM;
goto error_cleanup_dynamic;
}
@@ -1771,7 +1771,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
goto error_unwind_sysfs_and_mask;
}
- sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
+ sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
if (!iio_dev_opaque->buffer_ioctl_handler) {
ret = -ENOMEM;
@@ -1820,14 +1820,14 @@ void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
* a time.
*/
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
- const unsigned long *mask)
+ const unsigned long *mask)
{
return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
static const void *iio_demux(struct iio_buffer *buffer,
- const void *datain)
+ const void *datain)
{
struct iio_demux_table *t;
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 784dc1e00310..f207e36b12cc 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -322,7 +322,7 @@ int iio_trigger_attach_poll_func(struct iio_trigger *trig,
* this is the case if the IIO device and the trigger device share the
* same parent device.
*/
- if (pf->indio_dev->dev.parent == trig->dev.parent)
+ if (iio_validate_own_trigger(pf->indio_dev, trig))
trig->attached_own_device = true;
return ret;
@@ -729,6 +729,26 @@ bool iio_trigger_using_own(struct iio_dev *indio_dev)
EXPORT_SYMBOL(iio_trigger_using_own);
/**
+ * iio_validate_own_trigger - Check if a trigger and IIO device belong to
+ * the same device
+ * @idev: the IIO device to check
+ * @trig: the IIO trigger to check
+ *
+ * This function can be used as the validate_trigger callback for triggers that
+ * can only be attached to their own device.
+ *
+ * Return: 0 if both the trigger and the IIO device belong to the same
+ * device, -EINVAL otherwise.
+ */
+int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig)
+{
+ if (idev->dev.parent != trig->dev.parent)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_validate_own_trigger);
+
+/**
* iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
* the same device
* @trig: The IIO trigger to check
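The exported iio_validate_own_trigger() above is meant to be plugged into a driver's struct iio_info as the validate_trigger callback, so attaching a foreign trigger fails with -EINVAL while the trigger registered by the same parent device is accepted. A minimal sketch of such a hookup in a hypothetical driver (other iio_info callbacks elided):

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

/*
 * Hypothetical driver: reject any trigger that does not share the IIO
 * device's parent, i.e. accept only the driver's own trigger.
 */
static const struct iio_info example_info = {
	.validate_trigger = iio_validate_own_trigger,
};

The same check is reused in iio_trigger_attach_poll_func() above to decide whether attached_own_device should be set.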
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 6fa31fcd71a1..45edba797e4c 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -289,6 +289,20 @@ config JSA1212
To compile this driver as a module, choose M here:
the module will be called jsa1212.
+config ROHM_BU27008
+ tristate "ROHM BU27008 color (RGB+C/IR) sensor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_GTS_HELPER
+ help
+ Enable support for the ROHM BU27008 color sensor.
+ The ROHM BU27008 is a sensor with 5 photodiodes (red, green,
+	  The ROHM BU27008 is a sensor with 5 photodiodes (red, green,
+	  blue, clear and IR) and four configurable channels. Red and
+	  green are always available, while two of the remaining three
+	  (blue, clear, IR) can be selected to be measured simultaneously.
+	  Typical applications include adjusting the LCD backlight of
+	  TVs, mobile phones and tablet PCs.
config ROHM_BU27034
tristate "ROHM BU27034 ambient light sensor"
depends on I2C
@@ -413,6 +427,17 @@ config OPT3001
If built as a dynamically linked module, it will be called
opt3001.
+config OPT4001
+ tristate "Texas Instruments OPT4001 Light Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say Y or M here, you get support for Texas Instruments
+ OPT4001 Ambient Light Sensor.
+
+ If built as a dynamically linked module, it will be called
+ opt4001.
+
config PA12203001
tristate "TXC PA12203001 light and proximity sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 985f6feaccd4..c0db4c4c36ec 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -37,7 +37,9 @@ obj-$(CONFIG_MAX44000) += max44000.o
obj-$(CONFIG_MAX44009) += max44009.o
obj-$(CONFIG_NOA1305) += noa1305.o
obj-$(CONFIG_OPT3001) += opt3001.o
+obj-$(CONFIG_OPT4001) += opt4001.o
obj-$(CONFIG_PA12203001) += pa12203001.o
+obj-$(CONFIG_ROHM_BU27008) += rohm-bu27008.o
obj-$(CONFIG_ROHM_BU27034) += rohm-bu27034.o
obj-$(CONFIG_RPR0521) += rpr0521.o
obj-$(CONFIG_SI1133) += si1133.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 210a90f44c53..5fd775a20176 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -270,7 +270,7 @@ static struct i2c_driver adjd_s311_driver = {
.driver = {
.name = ADJD_S311_DRV_NAME,
},
- .probe_new = adjd_s311_probe,
+ .probe = adjd_s311_probe,
.id_table = adjd_s311_id,
};
module_i2c_driver(adjd_s311_driver);
diff --git a/drivers/iio/light/adux1020.c b/drivers/iio/light/adux1020.c
index 606075350d01..aa4a6c78f0aa 100644
--- a/drivers/iio/light/adux1020.c
+++ b/drivers/iio/light/adux1020.c
@@ -837,7 +837,7 @@ static struct i2c_driver adux1020_driver = {
.name = ADUX1020_DRV_NAME,
.of_match_table = adux1020_of_match,
},
- .probe_new = adux1020_probe,
+ .probe = adux1020_probe,
.id_table = adux1020_id,
};
module_i2c_driver(adux1020_driver);
diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c
index 69cc723e2ac4..8f0119f392b7 100644
--- a/drivers/iio/light/al3010.c
+++ b/drivers/iio/light/al3010.c
@@ -229,7 +229,7 @@ static struct i2c_driver al3010_driver = {
.of_match_table = al3010_of_match,
.pm = pm_sleep_ptr(&al3010_pm_ops),
},
- .probe_new = al3010_probe,
+ .probe = al3010_probe,
.id_table = al3010_id,
};
module_i2c_driver(al3010_driver);
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
index 9ff28bbf34bb..d5957d85c278 100644
--- a/drivers/iio/light/al3320a.c
+++ b/drivers/iio/light/al3320a.c
@@ -16,6 +16,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -247,13 +248,20 @@ static const struct of_device_id al3320a_of_match[] = {
};
MODULE_DEVICE_TABLE(of, al3320a_of_match);
+static const struct acpi_device_id al3320a_acpi_match[] = {
+ {"CALS0001"},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, al3320a_acpi_match);
+
static struct i2c_driver al3320a_driver = {
.driver = {
.name = AL3320A_DRV_NAME,
.of_match_table = al3320a_of_match,
.pm = pm_sleep_ptr(&al3320a_pm_ops),
+ .acpi_match_table = al3320a_acpi_match,
},
- .probe_new = al3320a_probe,
+ .probe = al3320a_probe,
.id_table = al3320a_id,
};
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 15dfb753734f..0f978b30a232 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -504,7 +504,7 @@ static struct i2c_driver apds9300_driver = {
.name = APDS9300_DRV_NAME,
.pm = pm_sleep_ptr(&apds9300_pm_ops),
},
- .probe_new = apds9300_probe,
+ .probe = apds9300_probe,
.remove = apds9300_remove,
.id_table = apds9300_id,
};
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index cc5974a95bd3..1065a340b12b 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1131,7 +1131,7 @@ static struct i2c_driver apds9960_driver = {
.pm = &apds9960_pm_ops,
.acpi_match_table = apds9960_acpi_match,
},
- .probe_new = apds9960_probe,
+ .probe = apds9960_probe,
.remove = apds9960_remove,
.id_table = apds9960_id,
};
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index 2307fc531752..ec97a3a46839 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -790,7 +790,7 @@ static struct i2c_driver as73211_driver = {
.of_match_table = as73211_of_match,
.pm = pm_sleep_ptr(&as73211_pm_ops),
},
- .probe_new = as73211_probe,
+ .probe = as73211_probe,
.id_table = as73211_id,
};
module_i2c_driver(as73211_driver);
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 390c5b3ad4f6..4b869fa9e5b1 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -320,7 +320,7 @@ static struct i2c_driver bh1750_driver = {
.of_match_table = bh1750_of_match,
.pm = pm_sleep_ptr(&bh1750_pm_ops),
},
- .probe_new = bh1750_probe,
+ .probe = bh1750_probe,
.remove = bh1750_remove,
.id_table = bh1750_id,
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index da9039e5a839..b84166c5fa06 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -269,7 +269,7 @@ static const struct of_device_id of_bh1780_match[] = {
MODULE_DEVICE_TABLE(of, of_bh1780_match);
static struct i2c_driver bh1780_driver = {
- .probe_new = bh1780_probe,
+ .probe = bh1780_probe,
.remove = bh1780_remove,
.id_table = bh1780_id,
.driver = {
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index d4a34a3bf00d..9df85b3999fa 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -542,7 +542,7 @@ static struct i2c_driver cm32181_driver = {
.of_match_table = cm32181_of_match,
.pm = pm_sleep_ptr(&cm32181_pm_ops),
},
- .probe_new = cm32181_probe,
+ .probe = cm32181_probe,
};
module_i2c_driver(cm32181_driver);
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index 43e492f5051d..d48a70efca69 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -417,7 +417,7 @@ static struct i2c_driver cm3232_driver = {
.pm = pm_sleep_ptr(&cm3232_pm_ops),
},
.id_table = cm3232_id,
- .probe_new = cm3232_probe,
+ .probe = cm3232_probe,
.remove = cm3232_remove,
};
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index e5ce7d0fd272..35d20207a648 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -266,7 +266,7 @@ static struct i2c_driver cm3323_driver = {
.name = CM3323_DRV_NAME,
.of_match_table = cm3323_of_match,
},
- .probe_new = cm3323_probe,
+ .probe = cm3323_probe,
.id_table = cm3323_id,
};
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 1707dbf2ce26..97e559acba2b 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -730,7 +730,7 @@ static struct i2c_driver cm36651_driver = {
.name = "cm36651",
.of_match_table = cm36651_of_match,
},
- .probe_new = cm36651_probe,
+ .probe = cm36651_probe,
.remove = cm36651_remove,
.id_table = cm36651_id,
};
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index c0430db0038a..fec10d5e037e 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -710,7 +710,7 @@ static struct i2c_driver gp2ap002_driver = {
.of_match_table = gp2ap002_of_match,
.pm = pm_ptr(&gp2ap002_dev_pm_ops),
},
- .probe_new = gp2ap002_probe,
+ .probe = gp2ap002_probe,
.remove = gp2ap002_remove,
.id_table = gp2ap002_id_table,
};
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index a5bf9da0d2f3..9f41724819b6 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1609,7 +1609,7 @@ static struct i2c_driver gp2ap020a00f_driver = {
.name = GP2A_I2C_NAME,
.of_match_table = gp2ap020a00f_of_match,
},
- .probe_new = gp2ap020a00f_probe,
+ .probe = gp2ap020a00f_probe,
.remove = gp2ap020a00f_remove,
.id_table = gp2ap020a00f_id,
};
diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index 141845fb47f9..43484c18b101 100644
--- a/drivers/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -865,7 +865,7 @@ static struct i2c_driver isl29018_driver = {
.pm = pm_sleep_ptr(&isl29018_pm_ops),
.of_match_table = isl29018_of_match,
},
- .probe_new = isl29018_probe,
+ .probe = isl29018_probe,
.id_table = isl29018_id,
};
module_i2c_driver(isl29018_driver);
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index bcf3a556e41a..5694683389be 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -698,7 +698,7 @@ static struct i2c_driver isl29028_driver = {
.pm = pm_ptr(&isl29028_pm_ops),
.of_match_table = isl29028_of_match,
},
- .probe_new = isl29028_probe,
+ .probe = isl29028_probe,
.remove = isl29028_remove,
.id_table = isl29028_id,
};
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index b4bd656ca169..f1d3356d3369 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -337,7 +337,7 @@ static struct i2c_driver isl29125_driver = {
.name = ISL29125_DRV_NAME,
.pm = pm_sleep_ptr(&isl29125_pm_ops),
},
- .probe_new = isl29125_probe,
+ .probe = isl29125_probe,
.remove = isl29125_remove,
.id_table = isl29125_id,
};
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index d3834d0a0635..37e2807041a1 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -440,7 +440,7 @@ static struct i2c_driver jsa1212_driver = {
.pm = pm_sleep_ptr(&jsa1212_pm_ops),
.acpi_match_table = ACPI_PTR(jsa1212_acpi_match),
},
- .probe_new = jsa1212_probe,
+ .probe = jsa1212_probe,
.remove = jsa1212_remove,
.id_table = jsa1212_id,
};
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index bdbd918213e4..061c122fdc5e 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1641,7 +1641,7 @@ static struct i2c_driver ltr501_driver = {
.pm = pm_sleep_ptr(&ltr501_pm_ops),
.acpi_match_table = ACPI_PTR(ltr_acpi_match),
},
- .probe_new = ltr501_probe,
+ .probe = ltr501_probe,
.remove = ltr501_remove,
.id_table = ltr501_id,
};
diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c
index 4b8ef36b6912..8de4dd849936 100644
--- a/drivers/iio/light/ltrf216a.c
+++ b/drivers/iio/light/ltrf216a.c
@@ -539,7 +539,7 @@ static struct i2c_driver ltrf216a_driver = {
.pm = pm_ptr(&ltrf216a_pm_ops),
.of_match_table = ltrf216a_of_match,
},
- .probe_new = ltrf216a_probe,
+ .probe = ltrf216a_probe,
.id_table = ltrf216a_id,
};
module_i2c_driver(ltrf216a_driver);
diff --git a/drivers/iio/light/lv0104cs.c b/drivers/iio/light/lv0104cs.c
index c041fa0faa5d..a5445d58fddf 100644
--- a/drivers/iio/light/lv0104cs.c
+++ b/drivers/iio/light/lv0104cs.c
@@ -520,7 +520,7 @@ static struct i2c_driver lv0104cs_i2c_driver = {
.name = "lv0104cs",
},
.id_table = lv0104cs_id,
- .probe_new = lv0104cs_probe,
+ .probe = lv0104cs_probe,
};
module_i2c_driver(lv0104cs_i2c_driver);
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index 5dcabc43a30e..db96c5b73100 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -616,7 +616,7 @@ static struct i2c_driver max44000_driver = {
.name = MAX44000_DRV_NAME,
.acpi_match_table = ACPI_PTR(max44000_acpi_match),
},
- .probe_new = max44000_probe,
+ .probe = max44000_probe,
.id_table = max44000_id,
};
diff --git a/drivers/iio/light/max44009.c b/drivers/iio/light/max44009.c
index 176dcad6e8e8..61ce276e86f7 100644
--- a/drivers/iio/light/max44009.c
+++ b/drivers/iio/light/max44009.c
@@ -544,7 +544,7 @@ static struct i2c_driver max44009_driver = {
.name = MAX44009_DRV_NAME,
.of_match_table = max44009_of_match,
},
- .probe_new = max44009_probe,
+ .probe = max44009_probe,
.id_table = max44009_id,
};
module_i2c_driver(max44009_driver);
diff --git a/drivers/iio/light/noa1305.c b/drivers/iio/light/noa1305.c
index eaf548d4649e..1574310020e3 100644
--- a/drivers/iio/light/noa1305.c
+++ b/drivers/iio/light/noa1305.c
@@ -278,7 +278,7 @@ static struct i2c_driver noa1305_driver = {
.name = NOA1305_DRIVER_NAME,
.of_match_table = noa1305_of_match,
},
- .probe_new = noa1305_probe,
+ .probe = noa1305_probe,
.id_table = noa1305_ids,
};
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index ec4f5c2369c4..cb41e5ee8ec1 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -834,7 +834,7 @@ static const struct of_device_id opt3001_of_match[] = {
MODULE_DEVICE_TABLE(of, opt3001_of_match);
static struct i2c_driver opt3001_driver = {
- .probe_new = opt3001_probe,
+ .probe = opt3001_probe,
.remove = opt3001_remove,
.id_table = opt3001_id,
diff --git a/drivers/iio/light/opt4001.c b/drivers/iio/light/opt4001.c
new file mode 100644
index 000000000000..502946bf9f94
--- /dev/null
+++ b/drivers/iio/light/opt4001.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Axis Communications AB
+ *
+ * Datasheet: https://www.ti.com/lit/gpn/opt4001
+ *
+ * Device driver for the Texas Instruments OPT4001.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+/* OPT4001 register set */
+#define OPT4001_LIGHT1_MSB 0x00
+#define OPT4001_LIGHT1_LSB 0x01
+#define OPT4001_CTRL 0x0A
+#define OPT4001_DEVICE_ID 0x11
+
+/* OPT4001 register mask */
+#define OPT4001_EXPONENT_MASK GENMASK(15, 12)
+#define OPT4001_MSB_MASK GENMASK(11, 0)
+#define OPT4001_LSB_MASK GENMASK(15, 8)
+#define OPT4001_COUNTER_MASK GENMASK(7, 4)
+#define OPT4001_CRC_MASK GENMASK(3, 0)
+
+/* OPT4001 device id mask */
+#define OPT4001_DEVICE_ID_MASK GENMASK(11, 0)
+
+/* OPT4001 control registers mask */
+#define OPT4001_CTRL_QWAKE_MASK GENMASK(15, 15)
+#define OPT4001_CTRL_RANGE_MASK GENMASK(13, 10)
+#define OPT4001_CTRL_CONV_TIME_MASK GENMASK(9, 6)
+#define OPT4001_CTRL_OPER_MODE_MASK GENMASK(5, 4)
+#define OPT4001_CTRL_LATCH_MASK GENMASK(3, 3)
+#define OPT4001_CTRL_INT_POL_MASK GENMASK(2, 2)
+#define OPT4001_CTRL_FAULT_COUNT GENMASK(0, 1)
+
+/* OPT4001 constants */
+#define OPT4001_DEVICE_ID_VAL 0x121
+
+/* OPT4001 operating modes */
+#define OPT4001_CTRL_OPER_MODE_OFF 0x0
+#define OPT4001_CTRL_OPER_MODE_FORCED 0x1
+#define OPT4001_CTRL_OPER_MODE_ONE_SHOT 0x2
+#define OPT4001_CTRL_OPER_MODE_CONTINUOUS 0x3
+
+/* OPT4001 conversion control register definitions */
+#define OPT4001_CTRL_CONVERSION_0_6MS 0x0
+#define OPT4001_CTRL_CONVERSION_1MS 0x1
+#define OPT4001_CTRL_CONVERSION_1_8MS 0x2
+#define OPT4001_CTRL_CONVERSION_3_4MS 0x3
+#define OPT4001_CTRL_CONVERSION_6_5MS 0x4
+#define OPT4001_CTRL_CONVERSION_12_7MS 0x5
+#define OPT4001_CTRL_CONVERSION_25MS 0x6
+#define OPT4001_CTRL_CONVERSION_50MS 0x7
+#define OPT4001_CTRL_CONVERSION_100MS 0x8
+#define OPT4001_CTRL_CONVERSION_200MS 0x9
+#define OPT4001_CTRL_CONVERSION_400MS 0xa
+#define OPT4001_CTRL_CONVERSION_800MS 0xb
+
+/* OPT4001 scale light level range definitions */
+#define OPT4001_CTRL_LIGHT_SCALE_AUTO 12
+
+/* OPT4001 default values */
+#define OPT4001_DEFAULT_CONVERSION_TIME OPT4001_CTRL_CONVERSION_800MS
+
+/*
+ * Different packaging variants of the OPT4001 use different constants when
+ * calculating lux values.
+ */
+struct opt4001_chip_info {
+ int mul;
+ int div;
+ const char *name;
+};
+
+struct opt4001_chip {
+ struct regmap *regmap;
+ struct i2c_client *client;
+ u8 int_time;
+ const struct opt4001_chip_info *chip_info;
+};
+
+static const struct opt4001_chip_info opt4001_sot_5x3_info = {
+ .mul = 4375,
+ .div = 10000000,
+ .name = "opt4001-sot-5x3"
+};
+
+static const struct opt4001_chip_info opt4001_picostar_info = {
+ .mul = 3125,
+ .div = 10000000,
+ .name = "opt4001-picostar"
+};
+
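+/* Available integration times, as {seconds, microseconds} pairs */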
+static const int opt4001_int_time_available[][2] = {
+ { 0, 600 },
+ { 0, 1000 },
+ { 0, 1800 },
+ { 0, 3400 },
+ { 0, 6500 },
+ { 0, 12700 },
+ { 0, 25000 },
+ { 0, 50000 },
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 400000 },
+ { 0, 800000 },
+};
+
+/*
+ * Conversion time is the integration time plus the time needed to set the
+ * register; this is used here as the integration time.
+ */
+static const int opt4001_int_time_reg[][2] = {
+ { 600, OPT4001_CTRL_CONVERSION_0_6MS },
+ { 1000, OPT4001_CTRL_CONVERSION_1MS },
+ { 1800, OPT4001_CTRL_CONVERSION_1_8MS },
+ { 3400, OPT4001_CTRL_CONVERSION_3_4MS },
+ { 6500, OPT4001_CTRL_CONVERSION_6_5MS },
+ { 12700, OPT4001_CTRL_CONVERSION_12_7MS },
+ { 25000, OPT4001_CTRL_CONVERSION_25MS },
+ { 50000, OPT4001_CTRL_CONVERSION_50MS },
+ { 100000, OPT4001_CTRL_CONVERSION_100MS },
+ { 200000, OPT4001_CTRL_CONVERSION_200MS },
+ { 400000, OPT4001_CTRL_CONVERSION_400MS },
+ { 800000, OPT4001_CTRL_CONVERSION_800MS },
+};
+
+static int opt4001_als_time_to_index(const u32 als_integration_time)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt4001_int_time_available); i++) {
+ if (als_integration_time == opt4001_int_time_available[i][1])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
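+/*
+ * The result registers include a 4-bit CRC over the exponent, mantissa and
+ * sample counter. Each CRC bit is the parity of a subset of the bits:
+ * bit 0 covers all bits, bit 1 the odd bit positions (masks 0xAAAAA/0xA),
+ * bit 2 every fourth bit position (masks 0x88888/0x8) and bit 3 mantissa
+ * bits 3, 11 and 19 (mask 0x80808).
+ */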
+static u8 opt4001_calculate_crc(u8 exp, u32 mantissa, u8 count)
+{
+ u8 crc;
+
+ crc = (hweight32(mantissa) + hweight32(exp) + hweight32(count)) % 2;
+ crc |= ((hweight32(mantissa & 0xAAAAA) + hweight32(exp & 0xA)
+ + hweight32(count & 0xA)) % 2) << 1;
+ crc |= ((hweight32(mantissa & 0x88888) + hweight32(exp & 0x8)
+ + hweight32(count & 0x8)) % 2) << 2;
+ crc |= (hweight32(mantissa & 0x80808) % 2) << 3;
+
+ return crc;
+}
+
+static int opt4001_read_lux_value(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ struct opt4001_chip *chip = iio_priv(indio_dev);
+ struct device *dev = &chip->client->dev;
+ unsigned int light1;
+ unsigned int light2;
+ u16 msb;
+ u16 lsb;
+ u8 exp;
+ u8 count;
+ u8 crc;
+ u8 calc_crc;
+ u64 lux_raw;
+ int ret;
+
+ ret = regmap_read(chip->regmap, OPT4001_LIGHT1_MSB, &light1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read data bytes");
+ return ret;
+ }
+
+ ret = regmap_read(chip->regmap, OPT4001_LIGHT1_LSB, &light2);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read data bytes");
+ return ret;
+ }
+
+ count = FIELD_GET(OPT4001_COUNTER_MASK, light2);
+ exp = FIELD_GET(OPT4001_EXPONENT_MASK, light1);
+ crc = FIELD_GET(OPT4001_CRC_MASK, light2);
+ msb = FIELD_GET(OPT4001_MSB_MASK, light1);
+ lsb = FIELD_GET(OPT4001_LSB_MASK, light2);
+ lux_raw = (msb << 8) + lsb;
+ calc_crc = opt4001_calculate_crc(exp, lux_raw, count);
+ if (calc_crc != crc)
+ return -EIO;
+
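+ /*
+ * The raw reading is a mantissa/exponent pair: ADC codes = mantissa << exp.
+ * Lux is codes * mul / div; since div is 10^7, scaling the remainder by
+ * 100 yields the fractional part in nanolux for IIO_VAL_INT_PLUS_NANO.
+ */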
+ lux_raw = lux_raw << exp;
+ lux_raw = lux_raw * chip->chip_info->mul;
+ *val = div_u64_rem(lux_raw, chip->chip_info->div, val2);
+ *val2 = *val2 * 100;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int opt4001_set_conf(struct opt4001_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+ u16 reg;
+ int ret;
+
+ reg = FIELD_PREP(OPT4001_CTRL_RANGE_MASK, OPT4001_CTRL_LIGHT_SCALE_AUTO);
+ reg |= FIELD_PREP(OPT4001_CTRL_CONV_TIME_MASK, chip->int_time);
+ reg |= FIELD_PREP(OPT4001_CTRL_OPER_MODE_MASK, OPT4001_CTRL_OPER_MODE_CONTINUOUS);
+
+ ret = regmap_write(chip->regmap, OPT4001_CTRL, reg);
+ if (ret)
+ dev_err(dev, "Failed to set configuration\n");
+
+ return ret;
+}
+
+static int opt4001_power_down(struct opt4001_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_read(chip->regmap, OPT4001_DEVICE_ID, &reg);
+ if (ret) {
+ dev_err(dev, "Failed to read configuration\n");
+ return ret;
+ }
+
+ /* MODE_OFF is 0x0 so just set bits to 0 */
+ reg &= ~OPT4001_CTRL_OPER_MODE_MASK;
+
+ ret = regmap_write(chip->regmap, OPT4001_CTRL, reg);
+ if (ret)
+ dev_err(dev, "Failed to set configuration to power down\n");
+
+ return ret;
+}
+
+static void opt4001_chip_off_action(void *data)
+{
+ struct opt4001_chip *chip = data;
+
+ opt4001_power_down(chip);
+}
+
+static const struct iio_chan_spec opt4001_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME)
+ },
+};
+
+static int opt4001_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct opt4001_chip *chip = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ return opt4001_read_lux_value(indio_dev, val, val2);
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ *val2 = opt4001_int_time_reg[chip->int_time][0];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4001_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct opt4001_chip *chip = iio_priv(indio_dev);
+ int int_time;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ int_time = opt4001_als_time_to_index(val2);
+ if (int_time < 0)
+ return int_time;
+ chip->int_time = int_time;
+ return opt4001_set_conf(chip);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4001_read_available(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
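+ /* Integration times are reported as a flat list of {sec, usec} pairs */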
+ *length = ARRAY_SIZE(opt4001_int_time_available) * 2;
+ *vals = (const int *)opt4001_int_time_available;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info opt4001_info_no_irq = {
+ .read_raw = opt4001_read_raw,
+ .write_raw = opt4001_write_raw,
+ .read_avail = opt4001_read_available,
+};
+
+static int opt4001_load_defaults(struct opt4001_chip *chip)
+{
+ chip->int_time = OPT4001_DEFAULT_CONVERSION_TIME;
+
+ return opt4001_set_conf(chip);
+}
+
+static bool opt4001_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case OPT4001_LIGHT1_MSB:
+ case OPT4001_LIGHT1_LSB:
+ case OPT4001_CTRL:
+ case OPT4001_DEVICE_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool opt4001_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case OPT4001_CTRL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool opt4001_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case OPT4001_LIGHT1_MSB:
+ case OPT4001_LIGHT1_LSB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config opt4001_regmap_config = {
+ .name = "opt4001",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = OPT4001_DEVICE_ID,
+ .readable_reg = opt4001_readable_reg,
+ .writeable_reg = opt4001_writable_reg,
+ .volatile_reg = opt4001_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
+static int opt4001_probe(struct i2c_client *client)
+{
+ struct opt4001_chip *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+ uint dev_id;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+
+ ret = devm_regulator_get_enable(&client->dev, "vdd");
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to enable vdd supply\n");
+
+ chip->regmap = devm_regmap_init_i2c(client, &opt4001_regmap_config);
+ if (IS_ERR(chip->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(chip->regmap),
+ "regmap initialization failed\n");
+ chip->client = client;
+
+ indio_dev->info = &opt4001_info_no_irq;
+
+ ret = regmap_reinit_cache(chip->regmap, &opt4001_regmap_config);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to reinit regmap cache\n");
+
+ ret = regmap_read(chip->regmap, OPT4001_DEVICE_ID, &dev_id);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to read the device ID register\n");
+
+ dev_id = FIELD_GET(OPT4001_DEVICE_ID_MASK, dev_id);
+ if (dev_id != OPT4001_DEVICE_ID_VAL)
+ dev_warn(&client->dev, "Device ID: %#04x unknown\n", dev_id);
+
+ chip->chip_info = device_get_match_data(&client->dev);
+
+ indio_dev->channels = opt4001_channels;
+ indio_dev->num_channels = ARRAY_SIZE(opt4001_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = chip->chip_info->name;
+
+ ret = opt4001_load_defaults(chip);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to set sensor defaults\n");
+
+ ret = devm_add_action_or_reset(&client->dev,
+ opt4001_chip_off_action,
+ chip);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to setup power off action\n");
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+/*
+ * The compatible string determines which constants to use, depending on
+ * the OPT4001 packaging variant.
+ */
+static const struct i2c_device_id opt4001_id[] = {
+ { "opt4001-sot-5x3", (kernel_ulong_t)&opt4001_sot_5x3_info },
+ { "opt4001-picostar", (kernel_ulong_t)&opt4001_picostar_info },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, opt4001_id);
+
+static const struct of_device_id opt4001_of_match[] = {
+ { .compatible = "ti,opt4001-sot-5x3", .data = &opt4001_sot_5x3_info},
+ { .compatible = "ti,opt4001-picostar", .data = &opt4001_picostar_info},
+ {}
+};
+MODULE_DEVICE_TABLE(of, opt4001_of_match);
+
+static struct i2c_driver opt4001_driver = {
+ .driver = {
+ .name = "opt4001",
+ .of_match_table = opt4001_of_match,
+ },
+ .probe = opt4001_probe,
+ .id_table = opt4001_id,
+};
+module_i2c_driver(opt4001_driver);
+
+MODULE_AUTHOR("Stefan Windfeldt-Prytz <stefan.windfeldt-prytz@axis.com>");
+MODULE_DESCRIPTION("Texas Instruments opt4001 ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index 15a666f15c27..ed241598aefb 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -474,7 +474,7 @@ static struct i2c_driver pa12203001_driver = {
.pm = &pa12203001_pm_ops,
.acpi_match_table = ACPI_PTR(pa12203001_acpi_match),
},
- .probe_new = pa12203001_probe,
+ .probe = pa12203001_probe,
.remove = pa12203001_remove,
.id_table = pa12203001_id,
diff --git a/drivers/iio/light/rohm-bu27008.c b/drivers/iio/light/rohm-bu27008.c
new file mode 100644
index 000000000000..489902bed7f0
--- /dev/null
+++ b/drivers/iio/light/rohm-bu27008.c
@@ -0,0 +1,1026 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BU27008 ROHM Colour Sensor
+ *
+ * Copyright (c) 2023, ROHM Semiconductor.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/iio-gts-helper.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define BU27008_REG_SYSTEM_CONTROL 0x40
+#define BU27008_MASK_SW_RESET BIT(7)
+#define BU27008_MASK_PART_ID GENMASK(5, 0)
+#define BU27008_ID 0x1a
+#define BU27008_REG_MODE_CONTROL1 0x41
+#define BU27008_MASK_MEAS_MODE GENMASK(2, 0)
+#define BU27008_MASK_CHAN_SEL GENMASK(3, 2)
+
+#define BU27008_REG_MODE_CONTROL2 0x42
+#define BU27008_MASK_RGBC_GAIN GENMASK(7, 3)
+#define BU27008_MASK_IR_GAIN_LO GENMASK(2, 0)
+#define BU27008_SHIFT_IR_GAIN 3
+
+#define BU27008_REG_MODE_CONTROL3 0x43
+#define BU27008_MASK_VALID BIT(7)
+#define BU27008_MASK_INT_EN BIT(1)
+#define BU27008_INT_EN BU27008_MASK_INT_EN
+#define BU27008_INT_DIS 0
+#define BU27008_MASK_MEAS_EN BIT(0)
+#define BU27008_MEAS_EN BIT(0)
+#define BU27008_MEAS_DIS 0
+
+#define BU27008_REG_DATA0_LO 0x50
+#define BU27008_REG_DATA1_LO 0x52
+#define BU27008_REG_DATA2_LO 0x54
+#define BU27008_REG_DATA3_LO 0x56
+#define BU27008_REG_DATA3_HI 0x57
+#define BU27008_REG_MANUFACTURER_ID 0x92
+#define BU27008_REG_MAX BU27008_REG_MANUFACTURER_ID
+
+/**
+ * enum bu27008_chan_type - BU27008 channel types
+ * @BU27008_RED: Red channel. Always via data0.
+ * @BU27008_GREEN: Green channel. Always via data1.
+ * @BU27008_BLUE: Blue channel. Via data2 (when used).
+ * @BU27008_CLEAR: Clear channel. Via data2 or data3 (when used).
+ * @BU27008_IR: IR channel. Via data3 (when used).
+ * @BU27008_NUM_CHANS: Number of channel types.
+ */
+enum bu27008_chan_type {
+ BU27008_RED,
+ BU27008_GREEN,
+ BU27008_BLUE,
+ BU27008_CLEAR,
+ BU27008_IR,
+ BU27008_NUM_CHANS
+};
+
+/**
+ * enum bu27008_chan - BU27008 physical data channel
+ * @BU27008_DATA0: Always red.
+ * @BU27008_DATA1: Always green.
+ * @BU27008_DATA2: Blue or clear.
+ * @BU27008_DATA3: IR or clear.
+ * @BU27008_NUM_HW_CHANS: Number of physical channels
+ */
+enum bu27008_chan {
+ BU27008_DATA0,
+ BU27008_DATA1,
+ BU27008_DATA2,
+ BU27008_DATA3,
+ BU27008_NUM_HW_CHANS
+};
+
+/* We can always measure red and green at the same time */
+#define ALWAYS_SCANNABLE (BIT(BU27008_RED) | BIT(BU27008_GREEN))
+
+/* We use these data channel configs. Ensure scan_masks below follow them too */
+#define BU27008_BLUE2_CLEAR3 0x0 /* buffer is R, G, B, C */
+#define BU27008_CLEAR2_IR3 0x1 /* buffer is R, G, C, IR */
+#define BU27008_BLUE2_IR3 0x2 /* buffer is R, G, B, IR */
+
+static const unsigned long bu27008_scan_masks[] = {
+ /* buffer is R, G, B, C */
+ ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_CLEAR),
+ /* buffer is R, G, C, IR */
+ ALWAYS_SCANNABLE | BIT(BU27008_CLEAR) | BIT(BU27008_IR),
+ /* buffer is R, G, B, IR */
+ ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR),
+ 0
+};
+
+/*
+ * Available scales with gain 1x - 1024x and timings 55, 100, 200, 400 mS.
+ * The integration time multiplies the gain by 1x, 2x, 4x or 8x, so the
+ * maximum total gain is HWGAIN * integration-time multiplier = 1024 * 8
+ * = 8192.
+ *
+ * With the NANO scale we avoid accuracy loss when we start with the
+ * scale 16.0 for HWGAIN 1x, INT-TIME 55 mS. This way the nano scale for the
+ * maximum total gain of 8192 will be 1953125.
+ */
+#define BU27008_SCALE_1X 16
+
+/* See the data sheet for the "Gain Setting" table */
+#define BU27008_GSEL_1X 0x00
+#define BU27008_GSEL_4X 0x08
+#define BU27008_GSEL_8X 0x09
+#define BU27008_GSEL_16X 0x0a
+#define BU27008_GSEL_32X 0x0b
+#define BU27008_GSEL_64X 0x0c
+#define BU27008_GSEL_256X 0x18
+#define BU27008_GSEL_512X 0x19
+#define BU27008_GSEL_1024X 0x1a
+
+static const struct iio_gain_sel_pair bu27008_gains[] = {
+ GAIN_SCALE_GAIN(1, BU27008_GSEL_1X),
+ GAIN_SCALE_GAIN(4, BU27008_GSEL_4X),
+ GAIN_SCALE_GAIN(8, BU27008_GSEL_8X),
+ GAIN_SCALE_GAIN(16, BU27008_GSEL_16X),
+ GAIN_SCALE_GAIN(32, BU27008_GSEL_32X),
+ GAIN_SCALE_GAIN(64, BU27008_GSEL_64X),
+ GAIN_SCALE_GAIN(256, BU27008_GSEL_256X),
+ GAIN_SCALE_GAIN(512, BU27008_GSEL_512X),
+ GAIN_SCALE_GAIN(1024, BU27008_GSEL_1024X),
+};
+
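+/*
+ * The IR channel uses the same gain selectors, but selector 0 (hw-gain 1X
+ * for RGBC) corresponds to gain 2X for IR.
+ */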
+static const struct iio_gain_sel_pair bu27008_gains_ir[] = {
+ GAIN_SCALE_GAIN(2, BU27008_GSEL_1X),
+ GAIN_SCALE_GAIN(4, BU27008_GSEL_4X),
+ GAIN_SCALE_GAIN(8, BU27008_GSEL_8X),
+ GAIN_SCALE_GAIN(16, BU27008_GSEL_16X),
+ GAIN_SCALE_GAIN(32, BU27008_GSEL_32X),
+ GAIN_SCALE_GAIN(64, BU27008_GSEL_64X),
+ GAIN_SCALE_GAIN(256, BU27008_GSEL_256X),
+ GAIN_SCALE_GAIN(512, BU27008_GSEL_512X),
+ GAIN_SCALE_GAIN(1024, BU27008_GSEL_1024X),
+};
+
+#define BU27008_MEAS_MODE_100MS 0x00
+#define BU27008_MEAS_MODE_55MS 0x01
+#define BU27008_MEAS_MODE_200MS 0x02
+#define BU27008_MEAS_MODE_400MS 0x04
+#define BU27008_MEAS_TIME_MAX_MS 400
+
+static const struct iio_itime_sel_mul bu27008_itimes[] = {
+ GAIN_SCALE_ITIME_US(400000, BU27008_MEAS_MODE_400MS, 8),
+ GAIN_SCALE_ITIME_US(200000, BU27008_MEAS_MODE_200MS, 4),
+ GAIN_SCALE_ITIME_US(100000, BU27008_MEAS_MODE_100MS, 2),
+ GAIN_SCALE_ITIME_US(55000, BU27008_MEAS_MODE_55MS, 1),
+};
+
+/*
+ * All the RGBC channels share the same gain.
+ * The IR gain can be fine-tuned relative to the RGBC gain by 2 bits, but
+ * this would make the gain setting quite complex, especially since not all
+ * bit combinations are supported. And in any case, setting the GAIN for
+ * RGBC will always also change the IR gain.
+ *
+ * On top of this, the selector '0', which corresponds to hw-gain 1X on RGBC,
+ * corresponds to gain 2X on IR. The rest of the selectors correspond to the
+ * same gains though. This, however, makes it impossible to use a shared gain
+ * for all RGBC and IR settings even though they are all changed in one go.
+ */
+#define BU27008_CHAN(color, data, separate_avail) \
+{ \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_LIGHT_##color, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate_available = (separate_avail), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .address = BU27008_REG_##data##_LO, \
+ .scan_index = BU27008_##color, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+/* For raw reads we always configure DATA3 for CLEAR */
+static const struct iio_chan_spec bu27008_channels[] = {
+ BU27008_CHAN(RED, DATA0, BIT(IIO_CHAN_INFO_SCALE)),
+ BU27008_CHAN(GREEN, DATA1, BIT(IIO_CHAN_INFO_SCALE)),
+ BU27008_CHAN(BLUE, DATA2, BIT(IIO_CHAN_INFO_SCALE)),
+ BU27008_CHAN(CLEAR, DATA2, BIT(IIO_CHAN_INFO_SCALE)),
+ /*
+ * We don't allow setting scale for IR (because of shared gain bits).
+ * Hence we don't advertise available ones either.
+ */
+ BU27008_CHAN(IR, DATA3, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(BU27008_NUM_CHANS),
+};
+
+struct bu27008_data {
+ struct regmap *regmap;
+ struct iio_trigger *trig;
+ struct device *dev;
+ struct iio_gts gts;
+ struct iio_gts gts_ir;
+ int irq;
+
+ /*
+ * Prevent changing gain/time config when scale is read/written.
+ * Similarly, protect the integration_time read/change sequence.
+ * Prevent changing gain/time when data is read.
+ */
+ struct mutex mutex;
+};
+
+static const struct regmap_range bu27008_volatile_ranges[] = {
+ {
+ .range_min = BU27008_REG_SYSTEM_CONTROL, /* SWRESET */
+ .range_max = BU27008_REG_SYSTEM_CONTROL,
+ }, {
+ .range_min = BU27008_REG_MODE_CONTROL3, /* VALID */
+ .range_max = BU27008_REG_MODE_CONTROL3,
+ }, {
+ .range_min = BU27008_REG_DATA0_LO, /* DATA */
+ .range_max = BU27008_REG_DATA3_HI,
+ },
+};
+
+static const struct regmap_access_table bu27008_volatile_regs = {
+ .yes_ranges = &bu27008_volatile_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(bu27008_volatile_ranges),
+};
+
+static const struct regmap_range bu27008_read_only_ranges[] = {
+ {
+ .range_min = BU27008_REG_DATA0_LO,
+ .range_max = BU27008_REG_DATA3_HI,
+ }, {
+ .range_min = BU27008_REG_MANUFACTURER_ID,
+ .range_max = BU27008_REG_MANUFACTURER_ID,
+ },
+};
+
+static const struct regmap_access_table bu27008_ro_regs = {
+ .no_ranges = &bu27008_read_only_ranges[0],
+ .n_no_ranges = ARRAY_SIZE(bu27008_read_only_ranges),
+};
+
+static const struct regmap_config bu27008_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = BU27008_REG_MAX,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &bu27008_volatile_regs,
+ .wr_table = &bu27008_ro_regs,
+ /*
+ * All register writes are serialized by the mutex which protects the
+ * scale setting/getting. This is needed because the scale is a combination
+ * of the gain and integration time settings, and we need to ensure those
+ * are not read / written while the scale is being computed.
+ *
+ * As a result of this serializing, we don't need regmap locking. Note,
+ * this is not true if we add any configurations which are not
+ * serialized by the mutex and which may need for example a protected
+ * read-modify-write cycle (eg. regmap_update_bits()). Please, revise
+ * this when adding features to the driver.
+ */
+ .disable_locking = true,
+};
+
+#define BU27008_MAX_VALID_RESULT_WAIT_US 50000
+#define BU27008_VALID_RESULT_WAIT_QUANTA_US 1000
+
+static int bu27008_chan_read_data(struct bu27008_data *data, int reg, int *val)
+{
+ int ret, valid;
+ __le16 tmp;
+
+ ret = regmap_read_poll_timeout(data->regmap, BU27008_REG_MODE_CONTROL3,
+ valid, (valid & BU27008_MASK_VALID),
+ BU27008_VALID_RESULT_WAIT_QUANTA_US,
+ BU27008_MAX_VALID_RESULT_WAIT_US);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, reg, &tmp, sizeof(tmp));
+ if (ret)
+ dev_err(data->dev, "Reading channel data failed\n");
+
+ *val = le16_to_cpu(tmp);
+
+ return ret;
+}
+
+static int bu27008_get_gain(struct bu27008_data *data, struct iio_gts *gts, int *gain)
+{
+ int ret, sel;
+
+ ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL2, &sel);
+ if (ret)
+ return ret;
+
+ sel = FIELD_GET(BU27008_MASK_RGBC_GAIN, sel);
+
+ ret = iio_gts_find_gain_by_sel(gts, sel);
+ if (ret < 0) {
+ dev_err(data->dev, "unknown gain value 0x%x\n", sel);
+ return ret;
+ }
+
+ *gain = ret;
+
+ return 0;
+}
+
+static int bu27008_write_gain_sel(struct bu27008_data *data, int sel)
+{
+ int regval;
+
+ regval = FIELD_PREP(BU27008_MASK_RGBC_GAIN, sel);
+
+ /*
+ * We always also set the LOW bits of the IR gain because otherwise we
+ * would risk ending up with an invalid GAIN register value.
+ *
+ * We could allow setting separate gains for RGBC and IR when the
+ * values were such that the HW could support both gain settings,
+ * e.g. when the shared bits were the same for both gain values.
+ *
+ * This, however, has a negligible benefit compared to the increased
+ * software complexity when we would need to go through the gains
+ * for both channels separately when the integration time changes.
+ * This would end up with nasty logic for computing gain values for
+ * both channels - and rejecting them if shared bits changed.
+ *
+ * We would then have to build the logic by guessing what a user prefers:
+ * RGBC or IR gain set correctly while the other jumps to an odd value?
+ * Maybe look up a value where both gains are somehow optimized
+ * <what this "somehow" is, is currently unknown to us>. Or maybe the user
+ * would expect us to reject changes when optimal gains can't be set for
+ * both channels with the given integration time. At best that would result
+ * in a solution that works well for a very specific subset of
+ * configurations but causes unexpected corner cases.
+ *
+ * So, we keep it simple. Always set the same selector for IR and RGBC.
+ * We disallow setting the IR gain (as I expect that most users are
+ * interested in RGBC). This way we can show the user that the scales
+ * for RGBC and IR channels are different (1X vs 2X with sel 0) while
+ * still keeping the operation deterministic.
+ */
+ regval |= FIELD_PREP(BU27008_MASK_IR_GAIN_LO, sel);
+
+ return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL2,
+ BU27008_MASK_RGBC_GAIN, regval);
+}
+
+static int bu27008_set_gain(struct bu27008_data *data, int gain)
+{
+ int ret;
+
+ ret = iio_gts_find_sel_by_gain(&data->gts, gain);
+ if (ret < 0)
+ return ret;
+
+ return bu27008_write_gain_sel(data, ret);
+}
+
+static int bu27008_get_int_time_sel(struct bu27008_data *data, int *sel)
+{
+ int ret, val;
+
+ ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL1, &val);
+ *sel = FIELD_GET(BU27008_MASK_MEAS_MODE, val);
+
+ return ret;
+}
+
+static int bu27008_set_int_time_sel(struct bu27008_data *data, int sel)
+{
+ return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL1,
+ BU27008_MASK_MEAS_MODE, sel);
+}
+
+static int bu27008_get_int_time_us(struct bu27008_data *data)
+{
+ int ret, sel;
+
+ ret = bu27008_get_int_time_sel(data, &sel);
+ if (ret)
+ return ret;
+
+ return iio_gts_find_int_time_by_sel(&data->gts, sel);
+}
+
+static int _bu27008_get_scale(struct bu27008_data *data, bool ir, int *val,
+ int *val2)
+{
+ struct iio_gts *gts;
+ int gain, ret;
+
+ if (ir)
+ gts = &data->gts_ir;
+ else
+ gts = &data->gts;
+
+ ret = bu27008_get_gain(data, gts, &gain);
+ if (ret)
+ return ret;
+
+ ret = bu27008_get_int_time_us(data);
+ if (ret < 0)
+ return ret;
+
+ return iio_gts_get_scale(gts, gain, ret, val, val2);
+}
+
+static int bu27008_get_scale(struct bu27008_data *data, bool ir, int *val,
+ int *val2)
+{
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = _bu27008_get_scale(data, ir, val, val2);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int bu27008_set_int_time(struct bu27008_data *data, int time)
+{
+ int ret;
+
+ ret = iio_gts_find_sel_by_int_time(&data->gts, time);
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL1,
+ BU27008_MASK_MEAS_MODE, ret);
+}
+
+/* Try to change the time so that the scale is maintained */
+static int bu27008_try_set_int_time(struct bu27008_data *data, int int_time_new)
+{
+ int ret, old_time_sel, new_time_sel, old_gain, new_gain;
+
+ mutex_lock(&data->mutex);
+
+ ret = bu27008_get_int_time_sel(data, &old_time_sel);
+ if (ret < 0)
+ goto unlock_out;
+
+ if (!iio_gts_valid_time(&data->gts, int_time_new)) {
+ dev_dbg(data->dev, "Unsupported integration time %u\n",
+ int_time_new);
+
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ /* If we already use requested time, then we're done */
+ new_time_sel = iio_gts_find_sel_by_int_time(&data->gts, int_time_new);
+ if (new_time_sel == old_time_sel)
+ goto unlock_out;
+
+ ret = bu27008_get_gain(data, &data->gts, &old_gain);
+ if (ret)
+ goto unlock_out;
+
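+ /* Find a gain which keeps the scale unchanged with the new time */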
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&data->gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ if (ret) {
+ int scale1, scale2;
+ bool ok;
+
+ _bu27008_get_scale(data, false, &scale1, &scale2);
+ dev_dbg(data->dev,
+ "Can't support time %u with current scale %u %u\n",
+ int_time_new, scale1, scale2);
+
+ if (new_gain < 0)
+ goto unlock_out;
+
+ /*
+ * If the caller requests an integration time change and we
+ * can't maintain the scale, then the caller should be
+ * prepared to 'pick up the pieces and deal with the
+ * fact that the scale changed'.
+ */
+ ret = iio_find_closest_gain_low(&data->gts, new_gain, &ok);
+ if (!ok)
+ dev_dbg(data->dev, "optimal gain out of range\n");
+
+ if (ret < 0) {
+ dev_dbg(data->dev,
+ "Total gain increase. Risk of saturation");
+ ret = iio_gts_get_min_gain(&data->gts);
+ if (ret < 0)
+ goto unlock_out;
+ }
+ new_gain = ret;
+ dev_dbg(data->dev, "scale changed, new gain %u\n", new_gain);
+ }
+
+ ret = bu27008_set_gain(data, new_gain);
+ if (ret)
+ goto unlock_out;
+
+ ret = bu27008_set_int_time(data, int_time_new);
+
+unlock_out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int bu27008_meas_set(struct bu27008_data *data, int state)
+{
+ return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL3,
+ BU27008_MASK_MEAS_EN, state);
+}
+
+static int bu27008_chan_cfg(struct bu27008_data *data,
+ struct iio_chan_spec const *chan)
+{
+ int chan_sel;
+
+ if (chan->scan_index == BU27008_BLUE)
+ chan_sel = BU27008_BLUE2_CLEAR3;
+ else
+ chan_sel = BU27008_CLEAR2_IR3;
+
+ chan_sel = FIELD_PREP(BU27008_MASK_CHAN_SEL, chan_sel);
+
+ return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL3,
+ BU27008_MASK_CHAN_SEL, chan_sel);
+}
+
+static int bu27008_read_one(struct bu27008_data *data, struct iio_dev *idev,
+ struct iio_chan_spec const *chan, int *val, int *val2)
+{
+ int ret, int_time;
+
+ ret = bu27008_chan_cfg(data, chan);
+ if (ret)
+ return ret;
+
+ ret = bu27008_meas_set(data, BU27008_MEAS_EN);
+ if (ret)
+ return ret;
+
+ ret = bu27008_get_int_time_us(data);
+ if (ret < 0)
+ int_time = BU27008_MEAS_TIME_MAX_MS;
+ else
+ int_time = ret / USEC_PER_MSEC;
+
+ msleep(int_time);
+
+ ret = bu27008_chan_read_data(data, chan->address, val);
+ if (!ret)
+ ret = IIO_VAL_INT;
+
+ if (bu27008_meas_set(data, BU27008_MEAS_DIS))
+ dev_warn(data->dev, "measurement disabling failed\n");
+
+ return ret;
+}
+
+static int bu27008_read_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bu27008_data *data = iio_priv(idev);
+ int busy, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ busy = iio_device_claim_direct_mode(idev);
+ if (busy)
+ return -EBUSY;
+
+ mutex_lock(&data->mutex);
+ ret = bu27008_read_one(data, idev, chan, val, val2);
+ mutex_unlock(&data->mutex);
+
+ iio_device_release_direct_mode(idev);
+
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = bu27008_get_scale(data, chan->scan_index == BU27008_IR,
+ val, val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = bu27008_get_int_time_us(data);
+ if (ret < 0)
+ return ret;
+
+ *val = 0;
+ *val2 = ret;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Called if the new scale could not be supported with existing int-time */
+static int bu27008_try_find_new_time_gain(struct bu27008_data *data, int val,
+ int val2, int *gain_sel)
+{
+ int i, ret, new_time_sel;
+
+ for (i = 0; i < data->gts.num_itime; i++) {
+ new_time_sel = data->gts.itime_table[i].sel;
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts,
+ new_time_sel, val, val2 * 1000, gain_sel);
+ if (!ret)
+ break;
+ }
+ if (i == data->gts.num_itime) {
+ dev_err(data->dev, "Can't support scale %u %u\n", val, val2);
+
+ return -EINVAL;
+ }
+
+ return bu27008_set_int_time_sel(data, new_time_sel);
+}
+
+static int bu27008_set_scale(struct bu27008_data *data,
+ struct iio_chan_spec const *chan,
+ int val, int val2)
+{
+ int ret, gain_sel, time_sel;
+
+ if (chan->scan_index == BU27008_IR)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ ret = bu27008_get_int_time_sel(data, &time_sel);
+ if (ret < 0)
+ goto unlock_out;
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, time_sel,
+ val, val2 * 1000, &gain_sel);
+ if (ret) {
+ ret = bu27008_try_find_new_time_gain(data, val, val2, &gain_sel);
+ if (ret)
+ goto unlock_out;
+
+ }
+ ret = bu27008_write_gain_sel(data, gain_sel);
+
+unlock_out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int bu27008_write_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bu27008_data *data = iio_priv(idev);
+ int ret;
+
+ /*
+ * Do not allow changing scale when measurement is ongoing as doing so
+ * could make values in the buffer inconsistent.
+ */
+ ret = iio_device_claim_direct_mode(idev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ ret = bu27008_set_scale(data, chan, val, val2);
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = bu27008_try_set_int_time(data, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ iio_device_release_direct_mode(idev);
+
+ return ret;
+}
+
+static int bu27008_read_avail(struct iio_dev *idev,
+ struct iio_chan_spec const *chan, const int **vals,
+ int *type, int *length, long mask)
+{
+ struct bu27008_data *data = iio_priv(idev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return iio_gts_avail_times(&data->gts, vals, type, length);
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->channel2 == IIO_MOD_LIGHT_IR)
+ return iio_gts_all_avail_scales(&data->gts_ir, vals,
+ type, length);
+ return iio_gts_all_avail_scales(&data->gts, vals, type, length);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bu27008_update_scan_mode(struct iio_dev *idev,
+ const unsigned long *scan_mask)
+{
+ struct bu27008_data *data = iio_priv(idev);
+ int chan_sel;
+
+ /* Configure channel selection */
+ if (test_bit(BU27008_BLUE, idev->active_scan_mask)) {
+ if (test_bit(BU27008_CLEAR, idev->active_scan_mask))
+ chan_sel = BU27008_BLUE2_CLEAR3;
+ else
+ chan_sel = BU27008_BLUE2_IR3;
+ } else {
+ chan_sel = BU27008_CLEAR2_IR3;
+ }
+
+ chan_sel = FIELD_PREP(BU27008_MASK_CHAN_SEL, chan_sel);
+
+ return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL3,
+ BU27008_MASK_CHAN_SEL, chan_sel);
+}
+
+static const struct iio_info bu27008_info = {
+ .read_raw = &bu27008_read_raw,
+ .write_raw = &bu27008_write_raw,
+ .read_avail = &bu27008_read_avail,
+ .update_scan_mode = bu27008_update_scan_mode,
+ .validate_trigger = iio_validate_own_trigger,
+};
+
+static int bu27008_chip_init(struct bu27008_data *data)
+{
+ int ret;
+
+ ret = regmap_write_bits(data->regmap, BU27008_REG_SYSTEM_CONTROL,
+ BU27008_MASK_SW_RESET, BU27008_MASK_SW_RESET);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
+
+ /*
+ * The data sheet does not say how long the IC reset takes. However, it
+ * does say the IC needs a minimum of 100 us after power is applied
+ * before it can accept inputs. I'd assume > 1 ms is enough.
+ */
+ msleep(1);
+
+ ret = regmap_reinit_cache(data->regmap, &bu27008_regmap);
+ if (ret)
+ dev_err(data->dev, "Failed to reinit reg cache\n");
+
+ return ret;
+}
+
+static int bu27008_set_drdy_irq(struct bu27008_data *data, int state)
+{
+ return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL3,
+ BU27008_MASK_INT_EN, state);
+}
+
+static int bu27008_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct bu27008_data *data = iio_trigger_get_drvdata(trig);
+ int ret;
+
+ if (state)
+ ret = bu27008_set_drdy_irq(data, BU27008_INT_EN);
+ else
+ ret = bu27008_set_drdy_irq(data, BU27008_INT_DIS);
+ if (ret)
+ dev_err(data->dev, "Failed to set trigger state\n");
+
+ return ret;
+}
+
+static void bu27008_trigger_reenable(struct iio_trigger *trig)
+{
+ struct bu27008_data *data = iio_trigger_get_drvdata(trig);
+
+ enable_irq(data->irq);
+}
+
+static const struct iio_trigger_ops bu27008_trigger_ops = {
+ .set_trigger_state = bu27008_trigger_set_state,
+ .reenable = bu27008_trigger_reenable,
+};
+
+static irqreturn_t bu27008_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *idev = pf->indio_dev;
+ struct bu27008_data *data = iio_priv(idev);
+ struct {
+ __le16 chan[BU27008_NUM_HW_CHANS];
+ s64 ts __aligned(8);
+ } raw;
+ int ret, dummy;
+
+ memset(&raw, 0, sizeof(raw));
+
+ /*
+ * After some measurements, it seems reading the
+ * BU27008_REG_MODE_CONTROL3 debounces the IRQ line
+ */
+ ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL3, &dummy);
+ if (ret < 0)
+ goto err_read;
+
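+ /* Read all four data registers in one burst; ordering matches the scan masks above */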
+ ret = regmap_bulk_read(data->regmap, BU27008_REG_DATA0_LO, &raw.chan,
+ sizeof(raw.chan));
+ if (ret < 0)
+ goto err_read;
+
+ iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp);
+err_read:
+ iio_trigger_notify_done(idev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bu27008_buffer_preenable(struct iio_dev *idev)
+{
+ struct bu27008_data *data = iio_priv(idev);
+
+ return bu27008_meas_set(data, BU27008_MEAS_EN);
+}
+
+static int bu27008_buffer_postdisable(struct iio_dev *idev)
+{
+ struct bu27008_data *data = iio_priv(idev);
+
+ return bu27008_meas_set(data, BU27008_MEAS_DIS);
+}
+
+static const struct iio_buffer_setup_ops bu27008_buffer_ops = {
+ .preenable = bu27008_buffer_preenable,
+ .postdisable = bu27008_buffer_postdisable,
+};
+
+static irqreturn_t bu27008_data_rdy_poll(int irq, void *private)
+{
+ /*
+ * The BU27008 keeps IRQ asserted until we read the VALID bit from
+ * a register. We need to keep the IRQ disabled until then.
+ */
+ disable_irq_nosync(irq);
+ iio_trigger_poll(private);
+
+ return IRQ_HANDLED;
+}
+
+static int bu27008_setup_trigger(struct bu27008_data *data, struct iio_dev *idev)
+{
+ struct iio_trigger *itrig;
+ char *name;
+ int ret;
+
+ ret = devm_iio_triggered_buffer_setup(data->dev, idev,
+ &iio_pollfunc_store_time,
+ bu27008_trigger_handler,
+ &bu27008_buffer_ops);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "iio_triggered_buffer_setup_ext FAIL\n");
+
+ itrig = devm_iio_trigger_alloc(data->dev, "%sdata-rdy-dev%d",
+ idev->name, iio_device_id(idev));
+ if (!itrig)
+ return -ENOMEM;
+
+ data->trig = itrig;
+
+ itrig->ops = &bu27008_trigger_ops;
+ iio_trigger_set_drvdata(itrig, data);
+
+ name = devm_kasprintf(data->dev, GFP_KERNEL, "%s-bu27008",
+ dev_name(data->dev));
+
+ ret = devm_request_irq(data->dev, data->irq,
+ &bu27008_data_rdy_poll,
+ 0, name, itrig);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Could not request IRQ\n");
+
+ ret = devm_iio_trigger_register(data->dev, itrig);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Trigger registration failed\n");
+
+ /* set default trigger */
+ idev->trig = iio_trigger_get(itrig);
+
+ return 0;
+}
+
+static int bu27008_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct bu27008_data *data;
+ struct regmap *regmap;
+ unsigned int part_id, reg;
+ struct iio_dev *idev;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(i2c, &bu27008_regmap);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to initialize Regmap\n");
+
+ idev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!idev)
+ return -ENOMEM;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulator\n");
+
+ data = iio_priv(idev);
+
+ ret = regmap_read(regmap, BU27008_REG_SYSTEM_CONTROL, &reg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to access sensor\n");
+
+ part_id = FIELD_GET(BU27008_MASK_PART_ID, reg);
+
+ if (part_id != BU27008_ID)
+ dev_warn(dev, "unknown device 0x%x\n", part_id);
+
+ ret = devm_iio_init_iio_gts(dev, BU27008_SCALE_1X, 0, bu27008_gains,
+ ARRAY_SIZE(bu27008_gains), bu27008_itimes,
+ ARRAY_SIZE(bu27008_itimes), &data->gts);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_init_iio_gts(dev, BU27008_SCALE_1X, 0, bu27008_gains_ir,
+ ARRAY_SIZE(bu27008_gains_ir), bu27008_itimes,
+ ARRAY_SIZE(bu27008_itimes), &data->gts_ir);
+ if (ret)
+ return ret;
+
+ mutex_init(&data->mutex);
+ data->regmap = regmap;
+ data->dev = dev;
+ data->irq = i2c->irq;
+
+ idev->channels = bu27008_channels;
+ idev->num_channels = ARRAY_SIZE(bu27008_channels);
+ idev->name = "bu27008";
+ idev->info = &bu27008_info;
+ idev->modes = INDIO_DIRECT_MODE;
+ idev->available_scan_masks = bu27008_scan_masks;
+
+ ret = bu27008_chip_init(data);
+ if (ret)
+ return ret;
+
+ if (i2c->irq) {
+ ret = bu27008_setup_trigger(data, idev);
+ if (ret)
+ return ret;
+ } else {
+ dev_info(dev, "No IRQ, buffered mode disabled\n");
+ }
+
+ ret = devm_iio_device_register(dev, idev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Unable to register iio device\n");
+
+ return 0;
+}
+
+static const struct of_device_id bu27008_of_match[] = {
+ { .compatible = "rohm,bu27008" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bu27008_of_match);
+
+static struct i2c_driver bu27008_i2c_driver = {
+ .driver = {
+ .name = "bu27008",
+ .of_match_table = bu27008_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = bu27008_probe,
+};
+module_i2c_driver(bu27008_i2c_driver);
+
+MODULE_DESCRIPTION("ROHM BU27008 colour sensor driver");
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_GTS_HELPER);
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index f85194fda6b0..e63ef5789cde 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -1500,8 +1500,9 @@ static struct i2c_driver bu27034_i2c_driver = {
.driver = {
.name = "bu27034-als",
.of_match_table = bu27034_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = bu27034_probe,
+ .probe = bu27034_probe,
};
module_i2c_driver(bu27034_i2c_driver);
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 9d0218b7426e..bbb8581622f2 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -1121,7 +1121,7 @@ static struct i2c_driver rpr0521_driver = {
.pm = pm_ptr(&rpr0521_pm_ops),
.acpi_match_table = ACPI_PTR(rpr0521_acpi_match),
},
- .probe_new = rpr0521_probe,
+ .probe = rpr0521_probe,
.remove = rpr0521_remove,
.id_table = rpr0521_id,
};
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
index a08fbc8f5adb..ea2c437199c0 100644
--- a/drivers/iio/light/si1133.c
+++ b/drivers/iio/light/si1133.c
@@ -1064,7 +1064,7 @@ static struct i2c_driver si1133_driver = {
.driver = {
.name = "si1133",
},
- .probe_new = si1133_probe,
+ .probe = si1133_probe,
.id_table = si1133_ids,
};
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index f7126235f94c..77666b780a5c 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -1352,7 +1352,7 @@ static struct i2c_driver si1145_driver = {
.driver = {
.name = "si1145",
},
- .probe_new = si1145_probe,
+ .probe = si1145_probe,
.id_table = si1145_ids,
};
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index 2160e87bb498..6bc2ddfb77ca 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -57,7 +57,7 @@ static struct i2c_driver st_uvis25_driver = {
.pm = pm_sleep_ptr(&st_uvis25_pm_ops),
.of_match_table = st_uvis25_i2c_of_match,
},
- .probe_new = st_uvis25_i2c_probe,
+ .probe = st_uvis25_i2c_probe,
.id_table = st_uvis25_i2c_id_table,
};
module_i2c_driver(st_uvis25_driver);
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 48ae6ff0015e..72b08d870d33 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -714,7 +714,7 @@ static struct i2c_driver stk3310_driver = {
.pm = pm_sleep_ptr(&stk3310_pm_ops),
.acpi_match_table = ACPI_PTR(stk3310_acpi_id),
},
- .probe_new = stk3310_probe,
+ .probe = stk3310_probe,
.remove = stk3310_remove,
.id_table = stk3310_i2c_id,
};
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 5100732fbaf0..dcdd85b006be 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -373,7 +373,7 @@ static struct i2c_driver tcs3414_driver = {
.name = TCS3414_DRV_NAME,
.pm = pm_sleep_ptr(&tcs3414_pm_ops),
},
- .probe_new = tcs3414_probe,
+ .probe = tcs3414_probe,
.id_table = tcs3414_id,
};
module_i2c_driver(tcs3414_driver);
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 6187c5487916..75fcf2c93717 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -609,7 +609,7 @@ static struct i2c_driver tcs3472_driver = {
.name = TCS3472_DRV_NAME,
.pm = pm_sleep_ptr(&tcs3472_pm_ops),
},
- .probe_new = tcs3472_probe,
+ .probe = tcs3472_probe,
.remove = tcs3472_remove,
.id_table = tcs3472_id,
};
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index f2f55239a072..1a6f514bced6 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -862,7 +862,7 @@ static struct i2c_driver tsl2563_i2c_driver = {
.of_match_table = tsl2563_of_match,
.pm = pm_sleep_ptr(&tsl2563_pm_ops),
},
- .probe_new = tsl2563_probe,
+ .probe = tsl2563_probe,
.remove = tsl2563_remove,
.id_table = tsl2563_id,
};
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index a05f1c0453d1..02ad11611b9c 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -942,7 +942,7 @@ static struct i2c_driver tsl2583_driver = {
.of_match_table = tsl2583_of_match,
},
.id_table = tsl2583_idtable,
- .probe_new = tsl2583_probe,
+ .probe = tsl2583_probe,
.remove = tsl2583_remove,
};
module_i2c_driver(tsl2583_driver);
diff --git a/drivers/iio/light/tsl2591.c b/drivers/iio/light/tsl2591.c
index e485a556e6da..7bdbfe72f0f0 100644
--- a/drivers/iio/light/tsl2591.c
+++ b/drivers/iio/light/tsl2591.c
@@ -1214,7 +1214,7 @@ static struct i2c_driver tsl2591_driver = {
.pm = pm_ptr(&tsl2591_pm_ops),
.of_match_table = tsl2591_of_match,
},
- .probe_new = tsl2591_probe
+ .probe = tsl2591_probe
};
module_i2c_driver(tsl2591_driver);
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index e823c145f679..cab468a82b61 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -1932,7 +1932,7 @@ static struct i2c_driver tsl2772_driver = {
.pm = &tsl2772_pm_ops,
},
.id_table = tsl2772_idtable,
- .probe_new = tsl2772_probe,
+ .probe = tsl2772_probe,
};
module_i2c_driver(tsl2772_driver);
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
index d95397eb1526..4da7d78906d4 100644
--- a/drivers/iio/light/tsl4531.c
+++ b/drivers/iio/light/tsl4531.c
@@ -237,7 +237,7 @@ static struct i2c_driver tsl4531_driver = {
.name = TSL4531_DRV_NAME,
.pm = pm_sleep_ptr(&tsl4531_pm_ops),
},
- .probe_new = tsl4531_probe,
+ .probe = tsl4531_probe,
.remove = tsl4531_remove,
.id_table = tsl4531_id,
};
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 8b2a0c99c8e6..61b3b2aea626 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -974,7 +974,7 @@ static struct i2c_driver us5182d_driver = {
.of_match_table = us5182d_of_match,
.acpi_match_table = ACPI_PTR(us5182d_acpi_match),
},
- .probe_new = us5182d_probe,
+ .probe = us5182d_probe,
.remove = us5182d_remove,
.id_table = us5182d_id,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 56d3963d3d66..7c7362e28821 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1500,7 +1500,7 @@ static struct i2c_driver vcnl4000_driver = {
.pm = pm_ptr(&vcnl4000_pm_ops),
.of_match_table = vcnl_4000_of_match,
},
- .probe_new = vcnl4000_probe,
+ .probe = vcnl4000_probe,
.id_table = vcnl4000_id,
.remove = vcnl4000_remove,
};
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 94f5d611e98c..56bbefbc0ae6 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -670,7 +670,7 @@ static struct i2c_driver vcnl4035_driver = {
.pm = pm_ptr(&vcnl4035_pm_ops),
.of_match_table = vcnl4035_of_match,
},
- .probe_new = vcnl4035_probe,
+ .probe = vcnl4035_probe,
.remove = vcnl4035_remove,
.id_table = vcnl4035_id,
};
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index e7d2d5d177d4..043f233d9bdb 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -892,7 +892,7 @@ static struct i2c_driver veml6030_driver = {
.of_match_table = veml6030_of_match,
.pm = pm_ptr(&veml6030_pm_ops),
},
- .probe_new = veml6030_probe,
+ .probe = veml6030_probe,
.id_table = veml6030_id,
};
module_i2c_driver(veml6030_driver);
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
index ee76a68deb24..d99bf3ae0fe8 100644
--- a/drivers/iio/light/veml6070.c
+++ b/drivers/iio/light/veml6070.c
@@ -198,7 +198,7 @@ static struct i2c_driver veml6070_driver = {
.driver = {
.name = VEML6070_DRV_NAME,
},
- .probe_new = veml6070_probe,
+ .probe = veml6070_probe,
.remove = veml6070_remove,
.id_table = veml6070_id,
};
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index 8b56df26c59e..d4948dfc31ff 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -538,7 +538,7 @@ static struct i2c_driver vl6180_driver = {
.name = VL6180_DRV_NAME,
.of_match_table = vl6180_of_match,
},
- .probe_new = vl6180_probe,
+ .probe = vl6180_probe,
.id_table = vl6180_id,
};
diff --git a/drivers/iio/light/zopt2201.c b/drivers/iio/light/zopt2201.c
index e3bac8b56380..d370193a4742 100644
--- a/drivers/iio/light/zopt2201.c
+++ b/drivers/iio/light/zopt2201.c
@@ -554,7 +554,7 @@ static struct i2c_driver zopt2201_driver = {
.driver = {
.name = ZOPT2201_DRV_NAME,
},
- .probe_new = zopt2201_probe,
+ .probe = zopt2201_probe,
.id_table = zopt2201_id,
};
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 45abdcce6bc0..c74d11943ec7 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -1046,7 +1046,7 @@ static struct i2c_driver ak8974_driver = {
.pm = pm_ptr(&ak8974_dev_pm_ops),
.of_match_table = ak8974_of_match,
},
- .probe_new = ak8974_probe,
+ .probe = ak8974_probe,
.remove = ak8974_remove,
.id_table = ak8974_id,
};
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 924b481a3034..eb706d0bf70b 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -1110,7 +1110,7 @@ static struct i2c_driver ak8975_driver = {
.of_match_table = ak8975_of_match,
.acpi_match_table = ak_acpi_match,
},
- .probe_new = ak8975_probe,
+ .probe = ak8975_probe,
.remove = ak8975_remove,
.id_table = ak8975_id,
};
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index 44b8960eea17..281d1fa31c8e 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -71,7 +71,7 @@ static struct i2c_driver bmc150_magn_driver = {
.acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
.pm = &bmc150_magn_pm_ops,
},
- .probe_new = bmc150_magn_i2c_probe,
+ .probe = bmc150_magn_i2c_probe,
.remove = bmc150_magn_i2c_remove,
.id_table = bmc150_magn_i2c_id,
};
diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 7ef2b1d56289..bdd2784a9f86 100644
--- a/drivers/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -95,7 +95,7 @@ static struct i2c_driver hmc5843_driver = {
.of_match_table = hmc5843_of_match,
},
.id_table = hmc5843_id,
- .probe_new = hmc5843_i2c_probe,
+ .probe = hmc5843_i2c_probe,
.remove = hmc5843_i2c_remove,
};
module_i2c_driver(hmc5843_driver);
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 661176a885ad..deffe3ca9004 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -641,7 +641,7 @@ static struct i2c_driver mag3110_driver = {
.of_match_table = mag3110_of_match,
.pm = pm_sleep_ptr(&mag3110_pm_ops),
},
- .probe_new = mag3110_probe,
+ .probe = mag3110_probe,
.remove = mag3110_remove,
.id_table = mag3110_id,
};
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 756dadbad106..b495b8a63928 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -575,7 +575,7 @@ static struct i2c_driver mmc35240_driver = {
.pm = pm_sleep_ptr(&mmc35240_pm_ops),
.acpi_match_table = ACPI_PTR(mmc35240_acpi_match),
},
- .probe_new = mmc35240_probe,
+ .probe = mmc35240_probe,
.id_table = mmc35240_id,
};
diff --git a/drivers/iio/magnetometer/rm3100-i2c.c b/drivers/iio/magnetometer/rm3100-i2c.c
index ba669ab7113d..ac7276b3798c 100644
--- a/drivers/iio/magnetometer/rm3100-i2c.c
+++ b/drivers/iio/magnetometer/rm3100-i2c.c
@@ -45,7 +45,7 @@ static struct i2c_driver rm3100_driver = {
.name = "rm3100-i2c",
.of_match_table = rm3100_dt_match,
},
- .probe_new = rm3100_probe,
+ .probe = rm3100_probe,
};
module_i2c_driver(rm3100_driver);
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 8faa7409d9e1..6cc0dfd31821 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -427,6 +427,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM9DS0_IMU_DEV_NAME,
+ [1] = LSM303D_IMU_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_magn_4_16bit_channels,
.odr = {
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index cc0e0e94b129..950826dd20bf 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -111,7 +111,7 @@ static struct i2c_driver st_magn_driver = {
.name = "st-magn-i2c",
.of_match_table = st_magn_of_match,
},
- .probe_new = st_magn_i2c_probe,
+ .probe = st_magn_i2c_probe,
.id_table = st_magn_id_table,
};
module_i2c_driver(st_magn_driver);
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
index e155a75b3cd2..c5e5c4ad681e 100644
--- a/drivers/iio/magnetometer/tmag5273.c
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -734,7 +734,7 @@ static struct i2c_driver tmag5273_driver = {
.of_match_table = tmag5273_of_match,
.pm = pm_ptr(&tmag5273_pm_ops),
},
- .probe_new = tmag5273_probe,
+ .probe = tmag5273_probe,
.id_table = tmag5273_id,
};
module_i2c_driver(tmag5273_driver);
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index 753717158b07..c5e485bfc6fc 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -1605,7 +1605,7 @@ static struct i2c_driver yas5xx_driver = {
.of_match_table = yas5xx_of_match,
.pm = pm_ptr(&yas5xx_dev_pm_ops),
},
- .probe_new = yas5xx_probe,
+ .probe = yas5xx_probe,
.remove = yas5xx_remove,
.id_table = yas5xx_id,
};
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index 01dd3f858d99..e6a9a3c67845 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -136,4 +136,14 @@ config TPL0102
To compile this driver as a module, choose M here: the
module will be called tpl0102.
+config X9250
+ tristate "Renesas X9250 quad controlled potentiometers"
+ depends on SPI
+ help
+ Enable support for the Renesas X9250 quad controlled
+ potentiometers.
+
+ To compile this driver as a module, choose M here: the module
+ will be called x9250.
+
endmenu
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index 5ebb8e3bbd76..d11fb739176c 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_MCP4131) += mcp4131.o
obj-$(CONFIG_MCP4531) += mcp4531.o
obj-$(CONFIG_MCP41010) += mcp41010.o
obj-$(CONFIG_TPL0102) += tpl0102.o
+obj-$(CONFIG_X9250) += x9250.o
diff --git a/drivers/iio/potentiometer/ad5110.c b/drivers/iio/potentiometer/ad5110.c
index 8fbcce482989..991e745c4f93 100644
--- a/drivers/iio/potentiometer/ad5110.c
+++ b/drivers/iio/potentiometer/ad5110.c
@@ -334,7 +334,7 @@ static struct i2c_driver ad5110_driver = {
.name = "ad5110",
.of_match_table = ad5110_of_match,
},
- .probe_new = ad5110_probe,
+ .probe = ad5110_probe,
.id_table = ad5110_id,
};
module_i2c_driver(ad5110_driver);
diff --git a/drivers/iio/potentiometer/ad5272.c b/drivers/iio/potentiometer/ad5272.c
index aa140d632101..b17941e4c2f7 100644
--- a/drivers/iio/potentiometer/ad5272.c
+++ b/drivers/iio/potentiometer/ad5272.c
@@ -218,7 +218,7 @@ static struct i2c_driver ad5272_driver = {
.name = "ad5272",
.of_match_table = ad5272_dt_ids,
},
- .probe_new = ad5272_probe,
+ .probe = ad5272_probe,
.id_table = ad5272_id,
};
diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c
index 0b5e475807cb..fc183e0790da 100644
--- a/drivers/iio/potentiometer/ds1803.c
+++ b/drivers/iio/potentiometer/ds1803.c
@@ -252,7 +252,7 @@ static struct i2c_driver ds1803_driver = {
.name = "ds1803",
.of_match_table = ds1803_dt_ids,
},
- .probe_new = ds1803_probe,
+ .probe = ds1803_probe,
.id_table = ds1803_id,
};
diff --git a/drivers/iio/potentiometer/max5432.c b/drivers/iio/potentiometer/max5432.c
index 94ef27ef3fb5..c8e2481dadb5 100644
--- a/drivers/iio/potentiometer/max5432.c
+++ b/drivers/iio/potentiometer/max5432.c
@@ -123,7 +123,7 @@ static struct i2c_driver max5432_driver = {
.name = "max5432",
.of_match_table = max5432_dt_ids,
},
- .probe_new = max5432_probe,
+ .probe = max5432_probe,
};
module_i2c_driver(max5432_driver);
diff --git a/drivers/iio/potentiometer/mcp4018.c b/drivers/iio/potentiometer/mcp4018.c
index c0e171fec062..89daecc90305 100644
--- a/drivers/iio/potentiometer/mcp4018.c
+++ b/drivers/iio/potentiometer/mcp4018.c
@@ -174,7 +174,7 @@ static struct i2c_driver mcp4018_driver = {
.name = "mcp4018",
.of_match_table = mcp4018_of_match,
},
- .probe_new = mcp4018_probe,
+ .probe = mcp4018_probe,
.id_table = mcp4018_id,
};
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index c25f84b4a270..c513c00c8243 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -385,7 +385,7 @@ static struct i2c_driver mcp4531_driver = {
.name = "mcp4531",
.of_match_table = mcp4531_of_match,
},
- .probe_new = mcp4531_probe,
+ .probe = mcp4531_probe,
.id_table = mcp4531_id,
};
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
index a3465b413b0c..8923ccb0fc4f 100644
--- a/drivers/iio/potentiometer/tpl0102.c
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -161,7 +161,7 @@ static struct i2c_driver tpl0102_driver = {
.driver = {
.name = "tpl0102",
},
- .probe_new = tpl0102_probe,
+ .probe = tpl0102_probe,
.id_table = tpl0102_id,
};
diff --git a/drivers/iio/potentiometer/x9250.c b/drivers/iio/potentiometer/x9250.c
new file mode 100644
index 000000000000..735348492699
--- /dev/null
+++ b/drivers/iio/potentiometer/x9250.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *
+ * x9250.c -- Renesas X9250 potentiometers IIO driver
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+struct x9250_cfg {
+ const char *name;
+ int kohms;
+};
+
+struct x9250 {
+ struct spi_device *spi;
+ const struct x9250_cfg *cfg;
+ struct gpio_desc *wp_gpio;
+};
+
+#define X9250_ID 0x50
+#define X9250_CMD_RD_WCR(_p) (0x90 | (_p))
+#define X9250_CMD_WR_WCR(_p) (0xa0 | (_p))
+
+static int x9250_write8(struct x9250 *x9250, u8 cmd, u8 val)
+{
+ u8 txbuf[3];
+
+ txbuf[0] = X9250_ID;
+ txbuf[1] = cmd;
+ txbuf[2] = val;
+
+ return spi_write_then_read(x9250->spi, txbuf, ARRAY_SIZE(txbuf), NULL, 0);
+}
+
+static int x9250_read8(struct x9250 *x9250, u8 cmd, u8 *val)
+{
+ u8 txbuf[2];
+
+ txbuf[0] = X9250_ID;
+ txbuf[1] = cmd;
+
+ return spi_write_then_read(x9250->spi, txbuf, ARRAY_SIZE(txbuf), val, 1);
+}
+
+#define X9250_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+static const struct iio_chan_spec x9250_channels[] = {
+ X9250_CHANNEL(0),
+ X9250_CHANNEL(1),
+ X9250_CHANNEL(2),
+ X9250_CHANNEL(3),
+};
+
+static int x9250_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct x9250 *x9250 = iio_priv(indio_dev);
+ int ch = chan->channel;
+ int ret;
+ u8 v;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = x9250_read8(x9250, X9250_CMD_RD_WCR(ch), &v);
+ if (ret)
+ return ret;
+ *val = v;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1000 * x9250->cfg->kohms;
+ *val2 = U8_MAX;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+}
+
+static int x9250_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length, long mask)
+{
+ static const int range[] = {0, 1, 255}; /* min, step, max */
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *length = ARRAY_SIZE(range);
+ *vals = range;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ }
+
+ return -EINVAL;
+}
+
+static int x9250_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct x9250 *x9250 = iio_priv(indio_dev);
+ int ch = chan->channel;
+ int ret;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ if (val > U8_MAX || val < 0)
+ return -EINVAL;
+
+ gpiod_set_value_cansleep(x9250->wp_gpio, 0);
+ ret = x9250_write8(x9250, X9250_CMD_WR_WCR(ch), val);
+ gpiod_set_value_cansleep(x9250->wp_gpio, 1);
+
+ return ret;
+}
+
+static const struct iio_info x9250_info = {
+ .read_raw = x9250_read_raw,
+ .read_avail = x9250_read_avail,
+ .write_raw = x9250_write_raw,
+};
+
+enum x9250_type {
+ X9250T,
+ X9250U,
+};
+
+static const struct x9250_cfg x9250_cfg[] = {
+ [X9250T] = { .name = "x9250t", .kohms = 100, },
+ [X9250U] = { .name = "x9250u", .kohms = 50, },
+};
+
+static const char *const x9250_regulator_names[] = {
+ "vcc",
+ "avp",
+ "avn",
+};
+
+static int x9250_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct x9250 *x9250;
+ int ret;
+
+ ret = devm_regulator_bulk_get_enable(&spi->dev, ARRAY_SIZE(x9250_regulator_names),
+ x9250_regulator_names);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to get regulators\n");
+
+ /*
+ * The x9250 needs a 5ms maximum delay after the power-supplies are set
+ * before performing the first write (1ms for the first read).
+ */
+ usleep_range(5000, 6000);
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*x9250));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ x9250 = iio_priv(indio_dev);
+ x9250->spi = spi;
+ x9250->cfg = spi_get_device_match_data(spi);
+ x9250->wp_gpio = devm_gpiod_get_optional(&spi->dev, "wp", GPIOD_OUT_LOW);
+ if (IS_ERR(x9250->wp_gpio))
+ return dev_err_probe(&spi->dev, PTR_ERR(x9250->wp_gpio),
+ "failed to get wp gpio\n");
+
+ indio_dev->info = &x9250_info;
+ indio_dev->channels = x9250_channels;
+ indio_dev->num_channels = ARRAY_SIZE(x9250_channels);
+ indio_dev->name = x9250->cfg->name;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct of_device_id x9250_of_match[] = {
+ { .compatible = "renesas,x9250t", .data = &x9250_cfg[X9250T]},
+ { .compatible = "renesas,x9250u", .data = &x9250_cfg[X9250U]},
+ { }
+};
+MODULE_DEVICE_TABLE(of, x9250_of_match);
+
+static const struct spi_device_id x9250_id_table[] = {
+ { "x9250t", (kernel_ulong_t)&x9250_cfg[X9250T] },
+ { "x9250u", (kernel_ulong_t)&x9250_cfg[X9250U] },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, x9250_id_table);
+
+static struct spi_driver x9250_spi_driver = {
+ .driver = {
+ .name = "x9250",
+ .of_match_table = x9250_of_match,
+ },
+ .id_table = x9250_id_table,
+ .probe = x9250_probe,
+};
+
+module_spi_driver(x9250_spi_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("X9250 ALSA SoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index 0083e858c21e..bd0f2c3bf2f2 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -416,7 +416,7 @@ static struct i2c_driver lmp91000_driver = {
.name = LMP91000_DRV_NAME,
.of_match_table = lmp91000_of_match,
},
- .probe_new = lmp91000_probe,
+ .probe = lmp91000_probe,
.remove = lmp91000_remove,
.id_table = lmp91000_id,
};
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 02b97e89de50..7b4c2af32852 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -148,6 +148,19 @@ config MPL3115
To compile this driver as a module, choose M here: the module
will be called mpl3115.
+config MPRLS0025PA
+ tristate "Honeywell MPRLS0025PA (MicroPressure sensors series)"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here to build support for the Honeywell MicroPressure pressure
+	  sensor series. There are many different types with different pressure
+	  ranges. These ranges can be set up in the device tree.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mprls0025pa.
+
config MS5611
tristate "Measurement Specialties MS5611 pressure sensor driver"
select IIO_BUFFER
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 083ae87ff48f..c90f77210e94 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MPL115) += mpl115.o
obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
obj-$(CONFIG_MPL3115) += mpl3115.o
+obj-$(CONFIG_MPRLS0025PA) += mprls0025pa.o
obj-$(CONFIG_MS5611) += ms5611_core.o
obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o
obj-$(CONFIG_MS5611_SPI) += ms5611_spi.o
diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c
index c0140779366a..752a63c06b44 100644
--- a/drivers/iio/pressure/abp060mg.c
+++ b/drivers/iio/pressure/abp060mg.c
@@ -255,7 +255,7 @@ static struct i2c_driver abp060mg_driver = {
.driver = {
.name = "abp060mg",
},
- .probe_new = abp060mg_probe,
+ .probe = abp060mg_probe,
.id_table = abp060mg_id_table,
};
module_i2c_driver(abp060mg_driver);
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 567b945e6427..dbe630ad05b5 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -56,7 +56,7 @@ static struct i2c_driver bmp280_i2c_driver = {
.of_match_table = bmp280_of_i2c_match,
.pm = pm_ptr(&bmp280_dev_pm_ops),
},
- .probe_new = bmp280_i2c_probe,
+ .probe = bmp280_i2c_probe,
.id_table = bmp280_i2c_id,
};
module_i2c_driver(bmp280_i2c_driver);
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 43650b048d62..28c8269ba65d 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -362,7 +362,7 @@ static struct i2c_driver dlh_driver = {
.name = "dlhl60d",
.of_match_table = dlh_of_match,
},
- .probe_new = dlh_probe,
+ .probe = dlh_probe,
.id_table = dlh_id,
};
module_i2c_driver(dlh_driver);
diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
index 2af275a24ff9..b10dbf5cf494 100644
--- a/drivers/iio/pressure/dps310.c
+++ b/drivers/iio/pressure/dps310.c
@@ -887,7 +887,7 @@ static struct i2c_driver dps310_driver = {
.name = DPS310_DEV_NAME,
.acpi_match_table = dps310_acpi_match,
},
- .probe_new = dps310_probe,
+ .probe = dps310_probe,
.id_table = dps310_id,
};
module_i2c_driver(dps310_driver);
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
index bd1f71a99cfa..8bdb279129fa 100644
--- a/drivers/iio/pressure/hp03.c
+++ b/drivers/iio/pressure/hp03.c
@@ -282,7 +282,7 @@ static struct i2c_driver hp03_driver = {
.name = "hp03",
.of_match_table = hp03_of_match,
},
- .probe_new = hp03_probe,
+ .probe = hp03_probe,
.id_table = hp03_id,
};
module_i2c_driver(hp03_driver);
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index b6d2ff464341..a072de6cb59c 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(acpi, hp206c_acpi_match);
#endif
static struct i2c_driver hp206c_driver = {
- .probe_new = hp206c_probe,
+ .probe = hp206c_probe,
.id_table = hp206c_id,
.driver = {
.name = "hp206c",
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
index 407cf25ea0e3..2086f3ef338f 100644
--- a/drivers/iio/pressure/icp10100.c
+++ b/drivers/iio/pressure/icp10100.c
@@ -648,7 +648,7 @@ static struct i2c_driver icp10100_driver = {
.pm = pm_ptr(&icp10100_pm),
.of_match_table = icp10100_of_match,
},
- .probe_new = icp10100_probe,
+ .probe = icp10100_probe,
.id_table = icp10100_id,
};
module_i2c_driver(icp10100_driver);
diff --git a/drivers/iio/pressure/mpl115_i2c.c b/drivers/iio/pressure/mpl115_i2c.c
index ade4dd854ddf..fcbdadf4a511 100644
--- a/drivers/iio/pressure/mpl115_i2c.c
+++ b/drivers/iio/pressure/mpl115_i2c.c
@@ -55,7 +55,7 @@ static struct i2c_driver mpl115_i2c_driver = {
.name = "mpl115",
.pm = pm_ptr(&mpl115_dev_pm_ops),
},
- .probe_new = mpl115_i2c_probe,
+ .probe = mpl115_i2c_probe,
.id_table = mpl115_i2c_id,
};
module_i2c_driver(mpl115_i2c_driver);
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 72e811a5c96e..7aa19584c340 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -335,7 +335,7 @@ static struct i2c_driver mpl3115_driver = {
.of_match_table = mpl3115_of_match,
.pm = pm_sleep_ptr(&mpl3115_pm_ops),
},
- .probe_new = mpl3115_probe,
+ .probe = mpl3115_probe,
.remove = mpl3115_remove,
.id_table = mpl3115_id,
};
diff --git a/drivers/iio/pressure/mprls0025pa.c b/drivers/iio/pressure/mprls0025pa.c
new file mode 100644
index 000000000000..30fb2de36821
--- /dev/null
+++ b/drivers/iio/pressure/mprls0025pa.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MPRLS0025PA - Honeywell MicroPressure pressure sensor series driver
+ *
+ * Copyright (c) Andreas Klinger <ak@it-klinger.de>
+ *
+ * Data sheet:
+ * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/
+ * products/sensors/pressure-sensors/board-mount-pressure-sensors/
+ * micropressure-mpr-series/documents/
+ * sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+ *
+ * 7-bit I2C default slave address: 0x18
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/units.h>
+
+#include <linux/gpio/consumer.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/regulator/consumer.h>
+
+#include <asm/unaligned.h>
+
+/* bits in i2c status byte */
+#define MPR_I2C_POWER BIT(6) /* device is powered */
+#define MPR_I2C_BUSY BIT(5) /* device is busy */
+#define MPR_I2C_MEMORY BIT(2) /* integrity test passed */
+#define MPR_I2C_MATH BIT(0) /* internal math saturation */
+
+/*
+ * support _RAW sysfs interface:
+ *
+ * Calculation formula from the datasheet:
+ * pressure = (press_cnt - outputmin) * scale + pmin
+ * with:
+ * * pressure - measured pressure in Pascal
+ * * press_cnt - raw value read from sensor
+ * * pmin - minimum pressure range value of sensor (data->pmin)
+ * * pmax - maximum pressure range value of sensor (data->pmax)
+ * * outputmin - minimum numerical range raw value delivered by sensor
+ * (mpr_func_spec.output_min)
+ * * outputmax - maximum numerical range raw value delivered by sensor
+ * (mpr_func_spec.output_max)
+ * * scale - (pmax - pmin) / (outputmax - outputmin)
+ *
+ * formula of the userspace:
+ * pressure = (raw + offset) * scale
+ *
+ * Values given to the userspace in sysfs interface:
+ * * raw - press_cnt
+ * * offset - (-1 * outputmin) - pmin / scale
+ * note: With all sensors from the datasheet pmin = 0
+ * which reduces the offset to (-1 * outputmin)
+ */
+
+/*
+ * transfer function A: 10% to 90% of 2^24
+ * transfer function B: 2.5% to 22.5% of 2^24
+ * transfer function C: 20% to 80% of 2^24
+ */
+enum mpr_func_id {
+ MPR_FUNCTION_A,
+ MPR_FUNCTION_B,
+ MPR_FUNCTION_C,
+};
+
+struct mpr_func_spec {
+ u32 output_min;
+ u32 output_max;
+};
+
+static const struct mpr_func_spec mpr_func_spec[] = {
+ [MPR_FUNCTION_A] = {.output_min = 1677722, .output_max = 15099494},
+ [MPR_FUNCTION_B] = {.output_min = 419430, .output_max = 3774874},
+ [MPR_FUNCTION_C] = {.output_min = 3355443, .output_max = 13421773},
+};
+
+struct mpr_chan {
+ s32 pres; /* pressure value */
+ s64 ts; /* timestamp */
+};
+
+struct mpr_data {
+ struct i2c_client *client;
+ struct mutex lock; /*
+ * access to device during read
+ */
+ u32 pmin; /* minimal pressure in pascal */
+ u32 pmax; /* maximal pressure in pascal */
+ enum mpr_func_id function; /* transfer function */
+ u32 outmin; /*
+ * minimal numerical range raw
+ * value from sensor
+ */
+ u32 outmax; /*
+ * maximal numerical range raw
+ * value from sensor
+ */
+ int scale; /* int part of scale */
+ int scale2; /* nano part of scale */
+ int offset; /* int part of offset */
+ int offset2; /* nano part of offset */
+ struct gpio_desc *gpiod_reset; /* reset */
+ int irq; /*
+ * end of conversion irq;
+ * used to distinguish between
+ * irq mode and reading in a
+ * loop until data is ready
+ */
+ struct completion completion; /* handshake from irq to read */
+ struct mpr_chan chan; /*
+ * channel values for buffered
+ * mode
+ */
+};
+
+static const struct iio_chan_spec mpr_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static void mpr_reset(struct mpr_data *data)
+{
+ if (data->gpiod_reset) {
+ gpiod_set_value(data->gpiod_reset, 0);
+ udelay(10);
+ gpiod_set_value(data->gpiod_reset, 1);
+ }
+}
+
+/**
+ * mpr_read_pressure() - Read pressure value from sensor via I2C
+ * @data: Pointer to private data struct.
+ * @press: Output value read from sensor.
+ *
+ * Reading from the sensor by sending and receiving I2C telegrams.
+ *
+ * If there is an end of conversion (EOC) interrupt registered the function
+ * waits for a maximum of one second for the interrupt.
+ *
+ * Context: The function can sleep and data->lock should be held when calling it
+ * Return:
+ * * 0 - OK, the pressure value could be read
+ * * -ETIMEDOUT - Timeout while waiting for the EOC interrupt or busy flag is
+ * still set after nloops attempts of reading
+ */
+static int mpr_read_pressure(struct mpr_data *data, s32 *press)
+{
+ struct device *dev = &data->client->dev;
+ int ret, i;
+ u8 wdata[] = {0xAA, 0x00, 0x00};
+ s32 status;
+ int nloops = 10;
+ u8 buf[4];
+
+ reinit_completion(&data->completion);
+
+ ret = i2c_master_send(data->client, wdata, sizeof(wdata));
+ if (ret < 0) {
+ dev_err(dev, "error while writing ret: %d\n", ret);
+ return ret;
+ }
+ if (ret != sizeof(wdata)) {
+ dev_err(dev, "received size doesn't fit - ret: %d / %u\n", ret,
+ (u32)sizeof(wdata));
+ return -EIO;
+ }
+
+ if (data->irq > 0) {
+ ret = wait_for_completion_timeout(&data->completion, HZ);
+ if (!ret) {
+ dev_err(dev, "timeout while waiting for eoc irq\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ /* wait until status indicates data is ready */
+ for (i = 0; i < nloops; i++) {
+ /*
+ * datasheet only says to wait at least 5 ms for the
+ * data but leave the maximum response time open
+ * --> let's try it nloops (10) times which seems to be
+ * quite long
+ */
+ usleep_range(5000, 10000);
+ status = i2c_smbus_read_byte(data->client);
+ if (status < 0) {
+ dev_err(dev,
+ "error while reading, status: %d\n",
+ status);
+ return status;
+ }
+ if (!(status & MPR_I2C_BUSY))
+ break;
+ }
+ if (i == nloops) {
+ dev_err(dev, "timeout while reading\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ ret = i2c_master_recv(data->client, buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(dev, "error in i2c_master_recv ret: %d\n", ret);
+ return ret;
+ }
+ if (ret != sizeof(buf)) {
+ dev_err(dev, "received size doesn't fit - ret: %d / %u\n", ret,
+ (u32)sizeof(buf));
+ return -EIO;
+ }
+
+ if (buf[0] & MPR_I2C_BUSY) {
+ /*
+		 * it should never be the case that the status byte still
+		 * indicates busy
+ */
+ dev_err(dev, "data still not ready: %08x\n", buf[0]);
+ return -ETIMEDOUT;
+ }
+
+ *press = get_unaligned_be24(&buf[1]);
+
+ dev_dbg(dev, "received: %*ph cnt: %d\n", ret, buf, *press);
+
+ return 0;
+}
+
+static irqreturn_t mpr_eoc_handler(int irq, void *p)
+{
+ struct mpr_data *data = p;
+
+ complete(&data->completion);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mpr_trigger_handler(int irq, void *p)
+{
+ int ret;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mpr_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->lock);
+ ret = mpr_read_pressure(data, &data->chan.pres);
+ if (ret < 0)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->chan,
+ iio_get_time_ns(indio_dev));
+
+err:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int mpr_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ int ret;
+ s32 pressure;
+ struct mpr_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PRESSURE)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+ ret = mpr_read_pressure(data, &pressure);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = pressure;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = data->scale;
+ *val2 = data->scale2;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = data->offset;
+ *val2 = data->offset2;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mpr_info = {
+ .read_raw = &mpr_read_raw,
+};
+
+static int mpr_probe(struct i2c_client *client)
+{
+ int ret;
+ struct mpr_data *data;
+ struct iio_dev *indio_dev;
+ struct device *dev = &client->dev;
+ s64 scale, offset;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE))
+ return dev_err_probe(dev, -EOPNOTSUPP,
+ "I2C functionality not supported\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM, "couldn't get iio_dev\n");
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->irq = client->irq;
+
+ mutex_init(&data->lock);
+ init_completion(&data->completion);
+
+ indio_dev->name = "mprls0025pa";
+ indio_dev->info = &mpr_info;
+ indio_dev->channels = mpr_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mpr_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "can't get and enable vdd supply\n");
+
+ if (dev_fwnode(dev)) {
+ ret = device_property_read_u32(dev, "honeywell,pmin-pascal",
+ &data->pmin);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,pmin-pascal could not be read\n");
+ ret = device_property_read_u32(dev, "honeywell,pmax-pascal",
+ &data->pmax);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,pmax-pascal could not be read\n");
+ ret = device_property_read_u32(dev,
+ "honeywell,transfer-function", &data->function);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,transfer-function could not be read\n");
+ if (data->function > MPR_FUNCTION_C)
+ return dev_err_probe(dev, -EINVAL,
+ "honeywell,transfer-function %d invalid\n",
+ data->function);
+ } else {
+ /* when loaded as i2c device we need to use default values */
+ dev_notice(dev, "firmware node not found; using defaults\n");
+ data->pmin = 0;
+ data->pmax = 172369; /* 25 psi */
+ data->function = MPR_FUNCTION_A;
+ }
+
+ data->outmin = mpr_func_spec[data->function].output_min;
+ data->outmax = mpr_func_spec[data->function].output_max;
+
+ /* use 64 bit calculation for preserving a reasonable precision */
+ scale = div_s64(((s64)(data->pmax - data->pmin)) * NANO,
+ data->outmax - data->outmin);
+ data->scale = div_s64_rem(scale, NANO, &data->scale2);
+ /*
+ * multiply with NANO before dividing by scale and later divide by NANO
+ * again.
+ */
+ offset = ((-1LL) * (s64)data->outmin) * NANO -
+ div_s64(div_s64((s64)data->pmin * NANO, scale), NANO);
+ data->offset = div_s64_rem(offset, NANO, &data->offset2);
+
+ if (data->irq > 0) {
+ ret = devm_request_irq(dev, data->irq, mpr_eoc_handler,
+ IRQF_TRIGGER_RISING, client->name, data);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "request irq %d failed\n", data->irq);
+ }
+
+ data->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(data->gpiod_reset))
+ return dev_err_probe(dev, PTR_ERR(data->gpiod_reset),
+ "request reset-gpio failed\n");
+
+ mpr_reset(data);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ mpr_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "iio triggered buffer setup failed\n");
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "unable to register iio device\n");
+
+ return 0;
+}
+
+static const struct of_device_id mpr_matches[] = {
+ { .compatible = "honeywell,mprls0025pa" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mpr_matches);
+
+static const struct i2c_device_id mpr_id[] = {
+ { "mprls0025pa" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mpr_id);
+
+static struct i2c_driver mpr_driver = {
+ .probe = mpr_probe,
+ .id_table = mpr_id,
+ .driver = {
+ .name = "mprls0025pa",
+ .of_match_table = mpr_matches,
+ },
+};
+module_i2c_driver(mpr_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("Honeywell MPRLS0025PA I2C driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index e3c68a3ed76a..9a0f52321fcb 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -125,7 +125,7 @@ static struct i2c_driver ms5611_driver = {
.of_match_table = ms5611_i2c_matches,
},
.id_table = ms5611_id,
- .probe_new = ms5611_i2c_probe,
+ .probe = ms5611_i2c_probe,
};
module_i2c_driver(ms5611_driver);
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index c4981b29dccb..9b3abffb724b 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -238,7 +238,7 @@ static const struct of_device_id ms5637_of_match[] = {
MODULE_DEVICE_TABLE(of, ms5637_of_match);
static struct i2c_driver ms5637_driver = {
- .probe_new = ms5637_probe,
+ .probe = ms5637_probe,
.id_table = ms5637_id,
.driver = {
.name = "ms5637",
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index f2c3bb568d16..5101552e3f38 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -116,7 +116,7 @@ static struct i2c_driver st_press_driver = {
.of_match_table = st_press_of_match,
.acpi_match_table = ACPI_PTR(st_press_acpi_match),
},
- .probe_new = st_press_i2c_probe,
+ .probe = st_press_i2c_probe,
.id_table = st_press_id_table,
};
module_i2c_driver(st_press_driver);
diff --git a/drivers/iio/pressure/t5403.c b/drivers/iio/pressure/t5403.c
index 2fbf14aff033..a6463c06975e 100644
--- a/drivers/iio/pressure/t5403.c
+++ b/drivers/iio/pressure/t5403.c
@@ -260,7 +260,7 @@ static struct i2c_driver t5403_driver = {
.driver = {
.name = "t5403",
},
- .probe_new = t5403_probe,
+ .probe = t5403_probe,
.id_table = t5403_id,
};
module_i2c_driver(t5403_driver);
diff --git a/drivers/iio/pressure/zpa2326_i2c.c b/drivers/iio/pressure/zpa2326_i2c.c
index ade465014be1..c7fffbe7c788 100644
--- a/drivers/iio/pressure/zpa2326_i2c.c
+++ b/drivers/iio/pressure/zpa2326_i2c.c
@@ -76,7 +76,7 @@ static struct i2c_driver zpa2326_i2c_driver = {
.of_match_table = zpa2326_i2c_matches,
.pm = ZPA2326_PM_OPS,
},
- .probe_new = zpa2326_probe_i2c,
+ .probe = zpa2326_probe_i2c,
.remove = zpa2326_remove_i2c,
.id_table = zpa2326_i2c_ids,
};
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index 7b8f40b7ccf3..fe45ca35a124 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -1008,7 +1008,7 @@ static struct i2c_driver isl29501_driver = {
.name = "isl29501",
},
.id_table = isl29501_id,
- .probe_new = isl29501_probe,
+ .probe = isl29501_probe,
};
module_i2c_driver(isl29501_driver);
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index e70cac8240af..fb1073c8d9f7 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -264,7 +264,7 @@ static struct i2c_driver mb1232_driver = {
.name = "maxbotix-mb1232",
.of_match_table = of_mb1232_match,
},
- .probe_new = mb1232_probe,
+ .probe = mb1232_probe,
.id_table = mb1232_id,
};
module_i2c_driver(mb1232_driver);
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index c9eead01a031..2913d5e0fe4f 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -365,7 +365,7 @@ static struct i2c_driver lidar_driver = {
.of_match_table = lidar_dt_ids,
.pm = pm_ptr(&lidar_pm_ops),
},
- .probe_new = lidar_probe,
+ .probe = lidar_probe,
.remove = lidar_remove,
.id_table = lidar_id,
};
diff --git a/drivers/iio/proximity/rfd77402.c b/drivers/iio/proximity/rfd77402.c
index 44f72b78bd50..f02e83e3f15f 100644
--- a/drivers/iio/proximity/rfd77402.c
+++ b/drivers/iio/proximity/rfd77402.c
@@ -318,7 +318,7 @@ static struct i2c_driver rfd77402_driver = {
.name = RFD77402_DRV_NAME,
.pm = pm_sleep_ptr(&rfd77402_pm_ops),
},
- .probe_new = rfd77402_probe,
+ .probe = rfd77402_probe,
.id_table = rfd77402_id,
};
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index 61866d0440f7..a75ea5042876 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -549,7 +549,7 @@ static struct i2c_driver srf08_driver = {
.name = "srf08",
.of_match_table = of_srf08_match,
},
- .probe_new = srf08_probe,
+ .probe = srf08_probe,
.id_table = srf08_id,
};
module_i2c_driver(srf08_driver);
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index 0e4747ccd3cf..d977aacb7491 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -1050,7 +1050,7 @@ static struct i2c_driver sx9310_driver = {
*/
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = sx9310_probe,
+ .probe = sx9310_probe,
.id_table = sx9310_id,
};
module_i2c_driver(sx9310_driver);
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index 9a40ca32bb1c..438f9c9aba6e 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -1152,7 +1152,7 @@ static struct i2c_driver sx9324_driver = {
*/
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = sx9324_probe,
+ .probe = sx9324_probe,
.id_table = sx9324_id,
};
module_i2c_driver(sx9324_driver);
diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c
index a50d9176411a..2c4e14a4fe9f 100644
--- a/drivers/iio/proximity/sx9360.c
+++ b/drivers/iio/proximity/sx9360.c
@@ -896,7 +896,7 @@ static struct i2c_driver sx9360_driver = {
*/
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = sx9360_probe,
+ .probe = sx9360_probe,
.id_table = sx9360_id,
};
module_i2c_driver(sx9360_driver);
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 9b2cfcade6a4..550e7d3cd5ee 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -1055,7 +1055,7 @@ static struct i2c_driver sx9500_driver = {
.of_match_table = sx9500_of_match,
.pm = pm_sleep_ptr(&sx9500_pm_ops),
},
- .probe_new = sx9500_probe,
+ .probe = sx9500_probe,
.remove = sx9500_remove,
.id_table = sx9500_id,
};
diff --git a/drivers/iio/proximity/vcnl3020.c b/drivers/iio/proximity/vcnl3020.c
index cbc8400c773c..d1ddf85f5383 100644
--- a/drivers/iio/proximity/vcnl3020.c
+++ b/drivers/iio/proximity/vcnl3020.c
@@ -662,7 +662,7 @@ static struct i2c_driver vcnl3020_driver = {
.name = "vcnl3020",
.of_match_table = vcnl3020_of_match,
},
- .probe_new = vcnl3020_probe,
+ .probe = vcnl3020_probe,
};
module_i2c_driver(vcnl3020_driver);
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index c7c4d33d340f..2cea64bea909 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -294,7 +294,7 @@ static struct i2c_driver vl53l0x_driver = {
.name = "vl53l0x-i2c",
.of_match_table = st_vl53l0x_dt_match,
},
- .probe_new = vl53l0x_probe,
+ .probe = vl53l0x_probe,
.id_table = vl53l0x_id,
};
module_i2c_driver(vl53l0x_driver);
diff --git a/drivers/iio/temperature/max30208.c b/drivers/iio/temperature/max30208.c
index c85c21474711..48be03852cd8 100644
--- a/drivers/iio/temperature/max30208.c
+++ b/drivers/iio/temperature/max30208.c
@@ -242,7 +242,7 @@ static struct i2c_driver max30208_driver = {
.of_match_table = max30208_of_match,
.acpi_match_table = max30208_acpi_match,
},
- .probe_new = max30208_probe,
+ .probe = max30208_probe,
.id_table = max30208_id_table,
};
module_i2c_driver(max30208_driver);
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 909fadb62349..676dc8701924 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * mlx90614.c - Support for Melexis MLX90614 contactless IR temperature sensor
+ * mlx90614.c - Support for Melexis MLX90614/MLX90615 contactless IR temperature sensor
*
* Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
* Copyright (c) 2015 Essensium NV
* Copyright (c) 2015 Melexis
*
- * Driver for the Melexis MLX90614 I2C 16-bit IR thermopile sensor
+ * Driver for the Melexis MLX90614/MLX90615 I2C 16-bit IR thermopile sensor
+ *
+ * MLX90614 - 17-bit ADC + MLX90302 DSP
+ * MLX90615 - 16-bit ADC + MLX90325 DSP
*
* (7-bit I2C slave address 0x5a, 100KHz bus speed only!)
*
@@ -19,12 +22,13 @@
* the "wakeup" GPIO is not given, power management will be disabled.
*/
+#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/jiffies.h>
-#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
@@ -34,16 +38,9 @@
#define MLX90614_OP_EEPROM 0x20
#define MLX90614_OP_SLEEP 0xff
-/* RAM offsets with 16-bit data, MSB first */
-#define MLX90614_RAW1 (MLX90614_OP_RAM | 0x04) /* raw data IR channel 1 */
-#define MLX90614_RAW2 (MLX90614_OP_RAM | 0x05) /* raw data IR channel 2 */
-#define MLX90614_TA (MLX90614_OP_RAM | 0x06) /* ambient temperature */
-#define MLX90614_TOBJ1 (MLX90614_OP_RAM | 0x07) /* object 1 temperature */
-#define MLX90614_TOBJ2 (MLX90614_OP_RAM | 0x08) /* object 2 temperature */
-
-/* EEPROM offsets with 16-bit data, MSB first */
-#define MLX90614_EMISSIVITY (MLX90614_OP_EEPROM | 0x04) /* emissivity correction coefficient */
-#define MLX90614_CONFIG (MLX90614_OP_EEPROM | 0x05) /* configuration register */
+#define MLX90615_OP_EEPROM 0x10
+#define MLX90615_OP_RAM 0x20
+#define MLX90615_OP_SLEEP 0xc6
/* Control bits in configuration register */
#define MLX90614_CONFIG_IIR_SHIFT 0 /* IIR coefficient */
@@ -52,44 +49,61 @@
#define MLX90614_CONFIG_DUAL_MASK (1 << MLX90614_CONFIG_DUAL_SHIFT)
#define MLX90614_CONFIG_FIR_SHIFT 8 /* FIR coefficient */
#define MLX90614_CONFIG_FIR_MASK (0x7 << MLX90614_CONFIG_FIR_SHIFT)
-#define MLX90614_CONFIG_GAIN_SHIFT 11 /* gain */
-#define MLX90614_CONFIG_GAIN_MASK (0x7 << MLX90614_CONFIG_GAIN_SHIFT)
+
+#define MLX90615_CONFIG_IIR_SHIFT 12 /* IIR coefficient */
+#define MLX90615_CONFIG_IIR_MASK (0x7 << MLX90615_CONFIG_IIR_SHIFT)
/* Timings (in ms) */
#define MLX90614_TIMING_EEPROM 20 /* time for EEPROM write/erase to complete */
#define MLX90614_TIMING_WAKEUP 34 /* time to hold SDA low for wake-up */
#define MLX90614_TIMING_STARTUP 250 /* time before first data after wake-up */
+#define MLX90615_TIMING_WAKEUP 22 /* time to hold SCL low for wake-up */
+
#define MLX90614_AUTOSLEEP_DELAY 5000 /* default autosleep delay */
/* Magic constants */
#define MLX90614_CONST_OFFSET_DEC -13657 /* decimal part of the Kelvin offset */
#define MLX90614_CONST_OFFSET_REM 500000 /* remainder of offset (273.15*50) */
#define MLX90614_CONST_SCALE 20 /* Scale in milliKelvin (0.02 * 1000) */
-#define MLX90614_CONST_RAW_EMISSIVITY_MAX 65535 /* max value for emissivity */
-#define MLX90614_CONST_EMISSIVITY_RESOLUTION 15259 /* 1/65535 ~ 0.000015259 */
#define MLX90614_CONST_FIR 0x7 /* Fixed value for FIR part of low pass filter */
+/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
+struct mlx_chip_info {
+ /* EEPROM offsets with 16-bit data, MSB first */
+ /* emissivity correction coefficient */
+ u8 op_eeprom_emissivity;
+ u8 op_eeprom_config1;
+ /* RAM offsets with 16-bit data, MSB first */
+ /* ambient temperature */
+ u8 op_ram_ta;
+ /* object 1 temperature */
+ u8 op_ram_tobj1;
+ /* object 2 temperature */
+ u8 op_ram_tobj2;
+ u8 op_sleep;
+ /* support for two input channels (MLX90614 only) */
+ u8 dual_channel;
+ u8 wakeup_delay_ms;
+ u16 emissivity_max;
+ u16 fir_config_mask;
+ u16 iir_config_mask;
+ int iir_valid_offset;
+ u16 iir_values[8];
+ int iir_freqs[8][2];
+};
+
struct mlx90614_data {
struct i2c_client *client;
struct mutex lock; /* for EEPROM access only */
struct gpio_desc *wakeup_gpio; /* NULL to disable sleep/wake-up */
+ const struct mlx_chip_info *chip_info; /* Chip hardware details */
unsigned long ready_timestamp; /* in jiffies */
};
-/* Bandwidth values for IIR filtering */
-static const int mlx90614_iir_values[] = {77, 31, 20, 15, 723, 153, 110, 86};
-static const int mlx90614_freqs[][2] = {
- {0, 150000},
- {0, 200000},
- {0, 310000},
- {0, 770000},
- {0, 860000},
- {1, 100000},
- {1, 530000},
- {7, 230000}
-};
-
/*
* Erase an address and write word.
* The mutex must be locked before calling.
@@ -129,21 +143,26 @@ static s32 mlx90614_write_word(const struct i2c_client *client, u8 command,
}
/*
- * Find the IIR value inside mlx90614_iir_values array and return its position
+ * Find the IIR value inside iir_values array and return its position
* which is equivalent to the bit value in sensor register
*/
static inline s32 mlx90614_iir_search(const struct i2c_client *client,
int value)
{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct mlx90614_data *data = iio_priv(indio_dev);
+ const struct mlx_chip_info *chip_info = data->chip_info;
int i;
s32 ret;
- for (i = 0; i < ARRAY_SIZE(mlx90614_iir_values); ++i) {
- if (value == mlx90614_iir_values[i])
+ for (i = chip_info->iir_valid_offset;
+ i < ARRAY_SIZE(chip_info->iir_values);
+ i++) {
+ if (value == chip_info->iir_values[i])
break;
}
- if (i == ARRAY_SIZE(mlx90614_iir_values))
+ if (i == ARRAY_SIZE(chip_info->iir_values))
return -EINVAL;
/*
@@ -151,17 +170,21 @@ static inline s32 mlx90614_iir_search(const struct i2c_client *client,
* we must read them before we actually write
* changes
*/
- ret = i2c_smbus_read_word_data(client, MLX90614_CONFIG);
+ ret = i2c_smbus_read_word_data(client, chip_info->op_eeprom_config1);
if (ret < 0)
return ret;
- ret &= ~MLX90614_CONFIG_FIR_MASK;
- ret |= MLX90614_CONST_FIR << MLX90614_CONFIG_FIR_SHIFT;
- ret &= ~MLX90614_CONFIG_IIR_MASK;
- ret |= i << MLX90614_CONFIG_IIR_SHIFT;
+ /* Modify FIR on parts which have configurable FIR filter */
+ if (chip_info->fir_config_mask) {
+ ret &= ~chip_info->fir_config_mask;
+ ret |= field_prep(chip_info->fir_config_mask, MLX90614_CONST_FIR);
+ }
+
+ ret &= ~chip_info->iir_config_mask;
+ ret |= field_prep(chip_info->iir_config_mask, i);
/* Write changed values */
- ret = mlx90614_write_word(client, MLX90614_CONFIG, ret);
+ ret = mlx90614_write_word(client, chip_info->op_eeprom_config1, ret);
return ret;
}
@@ -221,22 +244,26 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct mlx90614_data *data = iio_priv(indio_dev);
- u8 cmd;
+ const struct mlx_chip_info *chip_info = data->chip_info;
+ u8 cmd, idx;
s32 ret;
switch (mask) {
case IIO_CHAN_INFO_RAW: /* 0.02K / LSB */
switch (channel->channel2) {
case IIO_MOD_TEMP_AMBIENT:
- cmd = MLX90614_TA;
+ cmd = chip_info->op_ram_ta;
break;
case IIO_MOD_TEMP_OBJECT:
+ if (chip_info->dual_channel && channel->channel)
+ return -EINVAL;
+
switch (channel->channel) {
case 0:
- cmd = MLX90614_TOBJ1;
+ cmd = chip_info->op_ram_tobj1;
break;
case 1:
- cmd = MLX90614_TOBJ2;
+ cmd = chip_info->op_ram_tobj2;
break;
default:
return -EINVAL;
@@ -268,45 +295,48 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
*val = MLX90614_CONST_SCALE;
return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBEMISSIVITY: /* 1/65535 / LSB */
+ case IIO_CHAN_INFO_CALIBEMISSIVITY: /* 1/emissivity_max / LSB */
ret = mlx90614_power_get(data, false);
if (ret < 0)
return ret;
mutex_lock(&data->lock);
ret = i2c_smbus_read_word_data(data->client,
- MLX90614_EMISSIVITY);
+ chip_info->op_eeprom_emissivity);
mutex_unlock(&data->lock);
mlx90614_power_put(data);
if (ret < 0)
return ret;
- if (ret == MLX90614_CONST_RAW_EMISSIVITY_MAX) {
+ if (ret == chip_info->emissivity_max) {
*val = 1;
*val2 = 0;
} else {
*val = 0;
- *val2 = ret * MLX90614_CONST_EMISSIVITY_RESOLUTION;
+ *val2 = ret * NSEC_PER_SEC / chip_info->emissivity_max;
}
return IIO_VAL_INT_PLUS_NANO;
- case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: /* IIR setting with
- FIR = 1024 */
+ /* IIR setting with FIR=1024 (MLX90614) or FIR=65536 (MLX90615) */
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
ret = mlx90614_power_get(data, false);
if (ret < 0)
return ret;
mutex_lock(&data->lock);
- ret = i2c_smbus_read_word_data(data->client, MLX90614_CONFIG);
+ ret = i2c_smbus_read_word_data(data->client,
+ chip_info->op_eeprom_config1);
mutex_unlock(&data->lock);
mlx90614_power_put(data);
if (ret < 0)
return ret;
- *val = mlx90614_iir_values[ret & MLX90614_CONFIG_IIR_MASK] / 100;
- *val2 = (mlx90614_iir_values[ret & MLX90614_CONFIG_IIR_MASK] % 100) *
- 10000;
+ idx = field_get(chip_info->iir_config_mask, ret) -
+ chip_info->iir_valid_offset;
+
+ *val = chip_info->iir_values[idx] / 100;
+ *val2 = (chip_info->iir_values[idx] % 100) * 10000;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
@@ -318,22 +348,23 @@ static int mlx90614_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct mlx90614_data *data = iio_priv(indio_dev);
+ const struct mlx_chip_info *chip_info = data->chip_info;
s32 ret;
switch (mask) {
- case IIO_CHAN_INFO_CALIBEMISSIVITY: /* 1/65535 / LSB */
+ case IIO_CHAN_INFO_CALIBEMISSIVITY: /* 1/emissivity_max / LSB */
if (val < 0 || val2 < 0 || val > 1 || (val == 1 && val2 != 0))
return -EINVAL;
- val = val * MLX90614_CONST_RAW_EMISSIVITY_MAX +
- val2 / MLX90614_CONST_EMISSIVITY_RESOLUTION;
+ val = val * chip_info->emissivity_max +
+ val2 * chip_info->emissivity_max / NSEC_PER_SEC;
ret = mlx90614_power_get(data, false);
if (ret < 0)
return ret;
mutex_lock(&data->lock);
- ret = mlx90614_write_word(data->client, MLX90614_EMISSIVITY,
- val);
+ ret = mlx90614_write_word(data->client,
+ chip_info->op_eeprom_emissivity, val);
mutex_unlock(&data->lock);
mlx90614_power_put(data);
@@ -377,11 +408,15 @@ static int mlx90614_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct mlx90614_data *data = iio_priv(indio_dev);
+ const struct mlx_chip_info *chip_info = data->chip_info;
+
switch (mask) {
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- *vals = (int *)mlx90614_freqs;
+ *vals = (int *)chip_info->iir_freqs;
*type = IIO_VAL_INT_PLUS_MICRO;
- *length = 2 * ARRAY_SIZE(mlx90614_freqs);
+ *length = 2 * (ARRAY_SIZE(chip_info->iir_freqs) -
+ chip_info->iir_valid_offset);
return IIO_AVAIL_LIST;
default:
return -EINVAL;
@@ -435,6 +470,7 @@ static const struct iio_info mlx90614_info = {
#ifdef CONFIG_PM
static int mlx90614_sleep(struct mlx90614_data *data)
{
+ const struct mlx_chip_info *chip_info = data->chip_info;
s32 ret;
if (!data->wakeup_gpio) {
@@ -447,7 +483,7 @@ static int mlx90614_sleep(struct mlx90614_data *data)
mutex_lock(&data->lock);
ret = i2c_smbus_xfer(data->client->adapter, data->client->addr,
data->client->flags | I2C_CLIENT_PEC,
- I2C_SMBUS_WRITE, MLX90614_OP_SLEEP,
+ I2C_SMBUS_WRITE, chip_info->op_sleep,
I2C_SMBUS_BYTE, NULL);
mutex_unlock(&data->lock);
@@ -456,6 +492,8 @@ static int mlx90614_sleep(struct mlx90614_data *data)
static int mlx90614_wakeup(struct mlx90614_data *data)
{
+ const struct mlx_chip_info *chip_info = data->chip_info;
+
if (!data->wakeup_gpio) {
dev_dbg(&data->client->dev, "Wake-up disabled");
return -ENOSYS;
@@ -465,7 +503,7 @@ static int mlx90614_wakeup(struct mlx90614_data *data)
i2c_lock_bus(data->client->adapter, I2C_LOCK_ROOT_ADAPTER);
gpiod_direction_output(data->wakeup_gpio, 0);
- msleep(MLX90614_TIMING_WAKEUP);
+ msleep(chip_info->wakeup_delay_ms);
gpiod_direction_input(data->wakeup_gpio);
i2c_unlock_bus(data->client->adapter, I2C_LOCK_ROOT_ADAPTER);
@@ -478,7 +516,7 @@ static int mlx90614_wakeup(struct mlx90614_data *data)
* If the read fails, the controller will probably be reset so that
* further reads will work.
*/
- i2c_smbus_read_word_data(data->client, MLX90614_CONFIG);
+ i2c_smbus_read_word_data(data->client, chip_info->op_eeprom_config1);
return 0;
}
@@ -527,9 +565,15 @@ static inline struct gpio_desc *mlx90614_probe_wakeup(struct i2c_client *client)
/* Return 0 for single sensor, 1 for dual sensor, <0 on error. */
static int mlx90614_probe_num_ir_sensors(struct i2c_client *client)
{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct mlx90614_data *data = iio_priv(indio_dev);
+ const struct mlx_chip_info *chip_info = data->chip_info;
s32 ret;
- ret = i2c_smbus_read_word_data(client, MLX90614_CONFIG);
+ if (chip_info->dual_channel)
+ return 0;
+
+ ret = i2c_smbus_read_word_data(client, chip_info->op_eeprom_config1);
if (ret < 0)
return ret;
@@ -556,6 +600,7 @@ static int mlx90614_probe(struct i2c_client *client)
data->client = client;
mutex_init(&data->lock);
data->wakeup_gpio = mlx90614_probe_wakeup(client);
+ data->chip_info = device_get_match_data(&client->dev);
mlx90614_wakeup(data);
@@ -605,14 +650,68 @@ static void mlx90614_remove(struct i2c_client *client)
}
}
+static const struct mlx_chip_info mlx90614_chip_info = {
+ .op_eeprom_emissivity = MLX90614_OP_EEPROM | 0x04,
+ .op_eeprom_config1 = MLX90614_OP_EEPROM | 0x05,
+ .op_ram_ta = MLX90614_OP_RAM | 0x06,
+ .op_ram_tobj1 = MLX90614_OP_RAM | 0x07,
+ .op_ram_tobj2 = MLX90614_OP_RAM | 0x08,
+ .op_sleep = MLX90614_OP_SLEEP,
+ .dual_channel = true,
+ .wakeup_delay_ms = MLX90614_TIMING_WAKEUP,
+ .emissivity_max = 65535,
+ .fir_config_mask = MLX90614_CONFIG_FIR_MASK,
+ .iir_config_mask = MLX90614_CONFIG_IIR_MASK,
+ .iir_valid_offset = 0,
+ .iir_values = { 77, 31, 20, 15, 723, 153, 110, 86 },
+ .iir_freqs = {
+ { 0, 150000 }, /* 13% ~= 0.15 Hz */
+ { 0, 200000 }, /* 17% ~= 0.20 Hz */
+ { 0, 310000 }, /* 25% ~= 0.31 Hz */
+ { 0, 770000 }, /* 50% ~= 0.77 Hz */
+ { 0, 860000 }, /* 57% ~= 0.86 Hz */
+ { 1, 100000 }, /* 67% ~= 1.10 Hz */
+ { 1, 530000 }, /* 80% ~= 1.53 Hz */
+ { 7, 230000 } /* 100% ~= 7.23 Hz */
+ },
+};
+
+static const struct mlx_chip_info mlx90615_chip_info = {
+ .op_eeprom_emissivity = MLX90615_OP_EEPROM | 0x03,
+ .op_eeprom_config1 = MLX90615_OP_EEPROM | 0x02,
+ .op_ram_ta = MLX90615_OP_RAM | 0x06,
+ .op_ram_tobj1 = MLX90615_OP_RAM | 0x07,
+ .op_ram_tobj2 = MLX90615_OP_RAM | 0x08,
+ .op_sleep = MLX90615_OP_SLEEP,
+ .dual_channel = false,
+ .wakeup_delay_ms = MLX90615_TIMING_WAKEUP,
+ .emissivity_max = 16383,
+ .fir_config_mask = 0, /* MLX90615 FIR is fixed */
+ .iir_config_mask = MLX90615_CONFIG_IIR_MASK,
+ /* IIR value 0 is FORBIDDEN COMBINATION on MLX90615 */
+ .iir_valid_offset = 1,
+ .iir_values = { 500, 50, 30, 20, 15, 13, 10 },
+ .iir_freqs = {
+ { 0, 100000 }, /* 14% ~= 0.10 Hz */
+ { 0, 130000 }, /* 17% ~= 0.13 Hz */
+ { 0, 150000 }, /* 20% ~= 0.15 Hz */
+ { 0, 200000 }, /* 25% ~= 0.20 Hz */
+ { 0, 300000 }, /* 33% ~= 0.30 Hz */
+ { 0, 500000 }, /* 50% ~= 0.50 Hz */
+ { 5, 000000 }, /* 100% ~= 5.00 Hz */
+ },
+};
+
static const struct i2c_device_id mlx90614_id[] = {
- { "mlx90614", 0 },
+ { "mlx90614", .driver_data = (kernel_ulong_t)&mlx90614_chip_info },
+ { "mlx90615", .driver_data = (kernel_ulong_t)&mlx90615_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, mlx90614_id);
static const struct of_device_id mlx90614_of_match[] = {
- { .compatible = "melexis,mlx90614" },
+ { .compatible = "melexis,mlx90614", .data = &mlx90614_chip_info },
+ { .compatible = "melexis,mlx90615", .data = &mlx90615_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, mlx90614_of_match);
@@ -675,7 +774,7 @@ static struct i2c_driver mlx90614_driver = {
.of_match_table = mlx90614_of_match,
.pm = pm_ptr(&mlx90614_pm_ops),
},
- .probe_new = mlx90614_probe,
+ .probe = mlx90614_probe,
.remove = mlx90614_remove,
.id_table = mlx90614_id,
};
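For illustration only, the per-chip table above could be consumed as in the following sketch, which scales an IIO fixed-point emissivity (val + val2/1000000, range 0..1) to the chip's register range and writes it through the chip-specific EEPROM opcode. mlx90614_write_word(), struct mlx90614_data and data->lock exist in the driver; the helper name, the use of div_u64() (<linux/math64.h>) and the exact rounding are assumptions of this sketch, not the upstream arithmetic.

/* Hypothetical helper, not driver code: shows how emissivity_max and
 * op_eeprom_emissivity abstract the MLX90614/MLX90615 differences.
 */
static int demo_set_emissivity(struct mlx90614_data *data, int val, int val2)
{
	const struct mlx_chip_info *chip_info = data->chip_info;
	u16 word;
	int ret;

	/* Emissivity must lie in 0..1 (val is the integer part) */
	if (val < 0 || val2 < 0 || val > 1 || (val == 1 && val2 != 0))
		return -EINVAL;

	/* MLX90614 scales to 65535, MLX90615 to 16383 (emissivity_max) */
	word = val * chip_info->emissivity_max +
	       div_u64((u64)val2 * chip_info->emissivity_max, 1000000);

	mutex_lock(&data->lock);
	ret = mlx90614_write_word(data->client,
				  chip_info->op_eeprom_emissivity, word);
	mutex_unlock(&data->lock);

	return ret;
}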
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 753b7a4ccfdd..8a57be108620 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -1337,7 +1337,7 @@ static struct i2c_driver mlx90632_driver = {
.of_match_table = mlx90632_of_match,
.pm = pm_ptr(&mlx90632_pm_ops),
},
- .probe_new = mlx90632_probe,
+ .probe = mlx90632_probe,
.id_table = mlx90632_id,
};
module_i2c_driver(mlx90632_driver);
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index cdf08477e63f..3a3904fe138c 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -15,6 +15,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/pm.h>
#include <linux/bitops.h>
@@ -272,6 +273,12 @@ static int tmp006_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(tmp006_pm_ops, tmp006_suspend, tmp006_resume);
+static const struct of_device_id tmp006_of_match[] = {
+ { .compatible = "ti,tmp006" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tmp006_of_match);
+
static const struct i2c_device_id tmp006_id[] = {
{ "tmp006", 0 },
{ }
@@ -281,9 +288,10 @@ MODULE_DEVICE_TABLE(i2c, tmp006_id);
static struct i2c_driver tmp006_driver = {
.driver = {
.name = "tmp006",
+ .of_match_table = tmp006_of_match,
.pm = pm_sleep_ptr(&tmp006_pm_ops),
},
- .probe_new = tmp006_probe,
+ .probe = tmp006_probe,
.id_table = tmp006_id,
};
module_i2c_driver(tmp006_driver);
diff --git a/drivers/iio/temperature/tmp007.c b/drivers/iio/temperature/tmp007.c
index 8d27aa3bdd6d..decef6896362 100644
--- a/drivers/iio/temperature/tmp007.c
+++ b/drivers/iio/temperature/tmp007.c
@@ -574,7 +574,7 @@ static struct i2c_driver tmp007_driver = {
.of_match_table = tmp007_of_match,
.pm = pm_sleep_ptr(&tmp007_pm_ops),
},
- .probe_new = tmp007_probe,
+ .probe = tmp007_probe,
.id_table = tmp007_id,
};
module_i2c_driver(tmp007_driver);
diff --git a/drivers/iio/temperature/tmp117.c b/drivers/iio/temperature/tmp117.c
index 638e3a5bd6b8..fc02f491688b 100644
--- a/drivers/iio/temperature/tmp117.c
+++ b/drivers/iio/temperature/tmp117.c
@@ -217,7 +217,7 @@ static struct i2c_driver tmp117_driver = {
.name = "tmp117",
.of_match_table = tmp117_of_match,
},
- .probe_new = tmp117_probe,
+ .probe = tmp117_probe,
.id_table = tmp117_id,
};
module_i2c_driver(tmp117_driver);
diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c
index 30b268ba82cc..53ef56fbfe1d 100644
--- a/drivers/iio/temperature/tsys01.c
+++ b/drivers/iio/temperature/tsys01.c
@@ -218,7 +218,7 @@ static const struct of_device_id tsys01_of_match[] = {
MODULE_DEVICE_TABLE(of, tsys01_of_match);
static struct i2c_driver tsys01_driver = {
- .probe_new = tsys01_i2c_probe,
+ .probe = tsys01_i2c_probe,
.id_table = tsys01_id,
.driver = {
.name = "tsys01",
diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c
index cdefe046ab17..6191db92ef9a 100644
--- a/drivers/iio/temperature/tsys02d.c
+++ b/drivers/iio/temperature/tsys02d.c
@@ -174,7 +174,7 @@ static const struct i2c_device_id tsys02d_id[] = {
MODULE_DEVICE_TABLE(i2c, tsys02d_id);
static struct i2c_driver tsys02d_driver = {
- .probe_new = tsys02d_probe,
+ .probe = tsys02d_probe,
.id_table = tsys02d_id,
.driver = {
.name = "tsys02d",
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
index d637a89d4695..5faa8d2aecff 100644
--- a/drivers/interconnect/Kconfig
+++ b/drivers/interconnect/Kconfig
@@ -15,4 +15,10 @@ source "drivers/interconnect/imx/Kconfig"
source "drivers/interconnect/qcom/Kconfig"
source "drivers/interconnect/samsung/Kconfig"
+config INTERCONNECT_CLK
+ tristate
+ depends on COMMON_CLK
+ help
+ Support for wrapping clocks into the interconnect nodes.
+
endif
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 97d393fd638d..5604ce351a9f 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_INTERCONNECT) += icc-core.o
obj-$(CONFIG_INTERCONNECT_IMX) += imx/
obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
obj-$(CONFIG_INTERCONNECT_SAMSUNG) += samsung/
+
+obj-$(CONFIG_INTERCONNECT_CLK) += icc-clk.o
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index ec46bcb16d5e..5fac448c28fd 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(icc_set_tag);
/**
* icc_get_name() - Get name of the icc path
- * @path: reference to the path returned by icc_get()
+ * @path: interconnect path
*
* This function is used by an interconnect consumer to get the name of the icc
* path.
@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(icc_get_name);
/**
* icc_set_bw() - set bandwidth constraints on an interconnect path
- * @path: reference to the path returned by icc_get()
+ * @path: interconnect path
* @avg_bw: average bandwidth in kilobytes per second
* @peak_bw: peak bandwidth in kilobytes per second
*
@@ -705,54 +705,6 @@ int icc_disable(struct icc_path *path)
EXPORT_SYMBOL_GPL(icc_disable);
/**
- * icc_get() - return a handle for path between two endpoints
- * @dev: the device requesting the path
- * @src_id: source device port id
- * @dst_id: destination device port id
- *
- * This function will search for a path between two endpoints and return an
- * icc_path handle on success. Use icc_put() to release
- * constraints when they are not needed anymore.
- * If the interconnect API is disabled, NULL is returned and the consumer
- * drivers will still build. Drivers are free to handle this specifically,
- * but they don't have to.
- *
- * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
- * interconnect API is disabled.
- */
-struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
-{
- struct icc_node *src, *dst;
- struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
-
- mutex_lock(&icc_lock);
-
- src = node_find(src_id);
- if (!src)
- goto out;
-
- dst = node_find(dst_id);
- if (!dst)
- goto out;
-
- path = path_find(dev, src, dst);
- if (IS_ERR(path)) {
- dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
- goto out;
- }
-
- path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
- if (!path->name) {
- kfree(path);
- path = ERR_PTR(-ENOMEM);
- }
-out:
- mutex_unlock(&icc_lock);
- return path;
-}
-EXPORT_SYMBOL_GPL(icc_get);
-
-/**
* icc_put() - release the reference to the icc_path
* @path: interconnect path
*
diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
new file mode 100644
index 000000000000..4d43ebff4257
--- /dev/null
+++ b/drivers/interconnect/icc-clk.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-clk.h>
+#include <linux/interconnect-provider.h>
+
+struct icc_clk_node {
+ struct clk *clk;
+ bool enabled;
+};
+
+struct icc_clk_provider {
+ struct icc_provider provider;
+ int num_clocks;
+ struct icc_clk_node clocks[];
+};
+
+#define to_icc_clk_provider(_provider) \
+ container_of(_provider, struct icc_clk_provider, provider)
+
+static int icc_clk_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct icc_clk_node *qn = src->data;
+ int ret;
+
+ if (!qn || !qn->clk)
+ return 0;
+
+ if (!src->peak_bw) {
+ if (qn->enabled)
+ clk_disable_unprepare(qn->clk);
+ qn->enabled = false;
+
+ return 0;
+ }
+
+ if (!qn->enabled) {
+ ret = clk_prepare_enable(qn->clk);
+ if (ret)
+ return ret;
+ qn->enabled = true;
+ }
+
+ return clk_set_rate(qn->clk, icc_units_to_bps(src->peak_bw));
+}
+
+static int icc_clk_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
+{
+ struct icc_clk_node *qn = node->data;
+
+ if (!qn || !qn->clk)
+ *peak = INT_MAX;
+ else
+ *peak = Bps_to_icc(clk_get_rate(qn->clk));
+
+ return 0;
+}
+
+/**
+ * icc_clk_register() - register a new clk-based interconnect provider
+ * @dev: device supporting this provider
+ * @first_id: an ID of the first provider's node
+ * @num_clocks: number of instances of struct icc_clk_data
+ * @data: data for the provider
+ *
+ * Registers and returns a clk-based interconnect provider. It is a simple
+ * wrapper around the COMMON_CLK framework, allowing other devices to vote on
+ * the clock rate.
+ *
+ * Return: pointer to the registered provider on success, or ERR_PTR() on error
+ */
+struct icc_provider *icc_clk_register(struct device *dev,
+ unsigned int first_id,
+ unsigned int num_clocks,
+ const struct icc_clk_data *data)
+{
+ struct icc_clk_provider *qp;
+ struct icc_provider *provider;
+ struct icc_onecell_data *onecell;
+ struct icc_node *node;
+ int ret, i, j;
+
+ onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL);
+ if (!onecell)
+ return ERR_PTR(-ENOMEM);
+
+ qp = devm_kzalloc(dev, struct_size(qp, clocks, num_clocks), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->num_clocks = num_clocks;
+
+ provider = &qp->provider;
+ provider->dev = dev;
+ provider->get_bw = icc_clk_get_bw;
+ provider->set = icc_clk_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = onecell;
+
+ icc_provider_init(provider);
+
+ for (i = 0, j = 0; i < num_clocks; i++) {
+ qp->clocks[i].clk = data[i].clk;
+
+ node = icc_node_create(first_id + j);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_master", data[i].name);
+ node->data = &qp->clocks[i];
+ icc_node_add(node, provider);
+ /* link to the next node, slave */
+ icc_link_create(node, first_id + j + 1);
+ onecell->nodes[j++] = node;
+
+ node = icc_node_create(first_id + j);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_slave", data[i].name);
+ /* no data for slave node */
+ icc_node_add(node, provider);
+ onecell->nodes[j++] = node;
+ }
+
+ onecell->num_nodes = j;
+
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err;
+
+ return provider;
+
+err:
+ icc_nodes_remove(provider);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(icc_clk_register);
+
+/**
+ * icc_clk_unregister() - unregister a previously registered clk interconnect provider
+ * @provider: provider returned by icc_clk_register()
+ */
+void icc_clk_unregister(struct icc_provider *provider)
+{
+ struct icc_clk_provider *qp = container_of(provider, struct icc_clk_provider, provider);
+ int i;
+
+ icc_provider_deregister(&qp->provider);
+ icc_nodes_remove(&qp->provider);
+
+ for (i = 0; i < qp->num_clocks; i++) {
+ struct icc_clk_node *qn = &qp->clocks[i];
+
+ if (qn->enabled)
+ clk_disable_unprepare(qn->clk);
+ }
+}
+EXPORT_SYMBOL_GPL(icc_clk_unregister);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Interconnect wrapper for clocks");
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
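As a usage sketch (not part of the patch), a platform driver could wrap a single bus clock into a master/slave node pair as below. The struct icc_clk_data field names (.clk, .name) follow their use in icc_clk_register() above; demo_register_bus_icc() and FIRST_NODE_ID are hypothetical names chosen for this example.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect-clk.h>
#include <linux/interconnect-provider.h>

#define FIRST_NODE_ID	1000	/* illustrative, platform-chosen base ID */

static int demo_register_bus_icc(struct device *dev, struct clk *bus_clk)
{
	const struct icc_clk_data data = {
		.clk = bus_clk,
		.name = "bus",
	};
	struct icc_provider *provider;

	provider = icc_clk_register(dev, FIRST_NODE_ID, 1, &data);
	if (IS_ERR(provider))
		return PTR_ERR(provider);

	/*
	 * Consumers now vote on the clock rate through the
	 * FIRST_NODE_ID (master) -> FIRST_NODE_ID + 1 (slave) path;
	 * a zero peak-bandwidth request gates the clock off again.
	 */
	return 0;
}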
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 5341fa169dbf..6acc7686ed38 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -50,7 +50,7 @@
#define NOC_QOS_MODE_FIXED_VAL 0x0
#define NOC_QOS_MODE_BYPASS_VAL 0x2
-static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw)
+static int qcom_icc_set_qnoc_qos(struct icc_node *src)
{
struct icc_provider *provider = src->provider;
struct qcom_icc_provider *qp = to_qcom_provider(provider);
@@ -95,7 +95,7 @@ static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
mask, val);
}
-static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
+static int qcom_icc_set_bimc_qos(struct icc_node *src)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *qn;
@@ -150,7 +150,7 @@ static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}
-static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
+static int qcom_icc_set_noc_qos(struct icc_node *src)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *qn;
@@ -187,7 +187,7 @@ static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
NOC_QOS_MODEn_MASK, mode);
}
-static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
+static int qcom_icc_qos_set(struct icc_node *node)
{
struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
struct qcom_icc_node *qn = node->data;
@@ -196,38 +196,41 @@ static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
switch (qp->type) {
case QCOM_ICC_BIMC:
- return qcom_icc_set_bimc_qos(node, sum_bw);
+ return qcom_icc_set_bimc_qos(node);
case QCOM_ICC_QNOC:
- return qcom_icc_set_qnoc_qos(node, sum_bw);
+ return qcom_icc_set_qnoc_qos(node);
default:
- return qcom_icc_set_noc_qos(node, sum_bw);
+ return qcom_icc_set_noc_qos(node);
}
}
-static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
+static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 sum_bw)
{
int ret = 0;
- if (mas_rpm_id != -1) {
+ if (qn->qos.ap_owned)
+ return 0;
+
+ if (qn->mas_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
RPM_BUS_MASTER_REQ,
- mas_rpm_id,
+ qn->mas_rpm_id,
sum_bw);
if (ret) {
pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
- mas_rpm_id, ret);
+ qn->mas_rpm_id, ret);
return ret;
}
}
- if (slv_rpm_id != -1) {
+ if (qn->slv_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
RPM_BUS_SLAVE_REQ,
- slv_rpm_id,
+ qn->slv_rpm_id,
sum_bw);
if (ret) {
pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
- slv_rpm_id, ret);
+ qn->slv_rpm_id, ret);
return ret;
}
}
@@ -235,26 +238,6 @@ static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
return ret;
}
-static int __qcom_icc_set(struct icc_node *n, struct qcom_icc_node *qn,
- u64 sum_bw)
-{
- int ret;
-
- if (!qn->qos.ap_owned) {
- /* send bandwidth request message to the RPM processor */
- ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
- if (ret)
- return ret;
- } else if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID) {
- /* set bandwidth directly from the AP */
- ret = qcom_icc_qos_set(n, sum_bw);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
/**
* qcom_icc_pre_bw_aggregate - cleans up values before re-aggregate requests
* @node: icc node to operate on
@@ -370,16 +353,17 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
sum_bw = icc_units_to_bps(max_agg_avg);
- ret = __qcom_icc_set(src, src_qn, sum_bw);
+ ret = qcom_icc_rpm_set(src_qn, sum_bw);
if (ret)
return ret;
+
if (dst_qn) {
- ret = __qcom_icc_set(dst, dst_qn, sum_bw);
+ ret = qcom_icc_rpm_set(dst_qn, sum_bw);
if (ret)
return ret;
}
- for (i = 0; i < qp->num_clks; i++) {
+ for (i = 0; i < qp->num_bus_clks; i++) {
/*
* Use WAKE bucket for active clock, otherwise, use SLEEP bucket
* for other clocks. If a platform doesn't set interconnect
@@ -425,7 +409,7 @@ int qnoc_probe(struct platform_device *pdev)
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i;
- const char * const *cds;
+ const char * const *cds = NULL;
int cd_num;
int ret;
@@ -440,21 +424,20 @@ int qnoc_probe(struct platform_device *pdev)
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
- if (desc->num_clocks) {
- cds = desc->clocks;
- cd_num = desc->num_clocks;
+ if (desc->num_intf_clocks) {
+ cds = desc->intf_clocks;
+ cd_num = desc->num_intf_clocks;
} else {
- cds = bus_clocks;
- cd_num = ARRAY_SIZE(bus_clocks);
+ /* 0 intf clocks is perfectly fine */
+ cd_num = 0;
}
- qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
- qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate),
- GFP_KERNEL);
- if (!qp->bus_clk_rate)
+ qp->intf_clks = devm_kcalloc(dev, cd_num, sizeof(*qp->intf_clks), GFP_KERNEL);
+ if (!qp->intf_clks)
return -ENOMEM;
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
@@ -462,9 +445,13 @@ int qnoc_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ qp->num_intf_clks = cd_num;
for (i = 0; i < cd_num; i++)
- qp->bus_clks[i].id = cds[i];
- qp->num_clks = cd_num;
+ qp->intf_clks[i].id = cds[i];
+
+ qp->num_bus_clks = desc->no_clk_scaling ? 0 : NUM_BUS_CLKS;
+ for (i = 0; i < qp->num_bus_clks; i++)
+ qp->bus_clks[i].id = bus_clocks[i];
qp->type = desc->type;
qp->qos_offset = desc->qos_offset;
@@ -494,11 +481,15 @@ int qnoc_probe(struct platform_device *pdev)
}
regmap_done:
- ret = devm_clk_bulk_get_optional(dev, qp->num_clks, qp->bus_clks);
+ ret = devm_clk_bulk_get(dev, qp->num_bus_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_bus_clks, qp->bus_clks);
if (ret)
return ret;
- ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
if (ret)
return ret;
@@ -512,6 +503,11 @@ regmap_done:
icc_provider_init(provider);
+ /* If this fails, bus accesses will crash the platform! */
+ ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
+ if (ret)
+ return ret;
+
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -528,10 +524,20 @@ regmap_done:
for (j = 0; j < qnodes[i]->num_links; j++)
icc_link_create(node, qnodes[i]->links[j]);
+ /* Set QoS registers (we only need to do it once, generally) */
+ if (qnodes[i]->qos.ap_owned &&
+ qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
+ ret = qcom_icc_qos_set(node);
+ if (ret)
+ return ret;
+ }
+
data->nodes[i] = node;
}
data->num_nodes = num_nodes;
+ clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);
+
ret = icc_provider_register(provider);
if (ret)
goto err_remove_nodes;
@@ -551,7 +557,7 @@ err_deregister_provider:
icc_provider_deregister(provider);
err_remove_nodes:
icc_nodes_remove(provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ clk_bulk_disable_unprepare(qp->num_bus_clks, qp->bus_clks);
return ret;
}
@@ -563,7 +569,7 @@ int qnoc_remove(struct platform_device *pdev)
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ clk_bulk_disable_unprepare(qp->num_bus_clks, qp->bus_clks);
return 0;
}
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index 22bdb1e4bb12..ee705edf19dd 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -20,24 +20,32 @@ enum qcom_icc_type {
QCOM_ICC_QNOC,
};
+#define NUM_BUS_CLKS 2
+
/**
* struct qcom_icc_provider - Qualcomm specific interconnect provider
* @provider: generic interconnect provider
- * @num_clks: the total number of clk_bulk_data entries
+ * @num_bus_clks: the total number of bus_clks clk_bulk_data entries (0 or 2)
+ * @num_intf_clks: the total number of intf_clks clk_bulk_data entries
* @type: the ICC provider type
* @regmap: regmap for QoS registers read/write access
* @qos_offset: offset to QoS registers
* @bus_clk_rate: bus clock rate in Hz
* @bus_clks: the clk_bulk_data table of bus clocks
+ * @intf_clks: a clk_bulk_data array of interface clocks
+ * @is_on: whether the bus is powered on
*/
struct qcom_icc_provider {
struct icc_provider provider;
- int num_clks;
+ int num_bus_clks;
+ int num_intf_clks;
enum qcom_icc_type type;
struct regmap *regmap;
unsigned int qos_offset;
- u64 *bus_clk_rate;
- struct clk_bulk_data bus_clks[];
+ u64 bus_clk_rate[NUM_BUS_CLKS];
+ struct clk_bulk_data bus_clks[NUM_BUS_CLKS];
+ struct clk_bulk_data *intf_clks;
+ bool is_on;
};
/**
@@ -91,8 +99,10 @@ struct qcom_icc_node {
struct qcom_icc_desc {
struct qcom_icc_node * const *nodes;
size_t num_nodes;
- const char * const *clocks;
- size_t num_clocks;
+ const char * const *bus_clocks;
+ const char * const *intf_clocks;
+ size_t num_intf_clocks;
+ bool no_clk_scaling;
enum qcom_icc_type type;
const struct regmap_config *regmap_cfg;
unsigned int qos_offset;
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
index 14efd2761b7a..20340fb62fe6 100644
--- a/drivers/interconnect/qcom/msm8996.c
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -21,21 +21,17 @@
#include "smd-rpm.h"
#include "msm8996.h"
-static const char * const bus_mm_clocks[] = {
- "bus",
- "bus_a",
+static const char * const mm_intf_clocks[] = {
"iface"
};
-static const char * const bus_a0noc_clocks[] = {
+static const char * const a0noc_intf_clocks[] = {
"aggre0_snoc_axi",
"aggre0_cnoc_ahb",
"aggre0_noc_mpu_cfg"
};
-static const char * const bus_a2noc_clocks[] = {
- "bus",
- "bus_a",
+static const char * const a2noc_intf_clocks[] = {
"aggre2_ufs_axi",
"ufs_axi"
};
@@ -1821,8 +1817,9 @@ static const struct qcom_icc_desc msm8996_a0noc = {
.type = QCOM_ICC_NOC,
.nodes = a0noc_nodes,
.num_nodes = ARRAY_SIZE(a0noc_nodes),
- .clocks = bus_a0noc_clocks,
- .num_clocks = ARRAY_SIZE(bus_a0noc_clocks),
+ .intf_clocks = a0noc_intf_clocks,
+ .num_intf_clocks = ARRAY_SIZE(a0noc_intf_clocks),
+ .no_clk_scaling = true,
.regmap_cfg = &msm8996_a0noc_regmap_config
};
@@ -1865,8 +1862,8 @@ static const struct qcom_icc_desc msm8996_a2noc = {
.type = QCOM_ICC_NOC,
.nodes = a2noc_nodes,
.num_nodes = ARRAY_SIZE(a2noc_nodes),
- .clocks = bus_a2noc_clocks,
- .num_clocks = ARRAY_SIZE(bus_a2noc_clocks),
+ .intf_clocks = a2noc_intf_clocks,
+ .num_intf_clocks = ARRAY_SIZE(a2noc_intf_clocks),
.regmap_cfg = &msm8996_a2noc_regmap_config
};
@@ -2004,8 +2001,8 @@ static const struct qcom_icc_desc msm8996_mnoc = {
.type = QCOM_ICC_NOC,
.nodes = mnoc_nodes,
.num_nodes = ARRAY_SIZE(mnoc_nodes),
- .clocks = bus_mm_clocks,
- .num_clocks = ARRAY_SIZE(bus_mm_clocks),
+ .intf_clocks = mm_intf_clocks,
+ .num_intf_clocks = ARRAY_SIZE(mm_intf_clocks),
.regmap_cfg = &msm8996_mnoc_regmap_config
};
@@ -2111,7 +2108,17 @@ static struct platform_driver qnoc_driver = {
.sync_state = icc_sync_state,
}
};
-module_platform_driver(qnoc_driver);
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
MODULE_DESCRIPTION("Qualcomm MSM8996 NoC driver");
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 8d879b0bcabc..7ffaf70d62d3 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -127,15 +127,11 @@ enum {
SDM660_SNOC,
};
-static const char * const bus_mm_clocks[] = {
- "bus",
- "bus_a",
+static const char * const mm_intf_clocks[] = {
"iface",
};
-static const char * const bus_a2noc_clocks[] = {
- "bus",
- "bus_a",
+static const char * const a2noc_intf_clocks[] = {
"ipa",
"ufs_axi",
"aggre2_ufs_axi",
@@ -1516,8 +1512,8 @@ static const struct qcom_icc_desc sdm660_a2noc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_a2noc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
- .clocks = bus_a2noc_clocks,
- .num_clocks = ARRAY_SIZE(bus_a2noc_clocks),
+ .intf_clocks = a2noc_intf_clocks,
+ .num_intf_clocks = ARRAY_SIZE(a2noc_intf_clocks),
.regmap_cfg = &sdm660_a2noc_regmap_config,
};
@@ -1620,6 +1616,7 @@ static const struct qcom_icc_desc sdm660_gnoc = {
.nodes = sdm660_gnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes),
.regmap_cfg = &sdm660_gnoc_regmap_config,
+ .no_clk_scaling = true,
};
static struct qcom_icc_node * const sdm660_mnoc_nodes[] = {
@@ -1659,8 +1656,8 @@ static const struct qcom_icc_desc sdm660_mnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_mnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
- .clocks = bus_mm_clocks,
- .num_clocks = ARRAY_SIZE(bus_mm_clocks),
+ .intf_clocks = mm_intf_clocks,
+ .num_intf_clocks = ARRAY_SIZE(mm_intf_clocks),
.regmap_cfg = &sdm660_mnoc_regmap_config,
};
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 2690e2c5a158..6fd1b3f84a29 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -6,7 +6,6 @@
menuconfig ISDN
bool "ISDN support"
depends on NET && NETDEVICES
- depends on !S390 && !UML
help
ISDN ("Integrated Services Digital Network", called RNIS in France)
is a fully digital telephone service that can be used for voice and
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index 078eeadf707a..a35bff8a93f5 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -14,7 +14,7 @@ config MISDN_HFCPCI
config MISDN_HFCMULTI
tristate "Support for HFC multiport cards (HFC-4S/8S/E1)"
- depends on PCI || CPM1
+ depends on (PCI || CPM1) && HAS_IOPORT
depends on MISDN
help
Enable support for cards with Cologne Chip AG's HFC multiport
@@ -43,7 +43,7 @@ config MISDN_HFCUSB
config MISDN_AVMFRITZ
tristate "Support for AVM FRITZ!CARD PCI"
depends on MISDN
- depends on PCI
+ depends on PCI && HAS_IOPORT
select MISDN_IPAC
help
Enable support for AVMs FRITZ!CARD PCI cards
@@ -51,7 +51,7 @@ config MISDN_AVMFRITZ
config MISDN_SPEEDFAX
tristate "Support for Sedlbauer Speedfax+"
depends on MISDN
- depends on PCI
+ depends on PCI && HAS_IOPORT
select MISDN_IPAC
select MISDN_ISAR
help
@@ -60,7 +60,7 @@ config MISDN_SPEEDFAX
config MISDN_INFINEON
tristate "Support for cards with Infineon chipset"
depends on MISDN
- depends on PCI
+ depends on PCI && HAS_IOPORT
select MISDN_IPAC
help
Enable support for cards with ISAC + HSCX, IPAC or IPAC-SX
@@ -69,14 +69,14 @@ config MISDN_INFINEON
config MISDN_W6692
tristate "Support for cards with Winbond 6692"
depends on MISDN
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
Enable support for Winbond 6692 PCI chip based cards.
config MISDN_NETJET
tristate "Support for NETJet cards"
depends on MISDN
- depends on PCI
+ depends on PCI && HAS_IOPORT
depends on TTY
select MISDN_IPAC
select MISDN_HDLC
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 2c5fdf848210..6046dfeca16f 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -94,6 +94,19 @@ config LEDS_ARIEL
Say Y to if your machine is a Dell Wyse 3020 thin client.
+config LEDS_AW200XX
+ tristate "LED support for Awinic AW20036/AW20054/AW20072"
+ depends on LEDS_CLASS
+ depends on I2C
+ help
+ This option enables support for the AW20036/AW20054/AW20072 LED driver.
+ It is a 3x12/6x9/6x12 matrix LED driver programmed via an I2C
+ interface. It drives up to 36/54/72 LEDs (12/18/24 RGB groups) and
+ provides 3 pattern controllers for auto breathing or group dimming
+ control.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-aw200xx.
+
config LEDS_AW2013
tristate "LED support for Awinic AW2013"
depends on LEDS_CLASS && I2C && OF
@@ -122,6 +135,17 @@ config LEDS_BCM6358
This option enables support for LEDs connected to the BCM6358
LED HW controller accessed via MMIO registers.
+config LEDS_CHT_WCOVE
+ tristate "LED support for Intel Cherry Trail Whiskey Cove PMIC"
+ depends on LEDS_CLASS
+ depends on INTEL_SOC_PMIC_CHTWC
+ help
+ This option enables support for charger and general purpose LEDs
+ connected to the Intel Cherry Trail Whiskey Cove PMIC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-cht-wcove.
+
config LEDS_CPCAP
tristate "LED Support for Motorola CPCAP"
depends on LEDS_CLASS
@@ -676,7 +700,7 @@ config LEDS_LM355x
config LEDS_OT200
tristate "LED support for the Bachmann OT200"
- depends on LEDS_CLASS && HAS_IOMEM && (X86_32 || COMPILE_TEST)
+ depends on LEDS_CLASS && HAS_IOPORT && (X86_32 || COMPILE_TEST)
help
This option enables support for the LEDs on the Bachmann OT200.
Say Y to enable LEDs on the Bachmann OT200.
@@ -807,8 +831,7 @@ config LEDS_SPI_BYTE
supported: Ubiquiti airCube ISP microcontroller based LED controller.
config LEDS_TI_LMU_COMMON
- tristate "LED driver for TI LMU"
- depends on LEDS_CLASS
+ tristate "LED driver for TI LMU" if COMPILE_TEST
select REGMAP
help
Say Y to enable the LED driver for TI LMU devices.
@@ -817,16 +840,16 @@ config LEDS_TI_LMU_COMMON
config LEDS_LM3697
tristate "LED driver for LM3697"
- depends on LEDS_TI_LMU_COMMON
- depends on I2C && OF
+ depends on LEDS_CLASS && I2C && OF
+ select LEDS_TI_LMU_COMMON
help
Say Y to enable the LM3697 LED driver for TI LMU devices.
This supports the LED device LM3697.
config LEDS_LM36274
tristate "LED driver for LM36274"
- depends on LEDS_TI_LMU_COMMON
- depends on MFD_TI_LMU
+ depends on LEDS_CLASS && MFD_TI_LMU
+ select LEDS_TI_LMU_COMMON
help
Say Y to enable the LM36274 LED driver for TI LMU devices.
This supports the LED device LM36274.
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index c07d1512c745..d71f1226540c 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -14,12 +14,14 @@ obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
obj-$(CONFIG_LEDS_APU) += leds-apu.o
obj-$(CONFIG_LEDS_ARIEL) += leds-ariel.o
+obj-$(CONFIG_LEDS_AW200XX) += leds-aw200xx.o
obj-$(CONFIG_LEDS_AW2013) += leds-aw2013.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
obj-$(CONFIG_LEDS_BD2606MVV) += leds-bd2606mvv.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
+obj-$(CONFIG_LEDS_CHT_WCOVE) += leds-cht-wcove.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
diff --git a/drivers/leds/flash/leds-aat1290.c b/drivers/leds/flash/leds-aat1290.c
index 589484b22c79..f12ecb2c6580 100644
--- a/drivers/leds/flash/leds-aat1290.c
+++ b/drivers/leds/flash/leds-aat1290.c
@@ -425,7 +425,7 @@ static void aat1290_init_v4l2_flash_config(struct aat1290_led *led,
struct led_classdev *led_cdev = &led->fled_cdev.led_cdev;
struct led_flash_setting *s;
- strlcpy(v4l2_sd_cfg->dev_name, led_cdev->dev->kobj.name,
+ strscpy(v4l2_sd_cfg->dev_name, led_cdev->dev->kobj.name,
sizeof(v4l2_sd_cfg->dev_name));
s = &v4l2_sd_cfg->intensity;
diff --git a/drivers/leds/flash/leds-as3645a.c b/drivers/leds/flash/leds-as3645a.c
index bb2249771acb..12c2609c1137 100644
--- a/drivers/leds/flash/leds-as3645a.c
+++ b/drivers/leds/flash/leds-as3645a.c
@@ -651,8 +651,8 @@ static int as3645a_v4l2_setup(struct as3645a *flash)
},
};
- strlcpy(cfg.dev_name, led->dev->kobj.name, sizeof(cfg.dev_name));
- strlcpy(cfgind.dev_name, flash->iled_cdev.dev->kobj.name,
+ strscpy(cfg.dev_name, led->dev->kobj.name, sizeof(cfg.dev_name));
+ strscpy(cfgind.dev_name, flash->iled_cdev.dev->kobj.name,
sizeof(cfgind.dev_name));
flash->vf = v4l2_flash_init(
@@ -759,7 +759,7 @@ static struct i2c_driver as3645a_i2c_driver = {
.of_match_table = as3645a_of_table,
.name = AS_NAME,
},
- .probe_new = as3645a_probe,
+ .probe = as3645a_probe,
.remove = as3645a_remove,
.id_table = as3645a_id_table,
};
diff --git a/drivers/leds/flash/leds-lm3601x.c b/drivers/leds/flash/leds-lm3601x.c
index 78730e066a73..b6c524facf49 100644
--- a/drivers/leds/flash/leds-lm3601x.c
+++ b/drivers/leds/flash/leds-lm3601x.c
@@ -471,7 +471,7 @@ static struct i2c_driver lm3601x_i2c_driver = {
.name = "lm3601x",
.of_match_table = of_lm3601x_leds_match,
},
- .probe_new = lm3601x_probe,
+ .probe = lm3601x_probe,
.remove = lm3601x_remove,
.id_table = lm3601x_id,
};
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index 90a24fa25a49..b089ca1a1901 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -18,7 +18,8 @@
#define FLASH_TYPE_VAL 0x18
#define FLASH_SUBTYPE_REG 0x05
-#define FLASH_SUBTYPE_3CH_VAL 0x04
+#define FLASH_SUBTYPE_3CH_PM8150_VAL 0x04
+#define FLASH_SUBTYPE_3CH_PMI8998_VAL 0x03
#define FLASH_SUBTYPE_4CH_VAL 0x07
#define FLASH_STS_3CH_OTST1 BIT(0)
@@ -416,6 +417,14 @@ static int qcom_flash_led_brightness_set(struct led_classdev *led_cdev,
bool enable = !!brightness;
int rc;
+ rc = set_flash_strobe(led, SW_STROBE, false);
+ if (rc)
+ return rc;
+
+ rc = set_flash_module_en(led, false);
+ if (rc)
+ return rc;
+
rc = set_flash_current(led, current_ma, TORCH_MODE);
if (rc)
return rc;
@@ -529,9 +538,9 @@ static int qcom_flash_register_led_device(struct device *dev,
struct led_init_data init_data;
struct led_classdev_flash *flash = &led->flash;
struct led_flash_setting *brightness, *timeout;
- u32 count, current_ua, timeout_us;
+ u32 current_ua, timeout_us;
u32 channels[4];
- int i, rc;
+ int i, rc, count;
count = fwnode_property_count_u32(node, "led-sources");
if (count <= 0) {
@@ -682,7 +691,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
return rc;
}
- if (val == FLASH_SUBTYPE_3CH_VAL) {
+ if (val == FLASH_SUBTYPE_3CH_PM8150_VAL || val == FLASH_SUBTYPE_3CH_PMI8998_VAL) {
flash_data->hw_type = QCOM_MVFLASH_3CH;
flash_data->max_channels = 3;
regs = mvflash_3ch_regs;
diff --git a/drivers/leds/flash/leds-rt4505.c b/drivers/leds/flash/leds-rt4505.c
index e404fe8b0314..1ae5b387f4a5 100644
--- a/drivers/leds/flash/leds-rt4505.c
+++ b/drivers/leds/flash/leds-rt4505.c
@@ -419,7 +419,7 @@ static struct i2c_driver rt4505_driver = {
.name = "rt4505",
.of_match_table = of_match_ptr(rt4505_leds_match),
},
- .probe_new = rt4505_probe,
+ .probe = rt4505_probe,
.remove = rt4505_remove,
.shutdown = rt4505_shutdown,
};
diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c
index d3a30ad94ac4..d3f50dca5136 100644
--- a/drivers/leds/flash/leds-sgm3140.c
+++ b/drivers/leds/flash/leds-sgm3140.c
@@ -291,6 +291,7 @@ static int sgm3140_remove(struct platform_device *pdev)
static const struct of_device_id sgm3140_dt_match[] = {
{ .compatible = "ocs,ocp8110" },
+ { .compatible = "richtek,rt5033-led" },
{ .compatible = "sgmicro,sgm3140" },
{ /* sentinel */ }
};
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 9255bc11f99d..6dae56b914fe 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -409,7 +409,7 @@ static int led_classdev_next_name(const char *init_name, char *name,
int ret = 0;
struct device *dev;
- strlcpy(name, init_name, len);
+ strscpy(name, init_name, len);
while ((ret < len) &&
(dev = class_find_device_by_name(leds_class, name))) {
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 4a97cb745788..b9b1295833c9 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -114,21 +114,14 @@ static void led_timer_function(struct timer_list *t)
mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
}
-static void set_brightness_delayed(struct work_struct *ws)
+static void set_brightness_delayed_set_brightness(struct led_classdev *led_cdev,
+ unsigned int value)
{
- struct led_classdev *led_cdev =
- container_of(ws, struct led_classdev, set_brightness_work);
int ret = 0;
- if (test_and_clear_bit(LED_BLINK_DISABLE, &led_cdev->work_flags)) {
- led_cdev->delayed_set_value = LED_OFF;
- led_stop_software_blink(led_cdev);
- }
-
- ret = __led_set_brightness(led_cdev, led_cdev->delayed_set_value);
+ ret = __led_set_brightness(led_cdev, value);
if (ret == -ENOTSUPP)
- ret = __led_set_brightness_blocking(led_cdev,
- led_cdev->delayed_set_value);
+ ret = __led_set_brightness_blocking(led_cdev, value);
if (ret < 0 &&
/* LED HW might have been unplugged, therefore don't warn */
!(ret == -ENODEV && (led_cdev->flags & LED_UNREGISTERING) &&
@@ -137,6 +130,37 @@ static void set_brightness_delayed(struct work_struct *ws)
"Setting an LED's brightness failed (%d)\n", ret);
}
+static void set_brightness_delayed(struct work_struct *ws)
+{
+ struct led_classdev *led_cdev =
+ container_of(ws, struct led_classdev, set_brightness_work);
+
+ if (test_and_clear_bit(LED_BLINK_DISABLE, &led_cdev->work_flags)) {
+ led_stop_software_blink(led_cdev);
+ set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags);
+ }
+
+ /*
+ * Triggers may call led_set_brightness(LED_OFF),
+ * led_set_brightness(LED_FULL) in quick succession to disable blinking
+ * and turn the LED on. Both actions may have been scheduled to run
+ * before this work item runs once. To make sure this works properly,
+ * handle LED_SET_BRIGHTNESS_OFF first.
+ */
+ if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags))
+ set_brightness_delayed_set_brightness(led_cdev, LED_OFF);
+
+ if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags))
+ set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value);
+
+ if (test_and_clear_bit(LED_SET_BLINK, &led_cdev->work_flags)) {
+ unsigned long delay_on = led_cdev->delayed_delay_on;
+ unsigned long delay_off = led_cdev->delayed_delay_off;
+
+ led_blink_set(led_cdev, &delay_on, &delay_off);
+ }
+}
+
static void led_set_software_blink(struct led_classdev *led_cdev,
unsigned long delay_on,
unsigned long delay_off)
@@ -229,6 +253,22 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
}
EXPORT_SYMBOL_GPL(led_blink_set_oneshot);
+void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on,
+ unsigned long delay_off)
+{
+ /* If necessary delegate to a work queue task. */
+ if (led_cdev->blink_set && led_cdev->brightness_set_blocking) {
+ led_cdev->delayed_delay_on = delay_on;
+ led_cdev->delayed_delay_off = delay_off;
+ set_bit(LED_SET_BLINK, &led_cdev->work_flags);
+ schedule_work(&led_cdev->set_brightness_work);
+ return;
+ }
+
+ led_blink_set(led_cdev, &delay_on, &delay_off);
+}
+EXPORT_SYMBOL_GPL(led_blink_set_nosleep);
+
void led_stop_software_blink(struct led_classdev *led_cdev)
{
del_timer_sync(&led_cdev->blink_timer);
@@ -271,8 +311,23 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value)
if (!__led_set_brightness(led_cdev, value))
return;
- /* If brightness setting can sleep, delegate it to a work queue task */
- led_cdev->delayed_set_value = value;
+ /*
+ * Brightness setting can sleep, delegate it to a work queue task.
+ * value 0 / LED_OFF is special, since it also disables hw-blinking
+ * (sw-blink disable is handled in led_set_brightness()).
+ * To avoid a hw-blink-disable getting lost when a second brightness
+ * change is done immediately afterwards (before the work runs),
+ * it uses a separate work_flag.
+ */
+ if (value) {
+ led_cdev->delayed_set_value = value;
+ set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
+ } else {
+ clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
+ clear_bit(LED_SET_BLINK, &led_cdev->work_flags);
+ set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags);
+ }
+
schedule_work(&led_cdev->set_brightness_work);
}
EXPORT_SYMBOL_GPL(led_set_brightness_nopm);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 072491d3e17b..6a5e1f41f9a4 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -185,6 +185,7 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
led_cdev->trigger = NULL;
led_cdev->trigger_data = NULL;
led_cdev->activated = false;
+ led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
led_set_brightness(led_cdev, LED_OFF);
}
if (trig) {
@@ -393,8 +394,8 @@ void led_trigger_event(struct led_trigger *trig,
EXPORT_SYMBOL_GPL(led_trigger_event);
static void led_trigger_blink_setup(struct led_trigger *trig,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int oneshot,
int invert)
{
@@ -406,25 +407,25 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
rcu_read_lock();
list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
- led_blink_set_oneshot(led_cdev, delay_on, delay_off,
+ led_blink_set_oneshot(led_cdev, &delay_on, &delay_off,
invert);
else
- led_blink_set(led_cdev, delay_on, delay_off);
+ led_blink_set_nosleep(led_cdev, delay_on, delay_off);
}
rcu_read_unlock();
}
void led_trigger_blink(struct led_trigger *trig,
- unsigned long *delay_on,
- unsigned long *delay_off)
+ unsigned long delay_on,
+ unsigned long delay_off)
{
led_trigger_blink_setup(trig, delay_on, delay_off, 0, 0);
}
EXPORT_SYMBOL_GPL(led_trigger_blink);
void led_trigger_blink_oneshot(struct led_trigger *trig,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert)
{
led_trigger_blink_setup(trig, delay_on, delay_off, 1, invert);
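A hypothetical trigger using the reworked API now passes the blink periods by value, as shown in the sketch below. Because led_trigger_blink() ends up in led_blink_set_nosleep(), drivers whose brightness_set_blocking callback sleeps are serviced through the set_brightness_work item instead of blocking the caller. demo_trig and demo_event() are illustrative names, not part of the patch.

#include <linux/leds.h>

static struct led_trigger *demo_trig;	/* assumed registered elsewhere */

static void demo_event(bool error)
{
	if (error)
		/* 100 ms on / 900 ms off until the condition clears */
		led_trigger_blink(demo_trig, 100, 900);
	else
		/* a single 500 ms on / 500 ms off flash */
		led_trigger_blink_oneshot(demo_trig, 500, 500, 0);
}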
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index 89df267853a9..24b1041213c2 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -346,7 +346,7 @@ static struct i2c_driver an30259a_driver = {
.name = "leds-an30259a",
.of_match_table = of_match_ptr(an30259a_match_table),
},
- .probe_new = an30259a_probe,
+ .probe = an30259a_probe,
.remove = an30259a_remove,
.id_table = an30259a_id,
};
diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
new file mode 100644
index 000000000000..96979b8e09b7
--- /dev/null
+++ b/drivers/leds/leds-aw200xx.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Awinic AW20036/AW20054/AW20072 LED driver
+ *
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@sberdevices.ru>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/time.h>
+#include <linux/units.h>
+
+#define AW200XX_DIM_MAX (BIT(6) - 1)
+#define AW200XX_FADE_MAX (BIT(8) - 1)
+#define AW200XX_IMAX_DEFAULT_uA 60000
+#define AW200XX_IMAX_MAX_uA 160000
+#define AW200XX_IMAX_MIN_uA 3300
+
+/* Page 0 */
+#define AW200XX_REG_PAGE0_BASE 0xc000
+
+/* Select page register */
+#define AW200XX_REG_PAGE 0xF0
+#define AW200XX_PAGE_MASK (GENMASK(7, 6) | GENMASK(2, 0))
+#define AW200XX_PAGE_SHIFT 0
+#define AW200XX_NUM_PAGES 6
+#define AW200XX_PAGE_SIZE 256
+#define AW200XX_REG(page, reg) \
+ (AW200XX_REG_PAGE0_BASE + (page) * AW200XX_PAGE_SIZE + (reg))
+#define AW200XX_REG_MAX \
+ AW200XX_REG(AW200XX_NUM_PAGES - 1, AW200XX_PAGE_SIZE - 1)
+#define AW200XX_PAGE0 0
+#define AW200XX_PAGE1 1
+#define AW200XX_PAGE2 2
+#define AW200XX_PAGE3 3
+#define AW200XX_PAGE4 4
+#define AW200XX_PAGE5 5
+
+/* Chip ID register */
+#define AW200XX_REG_IDR AW200XX_REG(AW200XX_PAGE0, 0x00)
+#define AW200XX_IDR_CHIPID 0x18
+
+/* Sleep mode register */
+#define AW200XX_REG_SLPCR AW200XX_REG(AW200XX_PAGE0, 0x01)
+#define AW200XX_SLPCR_ACTIVE 0x00
+
+/* Reset register */
+#define AW200XX_REG_RSTR AW200XX_REG(AW200XX_PAGE0, 0x02)
+#define AW200XX_RSTR_RESET 0x01
+
+/* Global current configuration register */
+#define AW200XX_REG_GCCR AW200XX_REG(AW200XX_PAGE0, 0x03)
+#define AW200XX_GCCR_IMAX_MASK GENMASK(7, 4)
+#define AW200XX_GCCR_IMAX(x) ((x) << 4)
+#define AW200XX_GCCR_ALLON BIT(3)
+
+/* Fast clear display control register */
+#define AW200XX_REG_FCD AW200XX_REG(AW200XX_PAGE0, 0x04)
+#define AW200XX_FCD_CLEAR 0x01
+
+/* Display size configuration */
+#define AW200XX_REG_DSIZE AW200XX_REG(AW200XX_PAGE0, 0x80)
+#define AW200XX_DSIZE_COLUMNS_MAX 12
+
+#define AW200XX_LED2REG(x, columns) \
+ ((x) + (((x) / (columns)) * (AW200XX_DSIZE_COLUMNS_MAX - (columns))))
+
+/*
+ * DIM current configuration register (page 4).
+ * Even addresses hold the current DIM configuration,
+ * odd addresses hold the current FADE configuration.
+ */
+#define AW200XX_REG_DIM(x, columns) \
+ AW200XX_REG(AW200XX_PAGE4, AW200XX_LED2REG(x, columns) * 2)
+#define AW200XX_REG_DIM2FADE(x) ((x) + 1)
+
+/*
+ * Duty ratio of display scan (see p.15 of datasheet for formula):
+ * duty = (592us / 600.5us) * (1 / (display_rows + 1))
+ *
+ * Multiply by 1000 (MILLI) to improve the accuracy of calculations.
+ */
+#define AW200XX_DUTY_RATIO(rows) \
+ (((592UL * USEC_PER_SEC) / 600500UL) * (MILLI / (rows)) / MILLI)
+
+struct aw200xx_chipdef {
+ u32 channels;
+ u32 display_size_rows_max;
+ u32 display_size_columns;
+};
+
+struct aw200xx_led {
+ struct led_classdev cdev;
+ struct aw200xx *chip;
+ int dim;
+ u32 num;
+};
+
+struct aw200xx {
+ const struct aw200xx_chipdef *cdef;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct mutex mutex;
+ u32 num_leds;
+ u32 display_rows;
+ struct aw200xx_led leds[];
+};
+
+static ssize_t dim_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct aw200xx_led *led = container_of(cdev, struct aw200xx_led, cdev);
+ int dim = led->dim;
+
+ if (dim < 0)
+ return sysfs_emit(buf, "auto\n");
+
+ return sysfs_emit(buf, "%d\n", dim);
+}
+
+static ssize_t dim_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct aw200xx_led *led = container_of(cdev, struct aw200xx_led, cdev);
+ struct aw200xx *chip = led->chip;
+ u32 columns = chip->cdef->display_size_columns;
+ int dim;
+ ssize_t ret;
+
+ if (sysfs_streq(buf, "auto")) {
+ dim = -1;
+ } else {
+ ret = kstrtoint(buf, 0, &dim);
+ if (ret)
+ return ret;
+
+ if (dim > AW200XX_DIM_MAX)
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->mutex);
+
+ if (dim >= 0) {
+ ret = regmap_write(chip->regmap,
+ AW200XX_REG_DIM(led->num, columns), dim);
+ if (ret)
+ goto out_unlock;
+ }
+
+ led->dim = dim;
+ ret = count;
+
+out_unlock:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+static DEVICE_ATTR_RW(dim);
+
+static struct attribute *dim_attrs[] = {
+ &dev_attr_dim.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(dim);
+
+static int aw200xx_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct aw200xx_led *led = container_of(cdev, struct aw200xx_led, cdev);
+ struct aw200xx *chip = led->chip;
+ int dim;
+ u32 reg;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+
+ reg = AW200XX_REG_DIM(led->num, chip->cdef->display_size_columns);
+
+ dim = led->dim;
+ if (dim < 0)
+ dim = max_t(int,
+ brightness / (AW200XX_FADE_MAX / AW200XX_DIM_MAX),
+ 1);
+
+ ret = regmap_write(chip->regmap, reg, dim);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_write(chip->regmap,
+ AW200XX_REG_DIM2FADE(reg), brightness);
+
+out_unlock:
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static u32 aw200xx_imax_from_global(const struct aw200xx *const chip,
+ u32 global_imax_uA)
+{
+ u64 led_imax_uA;
+
+ /*
+ * The output current of each LED (see p.14 of datasheet for formula):
+ * Iled = Imax * (dim / 63) * ((fade + 1) / 256) * duty
+ *
+ * The value of duty is determined by the following formula:
+ * duty = (592us / 600.5us) * (1 / (display_rows + 1))
+ *
+ * Calculated for the maximum values of fade and dim.
+ * We divide by 1000 because we earlier multiplied by 1000 to improve
+ * accuracy when calculating the duty.
+ */
+ led_imax_uA = global_imax_uA * AW200XX_DUTY_RATIO(chip->display_rows);
+ do_div(led_imax_uA, MILLI);
+
+ return led_imax_uA;
+}
+
+static u32 aw200xx_imax_to_global(const struct aw200xx *const chip,
+ u32 led_imax_uA)
+{
+ u32 duty = AW200XX_DUTY_RATIO(chip->display_rows);
+
+ /* The output current of each LED (see p.14 of datasheet for formula) */
+ return (led_imax_uA * 1000U) / duty;
+}
+
+#define AW200XX_IMAX_MULTIPLIER1 10000
+#define AW200XX_IMAX_MULTIPLIER2 3333
+#define AW200XX_IMAX_BASE_VAL1 0
+#define AW200XX_IMAX_BASE_VAL2 8
+
+/*
+ * The AW200XX has a 4-bit register (GCCR) to configure the global current,
+ * which ranges from 3.3mA to 160mA. The following table indicates the values
+ * of the global current, divided into two parts:
+ *
+ * +-----------+-----------------+-----------+-----------------+
+ * | reg value | global max (mA) | reg value | global max (mA) |
+ * +-----------+-----------------+-----------+-----------------+
+ * | 0 | 10 | 8 | 3.3 |
+ * | 1 | 20 | 9 | 6.7 |
+ * | 2 | 30 | 10 | 10 |
+ * | 3 | 40 | 11 | 13.3 |
+ * | 4 | 60 | 12 | 20 |
+ * | 5 | 80 | 13 | 26.7 |
+ * | 6 | 120 | 14 | 40 |
+ * | 7 | 160 | 15 | 53.3 |
+ * +-----------+-----------------+-----------+-----------------+
+ *
+ * The left part uses a multiplier of 10, the right part a multiplier of 3.3.
+ * So we have two formulas to calculate the global current:
+ * for the left part of the table:
+ * imax = coefficient * 10
+ *
+ * for the right part of the table:
+ * imax = coefficient * 3.3
+ *
+ * The coefficient table consists of the following values:
+ * 1, 2, 3, 4, 6, 8, 12, 16.
+ */
+static int aw200xx_set_imax(const struct aw200xx *const chip,
+ u32 led_imax_uA)
+{
+ u32 g_imax_uA = aw200xx_imax_to_global(chip, led_imax_uA);
+ u32 coeff_table[] = {1, 2, 3, 4, 6, 8, 12, 16};
+ u32 gccr_imax = UINT_MAX;
+ u32 cur_imax = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(coeff_table); i++) {
+ u32 imax;
+
+ /* select closest ones */
+ imax = coeff_table[i] * AW200XX_IMAX_MULTIPLIER1;
+ if (g_imax_uA >= imax && imax > cur_imax) {
+ cur_imax = imax;
+ gccr_imax = i + AW200XX_IMAX_BASE_VAL1;
+ }
+
+ imax = coeff_table[i] * AW200XX_IMAX_MULTIPLIER2;
+ imax = DIV_ROUND_CLOSEST(imax, 100) * 100;
+ if (g_imax_uA >= imax && imax > cur_imax) {
+ cur_imax = imax;
+ gccr_imax = i + AW200XX_IMAX_BASE_VAL2;
+ }
+ }
+
+ if (gccr_imax == UINT_MAX)
+ return -EINVAL;
+
+ return regmap_update_bits(chip->regmap, AW200XX_REG_GCCR,
+ AW200XX_GCCR_IMAX_MASK,
+ AW200XX_GCCR_IMAX(gccr_imax));
+}
+
+static int aw200xx_chip_reset(const struct aw200xx *const chip)
+{
+ int ret;
+
+ ret = regmap_write(chip->regmap, AW200XX_REG_RSTR, AW200XX_RSTR_RESET);
+ if (ret)
+ return ret;
+
+ regcache_mark_dirty(chip->regmap);
+ return regmap_write(chip->regmap, AW200XX_REG_FCD, AW200XX_FCD_CLEAR);
+}
+
+static int aw200xx_chip_init(const struct aw200xx *const chip)
+{
+ int ret;
+
+ ret = regmap_write(chip->regmap, AW200XX_REG_DSIZE,
+ chip->display_rows - 1);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(chip->regmap, AW200XX_REG_SLPCR,
+ AW200XX_SLPCR_ACTIVE);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(chip->regmap, AW200XX_REG_GCCR,
+ AW200XX_GCCR_ALLON, AW200XX_GCCR_ALLON);
+}
+
+static int aw200xx_chip_check(const struct aw200xx *const chip)
+{
+ struct device *dev = &chip->client->dev;
+ u32 chipid;
+ int ret;
+
+ ret = regmap_read(chip->regmap, AW200XX_REG_IDR, &chipid);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read chip ID\n");
+
+ if (chipid != AW200XX_IDR_CHIPID)
+ return dev_err_probe(dev, -ENODEV,
+ "Chip reported wrong ID: %x\n", chipid);
+
+ return 0;
+}
+
+static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
+{
+ struct fwnode_handle *child;
+ u32 current_min, current_max, min_uA;
+ int ret;
+ int i;
+
+ ret = device_property_read_u32(dev, "awinic,display-rows",
+ &chip->display_rows);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to read 'display-rows' property\n");
+
+ if (!chip->display_rows ||
+ chip->display_rows > chip->cdef->display_size_rows_max) {
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid leds display size %u\n",
+ chip->display_rows);
+ }
+
+ current_max = aw200xx_imax_from_global(chip, AW200XX_IMAX_MAX_uA);
+ current_min = aw200xx_imax_from_global(chip, AW200XX_IMAX_MIN_uA);
+ min_uA = UINT_MAX;
+ i = 0;
+
+ device_for_each_child_node(dev, child) {
+ struct led_init_data init_data = {};
+ struct aw200xx_led *led;
+ u32 source, imax;
+
+ ret = fwnode_property_read_u32(child, "reg", &source);
+ if (ret) {
+ dev_err(dev, "Missing reg property\n");
+ chip->num_leds--;
+ continue;
+ }
+
+ if (source >= chip->cdef->channels) {
+ dev_err(dev, "LED reg %u out of range (max %u)\n",
+ source, chip->cdef->channels);
+ chip->num_leds--;
+ continue;
+ }
+
+ ret = fwnode_property_read_u32(child, "led-max-microamp",
+ &imax);
+ if (ret) {
+ dev_info(&chip->client->dev,
+ "DT property led-max-microamp is missing\n");
+ } else if (imax < current_min || imax > current_max) {
+ dev_err(dev, "Invalid value %u for led-max-microamp\n",
+ imax);
+ chip->num_leds--;
+ continue;
+ } else {
+ min_uA = min(min_uA, imax);
+ }
+
+ led = &chip->leds[i];
+ led->dim = -1;
+ led->num = source;
+ led->chip = chip;
+ led->cdev.brightness_set_blocking = aw200xx_brightness_set;
+ led->cdev.groups = dim_groups;
+ init_data.fwnode = child;
+
+ ret = devm_led_classdev_register_ext(dev, &led->cdev,
+ &init_data);
+ if (ret) {
+ fwnode_handle_put(child);
+ break;
+ }
+
+ i++;
+ }
+
+ if (!chip->num_leds)
+ return -EINVAL;
+
+ if (min_uA == UINT_MAX) {
+ min_uA = aw200xx_imax_from_global(chip,
+ AW200XX_IMAX_DEFAULT_uA);
+ }
+
+ return aw200xx_set_imax(chip, min_uA);
+}
+
+static const struct regmap_range_cfg aw200xx_ranges[] = {
+ {
+ .name = "aw200xx",
+ .range_min = 0,
+ .range_max = AW200XX_REG_MAX,
+ .selector_reg = AW200XX_REG_PAGE,
+ .selector_mask = AW200XX_PAGE_MASK,
+ .selector_shift = AW200XX_PAGE_SHIFT,
+ .window_start = 0,
+ .window_len = AW200XX_PAGE_SIZE,
+ },
+};
+
+static const struct regmap_range aw200xx_writeonly_ranges[] = {
+ regmap_reg_range(AW200XX_REG(AW200XX_PAGE1, 0x00), AW200XX_REG_MAX),
+};
+
+static const struct regmap_access_table aw200xx_readable_table = {
+ .no_ranges = aw200xx_writeonly_ranges,
+ .n_no_ranges = ARRAY_SIZE(aw200xx_writeonly_ranges),
+};
+
+static const struct regmap_range aw200xx_readonly_ranges[] = {
+ regmap_reg_range(AW200XX_REG_IDR, AW200XX_REG_IDR),
+};
+
+static const struct regmap_access_table aw200xx_writeable_table = {
+ .no_ranges = aw200xx_readonly_ranges,
+ .n_no_ranges = ARRAY_SIZE(aw200xx_readonly_ranges),
+};
+
+static const struct regmap_config aw200xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AW200XX_REG_MAX,
+ .ranges = aw200xx_ranges,
+ .num_ranges = ARRAY_SIZE(aw200xx_ranges),
+ .rd_table = &aw200xx_readable_table,
+ .wr_table = &aw200xx_writeable_table,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int aw200xx_probe(struct i2c_client *client)
+{
+ const struct aw200xx_chipdef *cdef;
+ struct aw200xx *chip;
+ int count;
+ int ret;
+
+ cdef = device_get_match_data(&client->dev);
+ if (!cdef)
+ return -ENODEV;
+
+ count = device_get_child_node_count(&client->dev);
+ if (!count || count > cdef->channels)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "Incorrect number of leds (%d)", count);
+
+ chip = devm_kzalloc(&client->dev, struct_size(chip, leds, count),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->cdef = cdef;
+ chip->num_leds = count;
+ chip->client = client;
+ i2c_set_clientdata(client, chip);
+
+ chip->regmap = devm_regmap_init_i2c(client, &aw200xx_regmap_config);
+ if (IS_ERR(chip->regmap))
+ return PTR_ERR(chip->regmap);
+
+ ret = aw200xx_chip_check(chip);
+ if (ret)
+ return ret;
+
+ mutex_init(&chip->mutex);
+
+ /* Hold the lock from here on: aw200xx_probe_fw() creates sysfs nodes that use it */
+ mutex_lock(&chip->mutex);
+
+ ret = aw200xx_chip_reset(chip);
+ if (ret)
+ goto out_unlock;
+
+ ret = aw200xx_probe_fw(&client->dev, chip);
+ if (ret)
+ goto out_unlock;
+
+ ret = aw200xx_chip_init(chip);
+
+out_unlock:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static void aw200xx_remove(struct i2c_client *client)
+{
+ struct aw200xx *chip = i2c_get_clientdata(client);
+
+ aw200xx_chip_reset(chip);
+ mutex_destroy(&chip->mutex);
+}
+
+static const struct aw200xx_chipdef aw20036_cdef = {
+ .channels = 36,
+ .display_size_rows_max = 3,
+ .display_size_columns = 12,
+};
+
+static const struct aw200xx_chipdef aw20054_cdef = {
+ .channels = 54,
+ .display_size_rows_max = 6,
+ .display_size_columns = 9,
+};
+
+static const struct aw200xx_chipdef aw20072_cdef = {
+ .channels = 72,
+ .display_size_rows_max = 6,
+ .display_size_columns = 12,
+};
+
+static const struct i2c_device_id aw200xx_id[] = {
+ { "aw20036" },
+ { "aw20054" },
+ { "aw20072" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, aw200xx_id);
+
+static const struct of_device_id aw200xx_match_table[] = {
+ { .compatible = "awinic,aw20036", .data = &aw20036_cdef, },
+ { .compatible = "awinic,aw20054", .data = &aw20054_cdef, },
+ { .compatible = "awinic,aw20072", .data = &aw20072_cdef, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, aw200xx_match_table);
+
+static struct i2c_driver aw200xx_driver = {
+ .driver = {
+ .name = "aw200xx",
+ .of_match_table = aw200xx_match_table,
+ },
+ .probe_new = aw200xx_probe,
+ .remove = aw200xx_remove,
+ .id_table = aw200xx_id,
+};
+module_i2c_driver(aw200xx_driver);
+
+MODULE_AUTHOR("Martin Kurbanov <mmkurbanov@sberdevices.ru>");
+MODULE_DESCRIPTION("AW200XX LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 0b52fc9097c6..59765640b70f 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -422,7 +422,7 @@ static struct i2c_driver aw2013_driver = {
.name = "leds-aw2013",
.of_match_table = of_match_ptr(aw2013_match_table),
},
- .probe_new = aw2013_probe,
+ .probe = aw2013_probe,
.remove = aw2013_remove,
};
diff --git a/drivers/leds/leds-bd2606mvv.c b/drivers/leds/leds-bd2606mvv.c
index 76f9d4d70f9a..3fda712d2f80 100644
--- a/drivers/leds/leds-bd2606mvv.c
+++ b/drivers/leds/leds-bd2606mvv.c
@@ -150,7 +150,7 @@ static struct i2c_driver bd2606mvv_driver = {
.name = "leds-bd2606mvv",
.of_match_table = of_match_ptr(of_bd2606mvv_leds_match),
},
- .probe_new = bd2606mvv_probe,
+ .probe = bd2606mvv_probe,
};
module_i2c_driver(bd2606mvv_driver);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 601185ddabcc..0792ea126cea 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -786,7 +786,7 @@ static struct i2c_driver bd2802_i2c_driver = {
.name = "BD2802",
.pm = &bd2802_pm,
},
- .probe_new = bd2802_probe,
+ .probe = bd2802_probe,
.remove = bd2802_remove,
.id_table = bd2802_id,
};
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index 37f2f32ae42d..2782da1a1930 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -561,7 +561,7 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, "blinkm", I2C_NAME_SIZE);
+ strscpy(info->type, "blinkm", I2C_NAME_SIZE);
return 0;
}
@@ -730,7 +730,7 @@ static struct i2c_driver blinkm_driver = {
.driver = {
.name = "blinkm",
},
- .probe_new = blinkm_probe,
+ .probe = blinkm_probe,
.remove = blinkm_remove,
.id_table = blinkm_id,
.detect = blinkm_detect,
diff --git a/drivers/leds/leds-cht-wcove.c b/drivers/leds/leds-cht-wcove.c
new file mode 100644
index 000000000000..b4998402b8c6
--- /dev/null
+++ b/drivers/leds/leds-cht-wcove.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for LEDs connected to the Intel Cherry Trail Whiskey Cove PMIC
+ *
+ * Copyright 2019 Yauhen Kharuzhy <jekhor@gmail.com>
+ * Copyright 2023 Hans de Goede <hansg@kernel.org>
+ *
+ * Register info comes from the Lenovo Yoga Book Android opensource code
+ * available from Lenovo (file lenovo_yb1_x90f_l_osc_201803.7z), path inside the 7z:
+ * YB1_source_code/kernel/cht/drivers/misc/charger_gp_led.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/suspend.h>
+
+#define CHT_WC_LED1_CTRL 0x5e1f
+#define CHT_WC_LED1_FSM 0x5e20
+#define CHT_WC_LED1_PWM 0x5e21
+
+#define CHT_WC_LED2_CTRL 0x4fdf
+#define CHT_WC_LED2_FSM 0x4fe0
+#define CHT_WC_LED2_PWM 0x4fe1
+
+#define CHT_WC_LED1_SWCTL BIT(0) /* HW or SW control of charging led */
+#define CHT_WC_LED1_ON BIT(1)
+
+#define CHT_WC_LED2_ON BIT(0)
+#define CHT_WC_LED_I_MA2_5 (2 << 2) /* LED current limit */
+#define CHT_WC_LED_I_MASK GENMASK(3, 2) /* LED current limit mask */
+
+#define CHT_WC_LED_F_1_4_HZ (0 << 4)
+#define CHT_WC_LED_F_1_2_HZ (1 << 4)
+#define CHT_WC_LED_F_1_HZ (2 << 4)
+#define CHT_WC_LED_F_2_HZ (3 << 4)
+#define CHT_WC_LED_F_MASK GENMASK(5, 4)
+
+#define CHT_WC_LED_EFF_OFF (0 << 1)
+#define CHT_WC_LED_EFF_ON (1 << 1)
+#define CHT_WC_LED_EFF_BLINKING (2 << 1)
+#define CHT_WC_LED_EFF_BREATHING (3 << 1)
+#define CHT_WC_LED_EFF_MASK GENMASK(2, 1)
+
+#define CHT_WC_LED_COUNT 2
+
+struct cht_wc_led_regs {
+ /* Register addresses */
+ u16 ctrl;
+ u16 fsm;
+ u16 pwm;
+ /* Mask + values for turning the LED on/off */
+ u8 on_off_mask;
+ u8 on_val;
+ u8 off_val;
+};
+
+struct cht_wc_led_saved_regs {
+ unsigned int ctrl;
+ unsigned int fsm;
+ unsigned int pwm;
+};
+
+struct cht_wc_led {
+ struct led_classdev cdev;
+ const struct cht_wc_led_regs *regs;
+ struct regmap *regmap;
+ struct mutex mutex;
+ struct cht_wc_led_saved_regs saved_regs;
+};
+
+struct cht_wc_leds {
+ struct cht_wc_led leds[CHT_WC_LED_COUNT];
+ /* Saved LED1 initial register values */
+ struct cht_wc_led_saved_regs led1_initial_regs;
+};
+
+static const struct cht_wc_led_regs cht_wc_led_regs[CHT_WC_LED_COUNT] = {
+ {
+ .ctrl = CHT_WC_LED1_CTRL,
+ .fsm = CHT_WC_LED1_FSM,
+ .pwm = CHT_WC_LED1_PWM,
+ .on_off_mask = CHT_WC_LED1_SWCTL | CHT_WC_LED1_ON,
+ .on_val = CHT_WC_LED1_SWCTL | CHT_WC_LED1_ON,
+ .off_val = CHT_WC_LED1_SWCTL,
+ },
+ {
+ .ctrl = CHT_WC_LED2_CTRL,
+ .fsm = CHT_WC_LED2_FSM,
+ .pwm = CHT_WC_LED2_PWM,
+ .on_off_mask = CHT_WC_LED2_ON,
+ .on_val = CHT_WC_LED2_ON,
+ .off_val = 0,
+ },
+};
+
+static const char * const cht_wc_leds_names[CHT_WC_LED_COUNT] = {
+ "platform::" LED_FUNCTION_CHARGING,
+ "platform::" LED_FUNCTION_INDICATOR,
+};
+
+static int cht_wc_leds_brightness_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct cht_wc_led *led = container_of(cdev, struct cht_wc_led, cdev);
+ int ret;
+
+ mutex_lock(&led->mutex);
+
+ if (!value) {
+ ret = regmap_update_bits(led->regmap, led->regs->ctrl,
+ led->regs->on_off_mask, led->regs->off_val);
+ if (ret < 0) {
+ dev_err(cdev->dev, "Failed to turn off: %d\n", ret);
+ goto out;
+ }
+
+ /* Disable HW blinking */
+ ret = regmap_update_bits(led->regmap, led->regs->fsm,
+ CHT_WC_LED_EFF_MASK, CHT_WC_LED_EFF_ON);
+ if (ret < 0)
+ dev_err(cdev->dev, "Failed to update LED FSM reg: %d\n", ret);
+ } else {
+ ret = regmap_write(led->regmap, led->regs->pwm, value);
+ if (ret < 0) {
+ dev_err(cdev->dev, "Failed to set brightness: %d\n", ret);
+ goto out;
+ }
+
+ ret = regmap_update_bits(led->regmap, led->regs->ctrl,
+ led->regs->on_off_mask, led->regs->on_val);
+ if (ret < 0)
+ dev_err(cdev->dev, "Failed to turn on: %d\n", ret);
+ }
+out:
+ mutex_unlock(&led->mutex);
+ return ret;
+}
+
+static enum led_brightness cht_wc_leds_brightness_get(struct led_classdev *cdev)
+{
+ struct cht_wc_led *led = container_of(cdev, struct cht_wc_led, cdev);
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&led->mutex);
+
+ ret = regmap_read(led->regmap, led->regs->ctrl, &val);
+ if (ret < 0) {
+ dev_err(cdev->dev, "Failed to read LED CTRL reg: %d\n", ret);
+ ret = 0;
+ goto done;
+ }
+
+ val &= led->regs->on_off_mask;
+ if (val != led->regs->on_val) {
+ ret = 0;
+ goto done;
+ }
+
+ ret = regmap_read(led->regmap, led->regs->pwm, &val);
+ if (ret < 0) {
+ dev_err(cdev->dev, "Failed to read LED PWM reg: %d\n", ret);
+ ret = 0;
+ goto done;
+ }
+
+ ret = val;
+done:
+ mutex_unlock(&led->mutex);
+
+ return ret;
+}
+
+/* Return blinking period for given CTRL reg value */
+static unsigned long cht_wc_leds_get_period(int ctrl)
+{
+ ctrl &= CHT_WC_LED_F_MASK;
+
+ switch (ctrl) {
+ case CHT_WC_LED_F_1_4_HZ:
+ return 1000 * 4;
+ case CHT_WC_LED_F_1_2_HZ:
+ return 1000 * 2;
+ case CHT_WC_LED_F_1_HZ:
+ return 1000;
+ case CHT_WC_LED_F_2_HZ:
+ return 1000 / 2;
+ }
+
+ return 0;
+}
+
+/*
+ * Find suitable hardware blink mode for given period.
+ * period < 750 ms - select 2 HZ
+ * 750 ms <= period < 1500 ms - select 1 HZ
+ * 1500 ms <= period < 3000 ms - select 1/2 HZ
+ * 3000 ms <= period < 5000 ms - select 1/4 HZ
+ * 5000 ms <= period - return -1
+ */
+static int cht_wc_leds_find_freq(unsigned long period)
+{
+ if (period < 750)
+ return CHT_WC_LED_F_2_HZ;
+ else if (period < 1500)
+ return CHT_WC_LED_F_1_HZ;
+ else if (period < 3000)
+ return CHT_WC_LED_F_1_2_HZ;
+ else if (period < 5000)
+ return CHT_WC_LED_F_1_4_HZ;
+ else
+ return -1;
+}
+
+static int cht_wc_leds_set_effect(struct led_classdev *cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ u8 effect)
+{
+ struct cht_wc_led *led = container_of(cdev, struct cht_wc_led, cdev);
+ int ctrl, ret;
+
+ mutex_lock(&led->mutex);
+
+ /* Blink with 1 Hz as default if nothing specified */
+ if (!*delay_on && !*delay_off)
+ *delay_on = *delay_off = 500;
+
+ ctrl = cht_wc_leds_find_freq(*delay_on + *delay_off);
+ if (ctrl < 0) {
+ /* Disable HW blinking */
+ ret = regmap_update_bits(led->regmap, led->regs->fsm,
+ CHT_WC_LED_EFF_MASK, CHT_WC_LED_EFF_ON);
+ if (ret < 0)
+ dev_err(cdev->dev, "Failed to update LED FSM reg: %d\n", ret);
+
+ /* Fallback to software timer */
+ *delay_on = *delay_off = 0;
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = regmap_update_bits(led->regmap, led->regs->fsm,
+ CHT_WC_LED_EFF_MASK, effect);
+ if (ret < 0)
+ dev_err(cdev->dev, "Failed to update LED FSM reg: %d\n", ret);
+
+ /* Set the frequency and make sure the LED is on */
+ ret = regmap_update_bits(led->regmap, led->regs->ctrl,
+ CHT_WC_LED_F_MASK | led->regs->on_off_mask,
+ ctrl | led->regs->on_val);
+ if (ret < 0)
+ dev_err(cdev->dev, "Failed to update LED CTRL reg: %d\n", ret);
+
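+	/* Report back the half-period actually used by the hardware */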
+ *delay_off = *delay_on = cht_wc_leds_get_period(ctrl) / 2;
+
+done:
+ mutex_unlock(&led->mutex);
+
+ return ret;
+}
+
+static int cht_wc_leds_blink_set(struct led_classdev *cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u8 effect = CHT_WC_LED_EFF_BLINKING;
+
+ /*
+ * The desired default behavior of LED1 / the charge LED is breathing
+ * while charging and on/solid when full. Since triggers cannot select
+ * breathing, blink_set() gets called when charging. Use slow breathing
+ * when the default "charging-blink-full-solid" trigger is used to
+ * achieve the desired default behavior.
+ */
+ if (cdev->flags & LED_INIT_DEFAULT_TRIGGER) {
+ *delay_on = *delay_off = 1000;
+ effect = CHT_WC_LED_EFF_BREATHING;
+ }
+
+ return cht_wc_leds_set_effect(cdev, delay_on, delay_off, effect);
+}
+
+static int cht_wc_leds_pattern_set(struct led_classdev *cdev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ unsigned long delay_off, delay_on;
+
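+	/*
+	 * Only a 2-step pattern alternating between off and brightness 1 with
+	 * equal on/off times of 250, 500, 1000 or 2000 ms is accepted, e.g.
+	 * writing "0 500 1 500" to the pattern file selects 1 Hz breathing.
+	 */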
+ if (repeat > 0 || len != 2 ||
+ pattern[0].brightness != 0 || pattern[1].brightness != 1 ||
+ pattern[0].delta_t != pattern[1].delta_t ||
+ (pattern[0].delta_t != 250 && pattern[0].delta_t != 500 &&
+ pattern[0].delta_t != 1000 && pattern[0].delta_t != 2000))
+ return -EINVAL;
+
+ delay_off = pattern[0].delta_t;
+ delay_on = pattern[1].delta_t;
+
+ return cht_wc_leds_set_effect(cdev, &delay_on, &delay_off, CHT_WC_LED_EFF_BREATHING);
+}
+
+static int cht_wc_leds_pattern_clear(struct led_classdev *cdev)
+{
+ return cht_wc_leds_brightness_set(cdev, 0);
+}
+
+static int cht_wc_led_save_regs(struct cht_wc_led *led,
+ struct cht_wc_led_saved_regs *saved_regs)
+{
+ int ret;
+
+ ret = regmap_read(led->regmap, led->regs->ctrl, &saved_regs->ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(led->regmap, led->regs->fsm, &saved_regs->fsm);
+ if (ret < 0)
+ return ret;
+
+ return regmap_read(led->regmap, led->regs->pwm, &saved_regs->pwm);
+}
+
+static void cht_wc_led_restore_regs(struct cht_wc_led *led,
+ const struct cht_wc_led_saved_regs *saved_regs)
+{
+ regmap_write(led->regmap, led->regs->ctrl, saved_regs->ctrl);
+ regmap_write(led->regmap, led->regs->fsm, saved_regs->fsm);
+ regmap_write(led->regmap, led->regs->pwm, saved_regs->pwm);
+}
+
+static int cht_wc_leds_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct cht_wc_leds *leds;
+ int ret;
+ int i;
+
+ /*
+ * On the Lenovo Yoga Tab 3 the LED1 driver output is actually
+ * connected to a haptic feedback motor rather than an LED,
+ * so do not register an LED classdev there (LED2 is unused).
+ */
+ if (pmic->cht_wc_model == INTEL_CHT_WC_LENOVO_YT3_X90)
+ return -ENODEV;
+
+ leds = devm_kzalloc(&pdev->dev, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ /*
+ * LED1 might be in hw-controlled mode when this driver gets loaded and,
+ * since the PMIC is always powered by the battery, any changes made are
+ * permanent. Save the LED1 regs so they can be restored on remove() or shutdown().
+ */
+ leds->leds[0].regs = &cht_wc_led_regs[0];
+ leds->leds[0].regmap = pmic->regmap;
+ ret = cht_wc_led_save_regs(&leds->leds[0], &leds->led1_initial_regs);
+ if (ret < 0)
+ return ret;
+
+ /* Set LED1 default trigger based on machine model */
+ switch (pmic->cht_wc_model) {
+ case INTEL_CHT_WC_GPD_WIN_POCKET:
+ leds->leds[0].cdev.default_trigger = "max170xx_battery-charging-blink-full-solid";
+ break;
+ case INTEL_CHT_WC_XIAOMI_MIPAD2:
+ leds->leds[0].cdev.default_trigger = "bq27520-0-charging-blink-full-solid";
+ break;
+ case INTEL_CHT_WC_LENOVO_YOGABOOK1:
+ leds->leds[0].cdev.default_trigger = "bq27542-0-charging-blink-full-solid";
+ break;
+ default:
+ dev_warn(&pdev->dev, "Unknown model, no default charging trigger\n");
+ break;
+ }
+
+ for (i = 0; i < CHT_WC_LED_COUNT; i++) {
+ struct cht_wc_led *led = &leds->leds[i];
+
+ led->regs = &cht_wc_led_regs[i];
+ led->regmap = pmic->regmap;
+ mutex_init(&led->mutex);
+ led->cdev.name = cht_wc_leds_names[i];
+ led->cdev.brightness_set_blocking = cht_wc_leds_brightness_set;
+ led->cdev.brightness_get = cht_wc_leds_brightness_get;
+ led->cdev.blink_set = cht_wc_leds_blink_set;
+ led->cdev.pattern_set = cht_wc_leds_pattern_set;
+ led->cdev.pattern_clear = cht_wc_leds_pattern_clear;
+ led->cdev.max_brightness = 255;
+
+ ret = led_classdev_register(&pdev->dev, &led->cdev);
+ if (ret < 0)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, leds);
+ return 0;
+}
+
+static void cht_wc_leds_remove(struct platform_device *pdev)
+{
+ struct cht_wc_leds *leds = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < CHT_WC_LED_COUNT; i++)
+ led_classdev_unregister(&leds->leds[i].cdev);
+
+	/* Restore LED1 regs if hw-control was active, else leave LED1 off */
+ if (!(leds->led1_initial_regs.ctrl & CHT_WC_LED1_SWCTL))
+ cht_wc_led_restore_regs(&leds->leds[0], &leds->led1_initial_regs);
+}
+
+static void cht_wc_leds_disable(struct platform_device *pdev)
+{
+ struct cht_wc_leds *leds = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < CHT_WC_LED_COUNT; i++)
+ cht_wc_leds_brightness_set(&leds->leds[i].cdev, 0);
+
+ /* Restore LED1 regs if hw-control was active else leave LED1 off */
+ if (!(leds->led1_initial_regs.ctrl & CHT_WC_LED1_SWCTL))
+ cht_wc_led_restore_regs(&leds->leds[0], &leds->led1_initial_regs);
+}
+
+/* On suspend save current settings and turn LEDs off */
+static int cht_wc_leds_suspend(struct device *dev)
+{
+ struct cht_wc_leds *leds = dev_get_drvdata(dev);
+ int i, ret;
+
+ for (i = 0; i < CHT_WC_LED_COUNT; i++) {
+ ret = cht_wc_led_save_regs(&leds->leds[i], &leds->leds[i].saved_regs);
+ if (ret < 0)
+ return ret;
+ }
+
+ cht_wc_leds_disable(to_platform_device(dev));
+ return 0;
+}
+
+/* On resume restore the saved settings */
+static int cht_wc_leds_resume(struct device *dev)
+{
+ struct cht_wc_leds *leds = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < CHT_WC_LED_COUNT; i++)
+ cht_wc_led_restore_regs(&leds->leds[i], &leds->leds[i].saved_regs);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(cht_wc_leds_pm, cht_wc_leds_suspend, cht_wc_leds_resume);
+
+static struct platform_driver cht_wc_leds_driver = {
+ .probe = cht_wc_leds_probe,
+ .remove_new = cht_wc_leds_remove,
+ .shutdown = cht_wc_leds_disable,
+ .driver = {
+ .name = "cht_wcove_leds",
+ .pm = pm_sleep_ptr(&cht_wc_leds_pm),
+ },
+};
+module_platform_driver(cht_wc_leds_driver);
+
+MODULE_ALIAS("platform:cht_wcove_leds");
+MODULE_DESCRIPTION("Intel Cherry Trail Whiskey Cove PMIC LEDs driver");
+MODULE_AUTHOR("Yauhen Kharuzhy <jekhor@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index ce4e79939731..7bfe40a6bfdd 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -13,6 +13,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -77,6 +78,7 @@ static int create_gpio_led(const struct gpio_led *template,
struct fwnode_handle *fwnode, gpio_blink_set_t blink_set)
{
struct led_init_data init_data = {};
+ struct pinctrl *pinctrl;
int ret, state;
led_dat->cdev.default_trigger = template->default_trigger;
@@ -119,6 +121,22 @@ static int create_gpio_led(const struct gpio_led *template,
&init_data);
}
+ if (ret)
+ return ret;
+
+ pinctrl = devm_pinctrl_get_select_default(led_dat->cdev.dev);
+ if (IS_ERR(pinctrl)) {
+ ret = PTR_ERR(pinctrl);
+ if (ret != -ENODEV) {
+ dev_warn(led_dat->cdev.dev,
+ "Failed to select %pOF pinctrl: %d\n",
+ to_of_node(fwnode), ret);
+ } else {
+ /* pinctrl-%d not present, not an error */
+ ret = 0;
+ }
+ }
+
return ret;
}
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 7c908414ac7e..66c65741202e 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -602,7 +602,7 @@ static struct i2c_driver is31fl319x_driver = {
.name = "leds-is31fl319x",
.of_match_table = of_is31fl319x_match,
},
- .probe_new = is31fl319x_probe,
+ .probe = is31fl319x_probe,
.id_table = is31fl319x_id,
};
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 799191859ce0..72cb56d305c4 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -488,7 +488,7 @@ static struct i2c_driver is31fl32xx_driver = {
.name = "is31fl32xx",
.of_match_table = of_is31fl32xx_match,
},
- .probe_new = is31fl32xx_probe,
+ .probe = is31fl32xx_probe,
.remove = is31fl32xx_remove,
.id_table = is31fl32xx_id,
};
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index a9a2018592ff..a2feef8e4ac5 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -484,7 +484,7 @@ static const struct i2c_device_id lm3530_id[] = {
MODULE_DEVICE_TABLE(i2c, lm3530_id);
static struct i2c_driver lm3530_i2c_driver = {
- .probe_new = lm3530_probe,
+ .probe = lm3530_probe,
.remove = lm3530_remove,
.id_table = lm3530_id,
.driver = {
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index a08c09129a68..13662a4aa1f2 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -726,7 +726,7 @@ static const struct i2c_device_id lm3532_id[] = {
MODULE_DEVICE_TABLE(i2c, lm3532_id);
static struct i2c_driver lm3532_i2c_driver = {
- .probe_new = lm3532_probe,
+ .probe = lm3532_probe,
.remove = lm3532_remove,
.id_table = lm3532_id,
.driver = {
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 612873070ca4..f68771b9eac6 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -516,7 +516,7 @@ static struct i2c_driver lm355x_i2c_driver = {
.name = LM355x_NAME,
.pm = NULL,
},
- .probe_new = lm355x_probe,
+ .probe = lm355x_probe,
.remove = lm355x_remove,
.id_table = lm355x_id,
};
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index b75ee3546c2e..6eee52e211be 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -401,7 +401,7 @@ static struct i2c_driver lm3642_i2c_driver = {
.name = LM3642_NAME,
.pm = NULL,
},
- .probe_new = lm3642_probe,
+ .probe = lm3642_probe,
.remove = lm3642_remove,
.id_table = lm3642_id,
};
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 66126d0666f5..f8ad61e47a19 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -518,7 +518,7 @@ static struct i2c_driver lm3692x_driver = {
.name = "lm3692x",
.of_match_table = of_lm3692x_leds_match,
},
- .probe_new = lm3692x_probe,
+ .probe = lm3692x_probe,
.remove = lm3692x_remove,
.id_table = lm3692x_id,
};
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index 10e904bf40a0..cfb8ac220db6 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -376,7 +376,7 @@ static struct i2c_driver lm3697_driver = {
.name = "lm3697",
.of_match_table = of_lm3697_leds_match,
},
- .probe_new = lm3697_probe,
+ .probe = lm3697_probe,
.remove = lm3697_remove,
.id_table = lm3697_id,
};
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index be47c66b2e00..8ea746c499d1 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -427,7 +427,7 @@ static struct i2c_driver lp3944_driver = {
.driver = {
.name = "lp3944",
},
- .probe_new = lp3944_probe,
+ .probe = lp3944_probe,
.remove = lp3944_remove,
.id_table = lp3944_id,
};
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index 24b2e0f9080d..3bd55652a706 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -273,7 +273,7 @@ static struct i2c_driver lp3952_i2c_driver = {
.driver = {
.name = LP3952_NAME,
},
- .probe_new = lp3952_probe,
+ .probe = lp3952_probe,
.remove = lp3952_remove,
.id_table = lp3952_id,
};
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index 28d6b39fa72d..68c4d9967d68 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -608,7 +608,7 @@ static struct i2c_driver lp50xx_driver = {
.name = "lp50xx",
.of_match_table = of_lp50xx_leds_match,
},
- .probe_new = lp50xx_probe,
+ .probe = lp50xx_probe,
.remove = lp50xx_remove,
.id_table = lp50xx_id,
};
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index a004af8e22c7..030c040fdf6d 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -58,14 +58,11 @@
/* CONFIG register */
#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */
#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */
-#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */
-#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */
-#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */
-#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */
+#define LP5521_CP_MODE_MASK 0x18 /* Charge pump mode */
+#define LP5521_CP_MODE_SHIFT 3
#define LP5521_R_TO_BATT 0x04 /* R out: 0 = CP, 1 = Vbat */
#define LP5521_CLK_INT 0x01 /* Internal clock */
-#define LP5521_DEFAULT_CFG \
- (LP5521_PWM_HF | LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO)
+#define LP5521_DEFAULT_CFG (LP5521_PWM_HF | LP5521_PWRSAVE_EN)
/* Status */
#define LP5521_EXT_CLK_USED 0x08
@@ -310,6 +307,8 @@ static int lp5521_post_init_device(struct lp55xx_chip *chip)
if (!lp55xx_is_extclk_used(chip))
val |= LP5521_CLK_INT;
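+	/* charge_pump_mode (0..3: off, bypass, 1.5x, automatic) selects the charge pump via CONFIG bits 4:3 */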
+ val |= (chip->pdata->charge_pump_mode << LP5521_CP_MODE_SHIFT) & LP5521_CP_MODE_MASK;
+
ret = lp55xx_write(chip, LP5521_REG_CONFIG, val);
if (ret)
return ret;
@@ -608,7 +607,7 @@ static struct i2c_driver lp5521_driver = {
.name = "lp5521",
.of_match_table = of_match_ptr(of_lp5521_leds_match),
},
- .probe_new = lp5521_probe,
+ .probe = lp5521_probe,
.remove = lp5521_remove,
.id_table = lp5521_id,
};
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 55da914b8e5c..daa6a165fba6 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -57,8 +57,11 @@
#define LP5523_AUTO_INC 0x40
#define LP5523_PWR_SAVE 0x20
#define LP5523_PWM_PWR_SAVE 0x04
-#define LP5523_CP_AUTO 0x18
+#define LP5523_CP_MODE_MASK 0x18
+#define LP5523_CP_MODE_SHIFT 3
#define LP5523_AUTO_CLK 0x02
+#define LP5523_DEFAULT_CONFIG \
+ (LP5523_AUTO_INC | LP5523_PWR_SAVE | LP5523_AUTO_CLK | LP5523_PWM_PWR_SAVE)
#define LP5523_EN_LEDTEST 0x80
#define LP5523_LEDTEST_DONE 0x80
@@ -125,6 +128,7 @@ static void lp5523_set_led_current(struct lp55xx_led *led, u8 led_current)
static int lp5523_post_init_device(struct lp55xx_chip *chip)
{
int ret;
+ int val;
ret = lp55xx_write(chip, LP5523_REG_ENABLE, LP5523_ENABLE);
if (ret)
@@ -133,10 +137,10 @@ static int lp5523_post_init_device(struct lp55xx_chip *chip)
/* Chip startup time is 500 us, 1 - 2 ms gives some margin */
usleep_range(1000, 2000);
- ret = lp55xx_write(chip, LP5523_REG_CONFIG,
- LP5523_AUTO_INC | LP5523_PWR_SAVE |
- LP5523_CP_AUTO | LP5523_AUTO_CLK |
- LP5523_PWM_PWR_SAVE);
+ val = LP5523_DEFAULT_CONFIG;
+ val |= (chip->pdata->charge_pump_mode << LP5523_CP_MODE_SHIFT) & LP5523_CP_MODE_MASK;
+
+ ret = lp55xx_write(chip, LP5523_REG_CONFIG, val);
if (ret)
return ret;
@@ -983,7 +987,7 @@ static struct i2c_driver lp5523_driver = {
.name = "lp5523x",
.of_match_table = of_match_ptr(of_lp5523_leds_match),
},
- .probe_new = lp5523_probe,
+ .probe = lp5523_probe,
.remove = lp5523_remove,
.id_table = lp5523_id,
};
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index b5d877faf6d7..4565cc12cea8 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -603,7 +603,7 @@ static struct i2c_driver lp5562_driver = {
.name = "lp5562",
.of_match_table = of_match_ptr(of_lp5562_leds_match),
},
- .probe_new = lp5562_probe,
+ .probe = lp5562_probe,
.remove = lp5562_remove,
.id_table = lp5562_id,
};
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index c1940964067a..77bb26906ea6 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -18,6 +18,7 @@
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
+#include <dt-bindings/leds/leds-lp55xx.h>
#include "leds-lp55xx-common.h"
@@ -691,6 +692,14 @@ struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
i++;
}
+ if (of_property_read_u32(np, "ti,charge-pump-mode", &pdata->charge_pump_mode))
+ pdata->charge_pump_mode = LP55XX_CP_AUTO;
+
+ if (pdata->charge_pump_mode > LP55XX_CP_AUTO) {
+ dev_err(dev, "invalid charge pump mode %d\n", pdata->charge_pump_mode);
+ return ERR_PTR(-EINVAL);
+ }
+
of_property_read_string(np, "label", &pdata->label);
of_property_read_u8(np, "clock-mode", &pdata->clock_mode);
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index 165d6423a928..f11886aa8965 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -53,10 +53,10 @@
#define LP8501_PWM_PSAVE BIT(7)
#define LP8501_AUTO_INC BIT(6)
#define LP8501_PWR_SAVE BIT(5)
-#define LP8501_CP_AUTO 0x18
+#define LP8501_CP_MODE_MASK 0x18
+#define LP8501_CP_MODE_SHIFT 3
#define LP8501_INT_CLK BIT(0)
-#define LP8501_DEFAULT_CFG \
- (LP8501_PWM_PSAVE | LP8501_AUTO_INC | LP8501_PWR_SAVE | LP8501_CP_AUTO)
+#define LP8501_DEFAULT_CFG (LP8501_PWM_PSAVE | LP8501_AUTO_INC | LP8501_PWR_SAVE)
#define LP8501_REG_RESET 0x3D
#define LP8501_RESET 0xFF
@@ -102,6 +102,8 @@ static int lp8501_post_init_device(struct lp55xx_chip *chip)
if (chip->pdata->clock_mode != LP55XX_CLOCK_EXT)
val |= LP8501_INT_CLK;
+ val |= (chip->pdata->charge_pump_mode << LP8501_CP_MODE_SHIFT) & LP8501_CP_MODE_MASK;
+
ret = lp55xx_write(chip, LP8501_REG_CONFIG, val);
if (ret)
return ret;
@@ -392,7 +394,7 @@ static struct i2c_driver lp8501_driver = {
.name = "lp8501",
.of_match_table = of_match_ptr(of_lp8501_leds_match),
},
- .probe_new = lp8501_probe,
+ .probe = lp8501_probe,
.remove = lp8501_remove,
.id_table = lp8501_id,
};
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 221b386443bc..19b621012e58 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -475,7 +475,7 @@ static struct i2c_driver lp8860_driver = {
.name = "lp8860",
.of_match_table = of_lp8860_leds_match,
},
- .probe_new = lp8860_probe,
+ .probe = lp8860_probe,
.remove = lp8860_remove,
.id_table = lp8860_id,
};
diff --git a/drivers/leds/leds-mt6323.c b/drivers/leds/leds-mt6323.c
index 17ee88043f52..24f35bdb55fb 100644
--- a/drivers/leds/leds-mt6323.c
+++ b/drivers/leds/leds-mt6323.c
@@ -14,61 +14,54 @@
#include <linux/regmap.h>
/*
- * Register field for MT6323_TOP_CKPDN0 to enable
+ * Register field for TOP_CKPDN0 to enable
* 32K clock common for LED device.
*/
-#define MT6323_RG_DRV_32K_CK_PDN BIT(11)
-#define MT6323_RG_DRV_32K_CK_PDN_MASK BIT(11)
+#define RG_DRV_32K_CK_PDN BIT(11)
+#define RG_DRV_32K_CK_PDN_MASK BIT(11)
+
+/* 32K/1M/6M clock common for WLED device */
+#define RG_VWLED_1M_CK_PDN BIT(0)
+#define RG_VWLED_32K_CK_PDN BIT(12)
+#define RG_VWLED_6M_CK_PDN BIT(13)
/*
- * Register field for MT6323_TOP_CKPDN2 to enable
+ * Register field for TOP_CKPDN2 to enable
* individual clock for LED device.
*/
-#define MT6323_RG_ISINK_CK_PDN(i) BIT(i)
-#define MT6323_RG_ISINK_CK_PDN_MASK(i) BIT(i)
+#define RG_ISINK_CK_PDN(i) BIT(i)
+#define RG_ISINK_CK_PDN_MASK(i) BIT(i)
/*
- * Register field for MT6323_TOP_CKCON1 to select
+ * Register field for TOP_CKCON1 to select
* clock source.
*/
-#define MT6323_RG_ISINK_CK_SEL_MASK(i) (BIT(10) << (i))
+#define RG_ISINK_CK_SEL_MASK(i) (BIT(10) << (i))
-/*
- * Register for MT6323_ISINK_CON0 to setup the
- * duty cycle of the blink.
- */
-#define MT6323_ISINK_CON0(i) (MT6323_ISINK0_CON0 + 0x8 * (i))
-#define MT6323_ISINK_DIM_DUTY_MASK (0x1f << 8)
-#define MT6323_ISINK_DIM_DUTY(i) (((i) << 8) & \
- MT6323_ISINK_DIM_DUTY_MASK)
-
-/* Register to setup the period of the blink. */
-#define MT6323_ISINK_CON1(i) (MT6323_ISINK0_CON1 + 0x8 * (i))
-#define MT6323_ISINK_DIM_FSEL_MASK (0xffff)
-#define MT6323_ISINK_DIM_FSEL(i) ((i) & MT6323_ISINK_DIM_FSEL_MASK)
-
-/* Register to control the brightness. */
-#define MT6323_ISINK_CON2(i) (MT6323_ISINK0_CON2 + 0x8 * (i))
-#define MT6323_ISINK_CH_STEP_SHIFT 12
-#define MT6323_ISINK_CH_STEP_MASK (0x7 << 12)
-#define MT6323_ISINK_CH_STEP(i) (((i) << 12) & \
- MT6323_ISINK_CH_STEP_MASK)
-#define MT6323_ISINK_SFSTR0_TC_MASK (0x3 << 1)
-#define MT6323_ISINK_SFSTR0_TC(i) (((i) << 1) & \
- MT6323_ISINK_SFSTR0_TC_MASK)
-#define MT6323_ISINK_SFSTR0_EN_MASK BIT(0)
-#define MT6323_ISINK_SFSTR0_EN BIT(0)
+#define ISINK_CON(r, i) (r + 0x8 * (i))
+
+/* ISINK_CON0: Register to setup the duty cycle of the blink. */
+#define ISINK_DIM_DUTY_MASK (0x1f << 8)
+#define ISINK_DIM_DUTY(i) (((i) << 8) & ISINK_DIM_DUTY_MASK)
+
+/* ISINK_CON1: Register to setup the period of the blink. */
+#define ISINK_DIM_FSEL_MASK (0xffff)
+#define ISINK_DIM_FSEL(i) ((i) & ISINK_DIM_FSEL_MASK)
+
+/* ISINK_CON2: Register to control the brightness. */
+#define ISINK_CH_STEP_SHIFT 12
+#define ISINK_CH_STEP_MASK (0x7 << 12)
+#define ISINK_CH_STEP(i) (((i) << 12) & ISINK_CH_STEP_MASK)
+#define ISINK_SFSTR0_TC_MASK (0x3 << 1)
+#define ISINK_SFSTR0_TC(i) (((i) << 1) & ISINK_SFSTR0_TC_MASK)
+#define ISINK_SFSTR0_EN_MASK BIT(0)
+#define ISINK_SFSTR0_EN BIT(0)
/* Register to LED channel enablement. */
-#define MT6323_ISINK_CH_EN_MASK(i) BIT(i)
-#define MT6323_ISINK_CH_EN(i) BIT(i)
+#define ISINK_CH_EN_MASK(i) BIT(i)
+#define ISINK_CH_EN(i) BIT(i)
-#define MT6323_MAX_PERIOD 10000
-#define MT6323_MAX_LEDS 4
-#define MT6323_MAX_BRIGHTNESS 6
-#define MT6323_UNIT_DUTY 3125
-#define MT6323_CAL_HW_DUTY(o, p) DIV_ROUND_CLOSEST((o) * 100000ul,\
- (p) * MT6323_UNIT_DUTY)
+#define MAX_SUPPORTED_LEDS 8
struct mt6323_leds;
@@ -87,11 +80,62 @@ struct mt6323_led {
};
/**
+ * struct mt6323_regs - register spec for the LED device
+ * @top_ckpdn:		Offset to TOP_CKPDN[0..x] registers
+ * @num_top_ckpdn:	Number of TOP_CKPDN registers
+ * @top_ckcon:		Offset to TOP_CKCON[0..x] registers
+ * @num_top_ckcon:	Number of TOP_CKCON registers
+ * @isink_con: Offset to ISINKx_CON[0..x] registers
+ * @num_isink_con: Number of ISINKx_CON registers
+ * @isink_max_regs: Number of ISINK[0..x] registers
+ * @isink_en_ctrl: Offset to ISINK_EN_CTRL register
+ * @iwled_en_ctrl: Offset to IWLED_EN_CTRL register
+ */
+struct mt6323_regs {
+ const u16 *top_ckpdn;
+ u8 num_top_ckpdn;
+ const u16 *top_ckcon;
+ u8 num_top_ckcon;
+ const u16 *isink_con;
+ u8 num_isink_con;
+ u8 isink_max_regs;
+ u16 isink_en_ctrl;
+ u16 iwled_en_ctrl;
+};
+
+/**
+ * struct mt6323_hwspec - hardware specific parameters
+ * @max_period: Maximum period for all LEDs
+ * @max_leds: Maximum number of supported LEDs
+ * @max_wleds: Maximum number of WLEDs
+ * @max_brightness: Maximum brightness for all LEDs
+ * @unit_duty: Steps of duty per period
+ */
+struct mt6323_hwspec {
+ u16 max_period;
+ u8 max_leds;
+ u8 max_wleds;
+ u16 max_brightness;
+ u16 unit_duty;
+};
+
+/**
+ * struct mt6323_data - device specific data
+ * @regs: Register spec for this device
+ * @spec: Hardware specific parameters
+ */
+struct mt6323_data {
+ const struct mt6323_regs *regs;
+ const struct mt6323_hwspec *spec;
+};
+
+/**
* struct mt6323_leds - state container for holding LED controller
* of the driver
* @dev: the device pointer
* @hw: the underlying hardware providing shared
* bus for the register operations
+ * @pdata: device specific data
* @lock: the lock among process context
* @led: the array that contains the state of individual
* LED device
@@ -99,9 +143,10 @@ struct mt6323_led {
struct mt6323_leds {
struct device *dev;
struct mt6397_chip *hw;
+ const struct mt6323_data *pdata;
/* protect among process context */
struct mutex lock;
- struct mt6323_led *led[MT6323_MAX_LEDS];
+ struct mt6323_led *led[MAX_SUPPORTED_LEDS];
};
static int mt6323_led_hw_brightness(struct led_classdev *cdev,
@@ -109,6 +154,7 @@ static int mt6323_led_hw_brightness(struct led_classdev *cdev,
{
struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
struct mt6323_leds *leds = led->parent;
+ const struct mt6323_regs *regs = leds->pdata->regs;
struct regmap *regmap = leds->hw->regmap;
u32 con2_mask = 0, con2_val = 0;
int ret;
@@ -117,14 +163,14 @@ static int mt6323_led_hw_brightness(struct led_classdev *cdev,
* Setup current output for the corresponding
* brightness level.
*/
- con2_mask |= MT6323_ISINK_CH_STEP_MASK |
- MT6323_ISINK_SFSTR0_TC_MASK |
- MT6323_ISINK_SFSTR0_EN_MASK;
- con2_val |= MT6323_ISINK_CH_STEP(brightness - 1) |
- MT6323_ISINK_SFSTR0_TC(2) |
- MT6323_ISINK_SFSTR0_EN;
-
- ret = regmap_update_bits(regmap, MT6323_ISINK_CON2(led->id),
+ con2_mask |= ISINK_CH_STEP_MASK |
+ ISINK_SFSTR0_TC_MASK |
+ ISINK_SFSTR0_EN_MASK;
+ con2_val |= ISINK_CH_STEP(brightness - 1) |
+ ISINK_SFSTR0_TC(2) |
+ ISINK_SFSTR0_EN;
+
+ ret = regmap_update_bits(regmap, ISINK_CON(regs->isink_con[2], led->id),
con2_mask, con2_val);
return ret;
}
@@ -133,20 +179,21 @@ static int mt6323_led_hw_off(struct led_classdev *cdev)
{
struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
struct mt6323_leds *leds = led->parent;
+ const struct mt6323_regs *regs = leds->pdata->regs;
struct regmap *regmap = leds->hw->regmap;
unsigned int status;
int ret;
- status = MT6323_ISINK_CH_EN(led->id);
- ret = regmap_update_bits(regmap, MT6323_ISINK_EN_CTRL,
- MT6323_ISINK_CH_EN_MASK(led->id), ~status);
+ status = ISINK_CH_EN(led->id);
+ ret = regmap_update_bits(regmap, regs->isink_en_ctrl,
+ ISINK_CH_EN_MASK(led->id), ~status);
if (ret < 0)
return ret;
usleep_range(100, 300);
- ret = regmap_update_bits(regmap, MT6323_TOP_CKPDN2,
- MT6323_RG_ISINK_CK_PDN_MASK(led->id),
- MT6323_RG_ISINK_CK_PDN(led->id));
+ ret = regmap_update_bits(regmap, regs->top_ckpdn[2],
+ RG_ISINK_CK_PDN_MASK(led->id),
+ RG_ISINK_CK_PDN(led->id));
if (ret < 0)
return ret;
@@ -158,30 +205,31 @@ mt6323_get_led_hw_brightness(struct led_classdev *cdev)
{
struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
struct mt6323_leds *leds = led->parent;
+ const struct mt6323_regs *regs = leds->pdata->regs;
struct regmap *regmap = leds->hw->regmap;
unsigned int status;
int ret;
- ret = regmap_read(regmap, MT6323_TOP_CKPDN2, &status);
+ ret = regmap_read(regmap, regs->top_ckpdn[2], &status);
if (ret < 0)
return ret;
- if (status & MT6323_RG_ISINK_CK_PDN_MASK(led->id))
+ if (status & RG_ISINK_CK_PDN_MASK(led->id))
return 0;
- ret = regmap_read(regmap, MT6323_ISINK_EN_CTRL, &status);
+ ret = regmap_read(regmap, regs->isink_en_ctrl, &status);
if (ret < 0)
return ret;
- if (!(status & MT6323_ISINK_CH_EN(led->id)))
+ if (!(status & ISINK_CH_EN(led->id)))
return 0;
- ret = regmap_read(regmap, MT6323_ISINK_CON2(led->id), &status);
+ ret = regmap_read(regmap, ISINK_CON(regs->isink_con[2], led->id), &status);
if (ret < 0)
return ret;
- return ((status & MT6323_ISINK_CH_STEP_MASK)
- >> MT6323_ISINK_CH_STEP_SHIFT) + 1;
+ return ((status & ISINK_CH_STEP_MASK)
+ >> ISINK_CH_STEP_SHIFT) + 1;
}
static int mt6323_led_hw_on(struct led_classdev *cdev,
@@ -189,6 +237,7 @@ static int mt6323_led_hw_on(struct led_classdev *cdev,
{
struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
struct mt6323_leds *leds = led->parent;
+ const struct mt6323_regs *regs = leds->pdata->regs;
struct regmap *regmap = leds->hw->regmap;
unsigned int status;
int ret;
@@ -198,23 +247,23 @@ static int mt6323_led_hw_on(struct led_classdev *cdev,
* clock and channel and let work with continuous blink as
* the default.
*/
- ret = regmap_update_bits(regmap, MT6323_TOP_CKCON1,
- MT6323_RG_ISINK_CK_SEL_MASK(led->id), 0);
+ ret = regmap_update_bits(regmap, regs->top_ckcon[1],
+ RG_ISINK_CK_SEL_MASK(led->id), 0);
if (ret < 0)
return ret;
- status = MT6323_RG_ISINK_CK_PDN(led->id);
- ret = regmap_update_bits(regmap, MT6323_TOP_CKPDN2,
- MT6323_RG_ISINK_CK_PDN_MASK(led->id),
+ status = RG_ISINK_CK_PDN(led->id);
+ ret = regmap_update_bits(regmap, regs->top_ckpdn[2],
+ RG_ISINK_CK_PDN_MASK(led->id),
~status);
if (ret < 0)
return ret;
usleep_range(100, 300);
- ret = regmap_update_bits(regmap, MT6323_ISINK_EN_CTRL,
- MT6323_ISINK_CH_EN_MASK(led->id),
- MT6323_ISINK_CH_EN(led->id));
+ ret = regmap_update_bits(regmap, regs->isink_en_ctrl,
+ ISINK_CH_EN_MASK(led->id),
+ ISINK_CH_EN(led->id));
if (ret < 0)
return ret;
@@ -222,15 +271,15 @@ static int mt6323_led_hw_on(struct led_classdev *cdev,
if (ret < 0)
return ret;
- ret = regmap_update_bits(regmap, MT6323_ISINK_CON0(led->id),
- MT6323_ISINK_DIM_DUTY_MASK,
- MT6323_ISINK_DIM_DUTY(31));
+ ret = regmap_update_bits(regmap, ISINK_CON(regs->isink_con[0], led->id),
+ ISINK_DIM_DUTY_MASK,
+ ISINK_DIM_DUTY(31));
if (ret < 0)
return ret;
- ret = regmap_update_bits(regmap, MT6323_ISINK_CON1(led->id),
- MT6323_ISINK_DIM_FSEL_MASK,
- MT6323_ISINK_DIM_FSEL(1000));
+ ret = regmap_update_bits(regmap, ISINK_CON(regs->isink_con[1], led->id),
+ ISINK_DIM_FSEL_MASK,
+ ISINK_DIM_FSEL(1000));
if (ret < 0)
return ret;
@@ -243,6 +292,8 @@ static int mt6323_led_set_blink(struct led_classdev *cdev,
{
struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
struct mt6323_leds *leds = led->parent;
+ const struct mt6323_regs *regs = leds->pdata->regs;
+ const struct mt6323_hwspec *spec = leds->pdata->spec;
struct regmap *regmap = leds->hw->regmap;
unsigned long period;
u8 duty_hw;
@@ -265,14 +316,14 @@ static int mt6323_led_set_blink(struct led_classdev *cdev,
*/
period = *delay_on + *delay_off;
- if (period > MT6323_MAX_PERIOD)
+ if (period > spec->max_period)
return -EINVAL;
/*
* Calculate duty_hw based on the percentage of period during
* which the led is ON.
*/
- duty_hw = MT6323_CAL_HW_DUTY(*delay_on, period);
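+	/* e.g. *delay_on = *delay_off = 500 ms with unit_duty 3125: 500 * 100000 / (1000 * 3125) = 16 of 32 steps */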
+ duty_hw = DIV_ROUND_CLOSEST(*delay_on * 100000ul, period * spec->unit_duty);
/* hardware doesn't support zero duty cycle. */
if (!duty_hw)
@@ -290,15 +341,15 @@ static int mt6323_led_set_blink(struct led_classdev *cdev,
led->current_brightness = cdev->max_brightness;
}
- ret = regmap_update_bits(regmap, MT6323_ISINK_CON0(led->id),
- MT6323_ISINK_DIM_DUTY_MASK,
- MT6323_ISINK_DIM_DUTY(duty_hw - 1));
+ ret = regmap_update_bits(regmap, ISINK_CON(regs->isink_con[0], led->id),
+ ISINK_DIM_DUTY_MASK,
+ ISINK_DIM_DUTY(duty_hw - 1));
if (ret < 0)
goto out;
- ret = regmap_update_bits(regmap, MT6323_ISINK_CON1(led->id),
- MT6323_ISINK_DIM_FSEL_MASK,
- MT6323_ISINK_DIM_FSEL(period - 1));
+ ret = regmap_update_bits(regmap, ISINK_CON(regs->isink_con[1], led->id),
+ ISINK_DIM_FSEL_MASK,
+ ISINK_DIM_FSEL(period - 1));
out:
mutex_unlock(&leds->lock);
@@ -335,6 +386,117 @@ out:
return ret;
}
+static int mtk_wled_hw_on(struct led_classdev *cdev)
+{
+ struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
+ struct mt6323_leds *leds = led->parent;
+ const struct mt6323_regs *regs = leds->pdata->regs;
+ struct regmap *regmap = leds->hw->regmap;
+ int ret;
+
+ ret = regmap_clear_bits(regmap, regs->top_ckpdn[0], RG_VWLED_32K_CK_PDN);
+ if (ret)
+ return ret;
+
+ ret = regmap_clear_bits(regmap, regs->top_ckpdn[0], RG_VWLED_6M_CK_PDN);
+ if (ret)
+ return ret;
+
+ ret = regmap_clear_bits(regmap, regs->top_ckpdn[0], RG_VWLED_1M_CK_PDN);
+ if (ret)
+ return ret;
+
+ usleep_range(5000, 6000);
+
+ /* Enable WLED channel pair */
+ ret = regmap_set_bits(regmap, regs->iwled_en_ctrl, BIT(led->id));
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(regmap, regs->iwled_en_ctrl, BIT(led->id + 1));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mtk_wled_hw_off(struct led_classdev *cdev)
+{
+ struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
+ struct mt6323_leds *leds = led->parent;
+ const struct mt6323_regs *regs = leds->pdata->regs;
+ struct regmap *regmap = leds->hw->regmap;
+ int ret;
+
+ ret = regmap_clear_bits(regmap, regs->iwled_en_ctrl, BIT(led->id + 1));
+ if (ret)
+ return ret;
+
+ ret = regmap_clear_bits(regmap, regs->iwled_en_ctrl, BIT(led->id));
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(regmap, regs->top_ckpdn[0], RG_VWLED_32K_CK_PDN);
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(regmap, regs->top_ckpdn[0], RG_VWLED_6M_CK_PDN);
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(regmap, regs->top_ckpdn[0], RG_VWLED_1M_CK_PDN);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static enum led_brightness mt6323_get_wled_brightness(struct led_classdev *cdev)
+{
+ struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
+ struct mt6323_leds *leds = led->parent;
+ const struct mt6323_regs *regs = leds->pdata->regs;
+ struct regmap *regmap = leds->hw->regmap;
+ unsigned int status;
+ int ret;
+
+ ret = regmap_read(regmap, regs->iwled_en_ctrl, &status);
+ if (ret)
+ return 0;
+
+ /* Always two channels per WLED */
+ status &= BIT(led->id) | BIT(led->id + 1);
+
+ return status ? led->current_brightness : 0;
+}
+
+static int mt6323_wled_set_brightness(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
+ struct mt6323_leds *leds = led->parent;
+ int ret = 0;
+
+ mutex_lock(&leds->lock);
+
+ if (brightness) {
+ if (!led->current_brightness)
+ ret = mtk_wled_hw_on(cdev);
+ if (ret)
+ goto out;
+ } else {
+ ret = mtk_wled_hw_off(cdev);
+ if (ret)
+ goto out;
+ }
+
+ led->current_brightness = brightness;
+out:
+ mutex_unlock(&leds->lock);
+
+ return ret;
+}
+
static int mt6323_led_set_dt_default(struct led_classdev *cdev,
struct device_node *np)
{
@@ -369,9 +531,12 @@ static int mt6323_led_probe(struct platform_device *pdev)
struct mt6397_chip *hw = dev_get_drvdata(dev->parent);
struct mt6323_leds *leds;
struct mt6323_led *led;
+ const struct mt6323_regs *regs;
+ const struct mt6323_hwspec *spec;
int ret;
unsigned int status;
u32 reg;
+ u8 max_leds;
leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL);
if (!leds)
@@ -379,6 +544,10 @@ static int mt6323_led_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, leds);
leds->dev = dev;
+ leds->pdata = device_get_match_data(dev);
+ regs = leds->pdata->regs;
+ spec = leds->pdata->spec;
+ max_leds = spec->max_leds + spec->max_wleds;
/*
* leds->hw points to the underlying bus for the register
@@ -387,17 +556,18 @@ static int mt6323_led_probe(struct platform_device *pdev)
leds->hw = hw;
mutex_init(&leds->lock);
- status = MT6323_RG_DRV_32K_CK_PDN;
- ret = regmap_update_bits(leds->hw->regmap, MT6323_TOP_CKPDN0,
- MT6323_RG_DRV_32K_CK_PDN_MASK, ~status);
+ status = RG_DRV_32K_CK_PDN;
+ ret = regmap_update_bits(leds->hw->regmap, regs->top_ckpdn[0],
+ RG_DRV_32K_CK_PDN_MASK, ~status);
if (ret < 0) {
dev_err(leds->dev,
- "Failed to update MT6323_TOP_CKPDN0 Register\n");
+ "Failed to update TOP_CKPDN0 Register\n");
return ret;
}
for_each_available_child_of_node(np, child) {
struct led_init_data init_data = {};
+ bool is_wled;
ret = of_property_read_u32(child, "reg", &reg);
if (ret) {
@@ -405,7 +575,8 @@ static int mt6323_led_probe(struct platform_device *pdev)
goto put_child_node;
}
- if (reg >= MT6323_MAX_LEDS || leds->led[reg]) {
+ if (reg >= max_leds || reg >= MAX_SUPPORTED_LEDS ||
+ leds->led[reg]) {
dev_err(dev, "Invalid led reg %u\n", reg);
ret = -EINVAL;
goto put_child_node;
@@ -417,14 +588,24 @@ static int mt6323_led_probe(struct platform_device *pdev)
goto put_child_node;
}
+ is_wled = of_property_read_bool(child, "mediatek,is-wled");
+
leds->led[reg] = led;
leds->led[reg]->id = reg;
- leds->led[reg]->cdev.max_brightness = MT6323_MAX_BRIGHTNESS;
- leds->led[reg]->cdev.brightness_set_blocking =
- mt6323_led_set_brightness;
- leds->led[reg]->cdev.blink_set = mt6323_led_set_blink;
- leds->led[reg]->cdev.brightness_get =
- mt6323_get_led_hw_brightness;
+ leds->led[reg]->cdev.max_brightness = spec->max_brightness;
+
+ if (is_wled) {
+ leds->led[reg]->cdev.brightness_set_blocking =
+ mt6323_wled_set_brightness;
+ leds->led[reg]->cdev.brightness_get =
+ mt6323_get_wled_brightness;
+ } else {
+ leds->led[reg]->cdev.brightness_set_blocking =
+ mt6323_led_set_brightness;
+ leds->led[reg]->cdev.blink_set = mt6323_led_set_blink;
+ leds->led[reg]->cdev.brightness_get =
+ mt6323_get_led_hw_brightness;
+ }
leds->led[reg]->parent = leds;
ret = mt6323_led_set_dt_default(&leds->led[reg]->cdev, child);
@@ -454,23 +635,88 @@ put_child_node:
static int mt6323_led_remove(struct platform_device *pdev)
{
struct mt6323_leds *leds = platform_get_drvdata(pdev);
+ const struct mt6323_regs *regs = leds->pdata->regs;
int i;
/* Turn the LEDs off on driver removal. */
for (i = 0 ; leds->led[i] ; i++)
mt6323_led_hw_off(&leds->led[i]->cdev);
- regmap_update_bits(leds->hw->regmap, MT6323_TOP_CKPDN0,
- MT6323_RG_DRV_32K_CK_PDN_MASK,
- MT6323_RG_DRV_32K_CK_PDN);
+ regmap_update_bits(leds->hw->regmap, regs->top_ckpdn[0],
+ RG_DRV_32K_CK_PDN_MASK,
+ RG_DRV_32K_CK_PDN);
mutex_destroy(&leds->lock);
return 0;
}
+static const struct mt6323_regs mt6323_registers = {
+ .top_ckpdn = (const u16[]){ 0x102, 0x106, 0x10e },
+ .num_top_ckpdn = 3,
+ .top_ckcon = (const u16[]){ 0x120, 0x126 },
+ .num_top_ckcon = 2,
+ .isink_con = (const u16[]){ 0x330, 0x332, 0x334 },
+ .num_isink_con = 3,
+ .isink_max_regs = 4, /* ISINK[0..3] */
+ .isink_en_ctrl = 0x356,
+};
+
+static const struct mt6323_regs mt6331_registers = {
+ .top_ckpdn = (const u16[]){ 0x138, 0x13e, 0x144 },
+ .num_top_ckpdn = 3,
+ .top_ckcon = (const u16[]){ 0x14c, 0x14a },
+ .num_top_ckcon = 2,
+ .isink_con = (const u16[]){ 0x40c, 0x40e, 0x410, 0x412, 0x414 },
+ .num_isink_con = 5,
+ .isink_max_regs = 4, /* ISINK[0..3] */
+ .isink_en_ctrl = 0x43a,
+};
+
+static const struct mt6323_regs mt6332_registers = {
+ .top_ckpdn = (const u16[]){ 0x8094, 0x809a, 0x80a0 },
+ .num_top_ckpdn = 3,
+ .top_ckcon = (const u16[]){ 0x80a6, 0x80ac },
+ .num_top_ckcon = 2,
+ .isink_con = (const u16[]){ 0x8cd4 },
+ .num_isink_con = 1,
+ .isink_max_regs = 12, /* IWLED[0..2, 3..9] */
+ .iwled_en_ctrl = 0x8cda,
+};
+
+static const struct mt6323_hwspec mt6323_spec = {
+ .max_period = 10000,
+ .max_leds = 4,
+ .max_brightness = 6,
+ .unit_duty = 3125,
+};
+
+static const struct mt6323_hwspec mt6332_spec = {
+ /* There are no LEDs in MT6332. Only WLEDs are present. */
+ .max_leds = 0,
+ .max_wleds = 1,
+ .max_brightness = 1024,
+};
+
+static const struct mt6323_data mt6323_pdata = {
+ .regs = &mt6323_registers,
+ .spec = &mt6323_spec,
+};
+
+static const struct mt6323_data mt6331_pdata = {
+ .regs = &mt6331_registers,
+ .spec = &mt6323_spec,
+};
+
+static const struct mt6323_data mt6332_pdata = {
+ .regs = &mt6332_registers,
+ .spec = &mt6332_spec,
+};
+
static const struct of_device_id mt6323_led_dt_match[] = {
- { .compatible = "mediatek,mt6323-led" },
+ { .compatible = "mediatek,mt6323-led", .data = &mt6323_pdata},
+ { .compatible = "mediatek,mt6331-led", .data = &mt6331_pdata },
+ { .compatible = "mediatek,mt6332-led", .data = &mt6332_pdata },
{},
};
MODULE_DEVICE_TABLE(of, mt6323_led_dt_match);
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 15b1acfa442e..8b5c62083e50 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -102,7 +102,7 @@ static struct i2c_driver pca9532_driver = {
.name = "leds-pca953x",
.of_match_table = of_match_ptr(of_pca9532_leds_match),
},
- .probe_new = pca9532_probe,
+ .probe = pca9532_probe,
.remove = pca9532_remove,
.id_table = pca9532_id,
};
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 1edd092e7894..b10e1ef38db0 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -668,7 +668,7 @@ static struct i2c_driver pca955x_driver = {
.name = "leds-pca955x",
.of_match_table = of_pca955x_match,
},
- .probe_new = pca955x_probe,
+ .probe = pca955x_probe,
.id_table = pca955x_id,
};
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 9cd476db601f..47223c850e4b 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -431,7 +431,7 @@ static struct i2c_driver pca963x_driver = {
.name = "leds-pca963x",
.of_match_table = of_pca963x_match,
},
- .probe_new = pca963x_probe,
+ .probe = pca963x_probe,
.id_table = pca963x_id,
};
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index 2bc5c99daf51..2c7ffc3c78e6 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -98,7 +98,7 @@ static int spi_byte_probe(struct spi_device *spi)
return -ENOMEM;
of_property_read_string(child, "label", &name);
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
led->spi = spi;
mutex_init(&led->mutex);
led->cdef = device_get_match_data(dev);
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 634cabd5bb79..aab861771210 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -808,7 +808,7 @@ static struct i2c_driver tca6507_driver = {
.name = "leds-tca6507",
.of_match_table = of_match_ptr(of_tca6507_leds_match),
},
- .probe_new = tca6507_probe,
+ .probe = tca6507_probe,
.remove = tca6507_remove,
.id_table = tca6507_id,
};
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index 7e31db50036f..dfc6fb2b3e52 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -230,7 +230,7 @@ static struct i2c_driver tlc591xx_driver = {
.name = "tlc591xx",
.of_match_table = of_match_ptr(of_tlc591xx_leds_match),
},
- .probe_new = tlc591xx_probe,
+ .probe = tlc591xx_probe,
.id_table = tlc591xx_id,
};
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 013f551b32b2..64b2d7b6d3f3 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -271,7 +271,7 @@ static const struct i2c_device_id omnia_id[] = {
MODULE_DEVICE_TABLE(i2c, omnia_id);
static struct i2c_driver omnia_leds_driver = {
- .probe_new = omnia_leds_probe,
+ .probe = omnia_leds_probe,
.remove = omnia_leds_remove,
.id_table = omnia_id,
.driver = {
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 1c849814a491..59581b3e25ca 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -1173,8 +1173,10 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
i = 0;
for_each_available_child_of_node(np, child) {
ret = lpg_parse_channel(lpg, child, &led->channels[i]);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(child);
return ret;
+ }
info[i].color_index = led->channels[i]->color;
info[i].intensity = 0;
@@ -1352,8 +1354,10 @@ static int lpg_probe(struct platform_device *pdev)
for_each_available_child_of_node(pdev->dev.of_node, np) {
ret = lpg_add_led(lpg, np);
- if (ret)
+ if (ret) {
+ of_node_put(np);
return ret;
+ }
}
for (i = 0; i < lpg->num_channels; i++)
@@ -1414,6 +1418,20 @@ static const struct lpg_data pm8994_lpg_data = {
},
};
+/* PMI632 uses SDAM instead of LUT for pattern */
+static const struct lpg_data pmi632_lpg_data = {
+ .triled_base = 0xd000,
+
+ .num_channels = 5,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xb300, .triled_mask = BIT(7) },
+ { .base = 0xb400, .triled_mask = BIT(6) },
+ { .base = 0xb500, .triled_mask = BIT(5) },
+ { .base = 0xb600 },
+ { .base = 0xb700 },
+ },
+};
+
static const struct lpg_data pmi8994_lpg_data = {
.lut_base = 0xb000,
.lut_size = 24,
@@ -1505,6 +1523,7 @@ static const struct of_device_id lpg_of_table[] = {
{ .compatible = "qcom,pm8916-pwm", .data = &pm8916_pwm_data },
{ .compatible = "qcom,pm8941-lpg", .data = &pm8941_lpg_data },
{ .compatible = "qcom,pm8994-lpg", .data = &pm8994_lpg_data },
+ { .compatible = "qcom,pmi632-lpg", .data = &pmi632_lpg_data },
{ .compatible = "qcom,pmi8994-lpg", .data = &pmi8994_lpg_data },
{ .compatible = "qcom,pmi8998-lpg", .data = &pmi8998_lpg_data },
{ .compatible = "qcom,pmc8180c-lpg", .data = &pm8150l_lpg_data },
diff --git a/drivers/leds/simple/Kconfig b/drivers/leds/simple/Kconfig
index fd2b8225d926..44fa0f93cb3b 100644
--- a/drivers/leds/simple/Kconfig
+++ b/drivers/leds/simple/Kconfig
@@ -1,11 +1,36 @@
# SPDX-License-Identifier: GPL-2.0-only
config LEDS_SIEMENS_SIMATIC_IPC
tristate "LED driver for Siemens Simatic IPCs"
- depends on LEDS_GPIO
depends on SIEMENS_SIMATIC_IPC
help
This option enables support for the LEDs of several Industrial PCs
from Siemens.
- To compile this driver as a module, choose M here: the modules
- will be called simatic-ipc-leds and simatic-ipc-leds-gpio.
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-leds.
+
+config LEDS_SIEMENS_SIMATIC_IPC_APOLLOLAKE
+ tristate "LED driver for Siemens Simatic IPCs based on Intel Apollo Lake GPIO"
+ depends on LEDS_GPIO
+ depends on PINCTRL_BROXTON
+ depends on SIEMENS_SIMATIC_IPC
+ default LEDS_SIEMENS_SIMATIC_IPC
+ help
+ This option enables support for the LEDs of several Industrial PCs
+	  from Siemens based on Apollo Lake GPIO, e.g. the IPC127E.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-leds-gpio-apollolake.
+
+config LEDS_SIEMENS_SIMATIC_IPC_F7188X
+ tristate "LED driver for Siemens Simatic IPCs based on Nuvoton GPIO"
+ depends on LEDS_GPIO
+ depends on GPIO_F7188X
+ depends on SIEMENS_SIMATIC_IPC
+ default LEDS_SIEMENS_SIMATIC_IPC
+ help
+ This option enables support for the LEDs of several Industrial PCs
+	  from Siemens based on Nuvoton GPIO, e.g. the IPC227G.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-leds-gpio-f7188x.
diff --git a/drivers/leds/simple/Makefile b/drivers/leds/simple/Makefile
index 1c7ef5e1324b..e3e840cea275 100644
--- a/drivers/leds/simple/Makefile
+++ b/drivers/leds/simple/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds.o
-obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds-gpio.o
+obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds.o
+obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC_APOLLOLAKE) += simatic-ipc-leds-gpio-core.o simatic-ipc-leds-gpio-apollolake.o
+obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC_F7188X) += simatic-ipc-leds-gpio-core.o simatic-ipc-leds-gpio-f7188x.o
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c b/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c
new file mode 100644
index 000000000000..e1c712729dcf
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for GPIO based LEDs
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Author:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#include "simatic-ipc-leds-gpio.h"
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 5, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
+ },
+};
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table_extra = {
+ .dev_id = NULL, /* Filled during initialization */
+ .table = {
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH),
+ {} /* Terminating entry */
+ },
+};
+
+static int simatic_ipc_leds_gpio_apollolake_probe(struct platform_device *pdev)
+{
+ return simatic_ipc_leds_gpio_probe(pdev, &simatic_ipc_led_gpio_table,
+ &simatic_ipc_led_gpio_table_extra);
+}
+
+static int simatic_ipc_leds_gpio_apollolake_remove(struct platform_device *pdev)
+{
+ return simatic_ipc_leds_gpio_remove(pdev, &simatic_ipc_led_gpio_table,
+ &simatic_ipc_led_gpio_table_extra);
+}
+
+static struct platform_driver simatic_ipc_led_gpio_apollolake_driver = {
+ .probe = simatic_ipc_leds_gpio_apollolake_probe,
+ .remove = simatic_ipc_leds_gpio_apollolake_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+module_platform_driver(simatic_ipc_led_gpio_apollolake_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: simatic-ipc-leds-gpio-core platform:apollolake-pinctrl");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-core.c b/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
new file mode 100644
index 000000000000..2a21b663df87
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for GPIO based LEDs
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Author:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#include "simatic-ipc-leds-gpio.h"
+
+static struct platform_device *simatic_leds_pdev;
+
+static const struct gpio_led simatic_ipc_gpio_leds[] = {
+ { .name = "red:" LED_FUNCTION_STATUS "-1" },
+ { .name = "green:" LED_FUNCTION_STATUS "-1" },
+ { .name = "red:" LED_FUNCTION_STATUS "-2" },
+ { .name = "green:" LED_FUNCTION_STATUS "-2" },
+ { .name = "red:" LED_FUNCTION_STATUS "-3" },
+ { .name = "green:" LED_FUNCTION_STATUS "-3" },
+};
+
+static const struct gpio_led_platform_data simatic_ipc_gpio_leds_pdata = {
+ .num_leds = ARRAY_SIZE(simatic_ipc_gpio_leds),
+ .leds = simatic_ipc_gpio_leds,
+};
+
+int simatic_ipc_leds_gpio_remove(struct platform_device *pdev,
+ struct gpiod_lookup_table *table,
+ struct gpiod_lookup_table *table_extra)
+{
+ gpiod_remove_lookup_table(table);
+ gpiod_remove_lookup_table(table_extra);
+ platform_device_unregister(simatic_leds_pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(simatic_ipc_leds_gpio_remove);
+
+int simatic_ipc_leds_gpio_probe(struct platform_device *pdev,
+ struct gpiod_lookup_table *table,
+ struct gpiod_lookup_table *table_extra)
+{
+ const struct simatic_ipc_platform *plat = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct gpio_desc *gpiod;
+ int err;
+
+ switch (plat->devmode) {
+ case SIMATIC_IPC_DEVICE_127E:
+ case SIMATIC_IPC_DEVICE_227G:
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ gpiod_add_lookup_table(table);
+ simatic_leds_pdev = platform_device_register_resndata(NULL,
+ "leds-gpio", PLATFORM_DEVID_NONE, NULL, 0,
+ &simatic_ipc_gpio_leds_pdata,
+ sizeof(simatic_ipc_gpio_leds_pdata));
+ if (IS_ERR(simatic_leds_pdev)) {
+ err = PTR_ERR(simatic_leds_pdev);
+ goto out;
+ }
+
+ table_extra->dev_id = dev_name(dev);
+ gpiod_add_lookup_table(table_extra);
+
+ /* PM_BIOS_BOOT_N */
+ gpiod = gpiod_get_index(dev, NULL, 6, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ err = PTR_ERR(gpiod);
+ goto out;
+ }
+ gpiod_put(gpiod);
+
+ /* PM_WDT_OUT */
+ gpiod = gpiod_get_index(dev, NULL, 7, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ err = PTR_ERR(gpiod);
+ goto out;
+ }
+ gpiod_put(gpiod);
+
+ return 0;
+out:
+ simatic_ipc_leds_gpio_remove(pdev, table, table_extra);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(simatic_ipc_leds_gpio_probe);
+
+MODULE_LICENSE("GPL v2");
+MODULE_SOFTDEP("pre: platform:leds-gpio");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c b/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c
new file mode 100644
index 000000000000..583a6b6c7c22
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for GPIO based LEDs
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Author:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#include "simatic-ipc-leds-gpio.h"
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 0, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 1, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 2, NULL, 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 3, NULL, 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 4, NULL, 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 5, NULL, 5, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
+ },
+};
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table_extra = {
+ .dev_id = NULL, /* Filled during initialization */
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-f7188x-3", 6, NULL, 6, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-f7188x-3", 7, NULL, 7, GPIO_ACTIVE_HIGH),
+ {} /* Terminating entry */
+ },
+};
+
+static int simatic_ipc_leds_gpio_f7188x_probe(struct platform_device *pdev)
+{
+ return simatic_ipc_leds_gpio_probe(pdev, &simatic_ipc_led_gpio_table,
+ &simatic_ipc_led_gpio_table_extra);
+}
+
+static int simatic_ipc_leds_gpio_f7188x_remove(struct platform_device *pdev)
+{
+ return simatic_ipc_leds_gpio_remove(pdev, &simatic_ipc_led_gpio_table,
+ &simatic_ipc_led_gpio_table_extra);
+}
+
+static struct platform_driver simatic_ipc_led_gpio_driver = {
+ .probe = simatic_ipc_leds_gpio_f7188x_probe,
+ .remove = simatic_ipc_leds_gpio_f7188x_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+module_platform_driver(simatic_ipc_led_gpio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: simatic-ipc-leds-gpio-core gpio_f7188x");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c
deleted file mode 100644
index e8d329b5a68c..000000000000
--- a/drivers/leds/simple/simatic-ipc-leds-gpio.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Siemens SIMATIC IPC driver for GPIO based LEDs
- *
- * Copyright (c) Siemens AG, 2022
- *
- * Authors:
- * Henning Schild <henning.schild@siemens.com>
- */
-
-#include <linux/gpio/machine.h>
-#include <linux/gpio/consumer.h>
-#include <linux/leds.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/x86/simatic-ipc-base.h>
-
-static struct gpiod_lookup_table *simatic_ipc_led_gpio_table;
-
-static struct gpiod_lookup_table simatic_ipc_led_gpio_table_127e = {
- .dev_id = "leds-gpio",
- .table = {
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 1, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 2, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 3, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 4, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 5, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH),
- },
-};
-
-static struct gpiod_lookup_table simatic_ipc_led_gpio_table_227g = {
- .dev_id = "leds-gpio",
- .table = {
- GPIO_LOOKUP_IDX("gpio-f7188x-2", 0, NULL, 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("gpio-f7188x-2", 1, NULL, 1, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("gpio-f7188x-2", 2, NULL, 2, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("gpio-f7188x-2", 3, NULL, 3, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("gpio-f7188x-2", 4, NULL, 4, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("gpio-f7188x-2", 5, NULL, 5, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("gpio-f7188x-3", 6, NULL, 6, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX("gpio-f7188x-3", 7, NULL, 7, GPIO_ACTIVE_HIGH),
- }
-};
-
-static const struct gpio_led simatic_ipc_gpio_leds[] = {
- { .name = "red:" LED_FUNCTION_STATUS "-1" },
- { .name = "green:" LED_FUNCTION_STATUS "-1" },
- { .name = "red:" LED_FUNCTION_STATUS "-2" },
- { .name = "green:" LED_FUNCTION_STATUS "-2" },
- { .name = "red:" LED_FUNCTION_STATUS "-3" },
- { .name = "green:" LED_FUNCTION_STATUS "-3" },
-};
-
-static const struct gpio_led_platform_data simatic_ipc_gpio_leds_pdata = {
- .num_leds = ARRAY_SIZE(simatic_ipc_gpio_leds),
- .leds = simatic_ipc_gpio_leds,
-};
-
-static struct platform_device *simatic_leds_pdev;
-
-static int simatic_ipc_leds_gpio_remove(struct platform_device *pdev)
-{
- gpiod_remove_lookup_table(simatic_ipc_led_gpio_table);
- platform_device_unregister(simatic_leds_pdev);
-
- return 0;
-}
-
-static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
-{
- const struct simatic_ipc_platform *plat = pdev->dev.platform_data;
- struct gpio_desc *gpiod;
- int err;
-
- switch (plat->devmode) {
- case SIMATIC_IPC_DEVICE_127E:
- if (!IS_ENABLED(CONFIG_PINCTRL_BROXTON))
- return -ENODEV;
- simatic_ipc_led_gpio_table = &simatic_ipc_led_gpio_table_127e;
- break;
- case SIMATIC_IPC_DEVICE_227G:
- if (!IS_ENABLED(CONFIG_GPIO_F7188X))
- return -ENODEV;
- request_module("gpio-f7188x");
- simatic_ipc_led_gpio_table = &simatic_ipc_led_gpio_table_227g;
- break;
- default:
- return -ENODEV;
- }
-
- gpiod_add_lookup_table(simatic_ipc_led_gpio_table);
- simatic_leds_pdev = platform_device_register_resndata(NULL,
- "leds-gpio", PLATFORM_DEVID_NONE, NULL, 0,
- &simatic_ipc_gpio_leds_pdata,
- sizeof(simatic_ipc_gpio_leds_pdata));
- if (IS_ERR(simatic_leds_pdev)) {
- err = PTR_ERR(simatic_leds_pdev);
- goto out;
- }
-
- /* PM_BIOS_BOOT_N */
- gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 6, GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- err = PTR_ERR(gpiod);
- goto out;
- }
- gpiod_put(gpiod);
-
- /* PM_WDT_OUT */
- gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 7, GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- err = PTR_ERR(gpiod);
- goto out;
- }
- gpiod_put(gpiod);
-
- return 0;
-out:
- simatic_ipc_leds_gpio_remove(pdev);
-
- return err;
-}
-
-static struct platform_driver simatic_ipc_led_gpio_driver = {
- .probe = simatic_ipc_leds_gpio_probe,
- .remove = simatic_ipc_leds_gpio_remove,
- .driver = {
- .name = KBUILD_MODNAME,
- }
-};
-module_platform_driver(simatic_ipc_led_gpio_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" KBUILD_MODNAME);
-MODULE_SOFTDEP("pre: platform:leds-gpio");
-MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.h b/drivers/leds/simple/simatic-ipc-leds-gpio.h
new file mode 100644
index 000000000000..bf258c32f83d
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC driver for GPIO based LEDs
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Author:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#ifndef _SIMATIC_IPC_LEDS_GPIO_H
+#define _SIMATIC_IPC_LEDS_GPIO_H
+
+int simatic_ipc_leds_gpio_probe(struct platform_device *pdev,
+ struct gpiod_lookup_table *table,
+ struct gpiod_lookup_table *table_extra);
+
+int simatic_ipc_leds_gpio_remove(struct platform_device *pdev,
+ struct gpiod_lookup_table *table,
+ struct gpiod_lookup_table *table_extra);
+
+#endif /* _SIMATIC_IPC_LEDS_GPIO_H */
diff --git a/drivers/leds/simple/simatic-ipc-leds.c b/drivers/leds/simple/simatic-ipc-leds.c
index 4894c228c165..2124f6d09930 100644
--- a/drivers/leds/simple/simatic-ipc-leds.c
+++ b/drivers/leds/simple/simatic-ipc-leds.c
@@ -126,7 +126,6 @@ static struct platform_driver simatic_ipc_led_driver = {
.name = KBUILD_MODNAME,
}
};
-
module_platform_driver(simatic_ipc_led_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-disk.c b/drivers/leds/trigger/ledtrig-disk.c
index 0b7dfbd04273..e9b87ee944f2 100644
--- a/drivers/leds/trigger/ledtrig-disk.c
+++ b/drivers/leds/trigger/ledtrig-disk.c
@@ -19,16 +19,13 @@ DEFINE_LED_TRIGGER(ledtrig_disk_write);
void ledtrig_disk_activity(bool write)
{
- unsigned long blink_delay = BLINK_DELAY;
-
- led_trigger_blink_oneshot(ledtrig_disk,
- &blink_delay, &blink_delay, 0);
+ led_trigger_blink_oneshot(ledtrig_disk, BLINK_DELAY, BLINK_DELAY, 0);
if (write)
led_trigger_blink_oneshot(ledtrig_disk_write,
- &blink_delay, &blink_delay, 0);
+ BLINK_DELAY, BLINK_DELAY, 0);
else
led_trigger_blink_oneshot(ledtrig_disk_read,
- &blink_delay, &blink_delay, 0);
+ BLINK_DELAY, BLINK_DELAY, 0);
}
EXPORT_SYMBOL(ledtrig_disk_activity);
diff --git a/drivers/leds/trigger/ledtrig-mtd.c b/drivers/leds/trigger/ledtrig-mtd.c
index 8fa763c2269b..bbe6876a249d 100644
--- a/drivers/leds/trigger/ledtrig-mtd.c
+++ b/drivers/leds/trigger/ledtrig-mtd.c
@@ -22,12 +22,8 @@ DEFINE_LED_TRIGGER(ledtrig_nand);
void ledtrig_mtd_activity(void)
{
- unsigned long blink_delay = BLINK_DELAY;
-
- led_trigger_blink_oneshot(ledtrig_mtd,
- &blink_delay, &blink_delay, 0);
- led_trigger_blink_oneshot(ledtrig_nand,
- &blink_delay, &blink_delay, 0);
+ led_trigger_blink_oneshot(ledtrig_mtd, BLINK_DELAY, BLINK_DELAY, 0);
+ led_trigger_blink_oneshot(ledtrig_nand, BLINK_DELAY, BLINK_DELAY, 0);
}
EXPORT_SYMBOL(ledtrig_mtd_activity);
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 32b66703068a..c9bc5a91ec83 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -462,8 +462,7 @@ static int netdev_trig_notify(struct notifier_block *nb,
get_device_state(trigger_data);
fallthrough;
case NETDEV_REGISTER:
- if (trigger_data->net_dev)
- dev_put(trigger_data->net_dev);
+ dev_put(trigger_data->net_dev);
dev_hold(dev);
trigger_data->net_dev = dev;
break;
@@ -594,8 +593,7 @@ static void netdev_trig_deactivate(struct led_classdev *led_cdev)
cancel_delayed_work_sync(&trigger_data->work);
- if (trigger_data->net_dev)
- dev_put(trigger_data->net_dev);
+ dev_put(trigger_data->net_dev);
kfree(trigger_data);
}
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 573481e436f5..7f98e7436d94 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -97,6 +97,7 @@ struct tegra_hsp_soc {
const struct tegra_hsp_db_map *map;
bool has_per_mb_ie;
bool has_128_bit_mb;
+ unsigned int reg_stride;
};
struct tegra_hsp {
@@ -279,7 +280,7 @@ tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
return ERR_PTR(-ENOMEM);
offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) * SZ_64K;
- offset += index * 0x100;
+ offset += index * hsp->soc->reg_stride;
db->channel.regs = hsp->regs + offset;
db->channel.hsp = hsp;
@@ -916,24 +917,35 @@ static const struct tegra_hsp_soc tegra186_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = false,
.has_128_bit_mb = false,
+ .reg_stride = 0x100,
};
static const struct tegra_hsp_soc tegra194_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = true,
.has_128_bit_mb = false,
+ .reg_stride = 0x100,
};
static const struct tegra_hsp_soc tegra234_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = false,
.has_128_bit_mb = true,
+ .reg_stride = 0x100,
+};
+
+static const struct tegra_hsp_soc tegra264_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+ .has_per_mb_ie = false,
+ .has_128_bit_mb = true,
+ .reg_stride = 0x1000,
};
static const struct of_device_id tegra_hsp_match[] = {
{ .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
{ .compatible = "nvidia,tegra194-hsp", .data = &tegra194_hsp_soc },
{ .compatible = "nvidia,tegra234-hsp", .data = &tegra234_hsp_soc },
+ { .compatible = "nvidia,tegra264-hsp", .data = &tegra264_hsp_soc },
{ }
};
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index ddac423ac1a9..03048cbda525 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -430,14 +430,20 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
/* Ensure all unused data is 0 */
data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
writel(data_trail, data_reg);
- data_reg++;
+ data_reg += sizeof(u32);
}
+
/*
* 'data_reg' indicates next register to write. If we did not already
* write on tx complete reg(last reg), we must do so for transmit
+ * In addition, we also need to make sure all intermediate data
+ * registers (if any are required) are reset to 0 for TISCI backward
+ * compatibility to be maintained.
*/
- if (data_reg <= qinst->queue_buff_end)
- writel(0, qinst->queue_buff_end);
+ while (data_reg <= qinst->queue_buff_end) {
+ writel(0, data_reg);
+ data_reg += sizeof(u32);
+ }
/* If we are in polled mode, wait for a response before proceeding */
if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx))
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e2a803683105..0ae2b3676293 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1369,7 +1369,7 @@ static void cached_dev_free(struct closure *cl)
put_page(virt_to_page(dc->sb_disk));
if (!IS_ERR_OR_NULL(dc->bdev))
- blkdev_put(dc->bdev, bcache_kobj);
+ blkdev_put(dc->bdev, dc);
wake_up(&unregister_wait);
@@ -1453,7 +1453,6 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
- dc->bdev->bd_holder = dc;
dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
@@ -2218,7 +2217,7 @@ void bch_cache_release(struct kobject *kobj)
put_page(virt_to_page(ca->sb_disk));
if (!IS_ERR_OR_NULL(ca->bdev))
- blkdev_put(ca->bdev, bcache_kobj);
+ blkdev_put(ca->bdev, ca);
kfree(ca);
module_put(THIS_MODULE);
@@ -2345,7 +2344,6 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
- ca->bdev->bd_holder = ca;
ca->sb_disk = sb_disk;
if (bdev_max_discard_sectors((bdev)))
@@ -2359,7 +2357,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
* call blkdev_put() to bdev in bch_cache_release(). So we
* explicitly call blkdev_put() here.
*/
- blkdev_put(bdev, bcache_kobj);
+ blkdev_put(bdev, ca);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
else if (ret == -EPERM)
@@ -2448,6 +2446,7 @@ struct async_reg_args {
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
struct block_device *bdev;
+ void *holder;
};
static void register_bdev_worker(struct work_struct *work)
@@ -2455,22 +2454,13 @@ static void register_bdev_worker(struct work_struct *work)
int fail = false;
struct async_reg_args *args =
container_of(work, struct async_reg_args, reg_work.work);
- struct cached_dev *dc;
-
- dc = kzalloc(sizeof(*dc), GFP_KERNEL);
- if (!dc) {
- fail = true;
- put_page(virt_to_page(args->sb_disk));
- blkdev_put(args->bdev, bcache_kobj);
- goto out;
- }
mutex_lock(&bch_register_lock);
- if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0)
+ if (register_bdev(args->sb, args->sb_disk, args->bdev, args->holder)
+ < 0)
fail = true;
mutex_unlock(&bch_register_lock);
-out:
if (fail)
pr_info("error %s: fail to register backing device\n",
args->path);
@@ -2485,21 +2475,11 @@ static void register_cache_worker(struct work_struct *work)
int fail = false;
struct async_reg_args *args =
container_of(work, struct async_reg_args, reg_work.work);
- struct cache *ca;
-
- ca = kzalloc(sizeof(*ca), GFP_KERNEL);
- if (!ca) {
- fail = true;
- put_page(virt_to_page(args->sb_disk));
- blkdev_put(args->bdev, bcache_kobj);
- goto out;
- }
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0)
+ if (register_cache(args->sb, args->sb_disk, args->bdev, args->holder))
fail = true;
-out:
if (fail)
pr_info("error %s: fail to register cache device\n",
args->path);
@@ -2520,6 +2500,13 @@ static void register_device_async(struct async_reg_args *args)
queue_delayed_work(system_wq, &args->reg_work, 10);
}
+static void *alloc_holder_object(struct cache_sb *sb)
+{
+ if (SB_IS_BDEV(sb))
+ return kzalloc(sizeof(struct cached_dev), GFP_KERNEL);
+ return kzalloc(sizeof(struct cache), GFP_KERNEL);
+}
+
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
@@ -2527,9 +2514,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
char *path = NULL;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
- struct block_device *bdev;
+ struct block_device *bdev, *bdev2;
+ void *holder = NULL;
ssize_t ret;
bool async_registration = false;
+ bool quiet = false;
#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
async_registration = true;
@@ -2558,10 +2547,34 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
ret = -EINVAL;
err = "failed to open device";
- bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ | BLK_OPEN_WRITE,
- bcache_kobj, NULL);
+ bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
+ if (IS_ERR(bdev))
+ goto out_free_sb;
+
+ err = "failed to set blocksize";
+ if (set_blocksize(bdev, 4096))
+ goto out_blkdev_put;
+
+ err = read_super(sb, bdev, &sb_disk);
+ if (err)
+ goto out_blkdev_put;
+
+ holder = alloc_holder_object(sb);
+ if (!holder) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
+ goto out_put_sb_page;
+ }
+
+ /* Now reopen in exclusive mode with proper holder */
+ bdev2 = blkdev_get_by_dev(bdev->bd_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ holder, NULL);
+ blkdev_put(bdev, NULL);
+ bdev = bdev2;
if (IS_ERR(bdev)) {
- if (bdev == ERR_PTR(-EBUSY)) {
+ ret = PTR_ERR(bdev);
+ bdev = NULL;
+ if (ret == -EBUSY) {
dev_t dev;
mutex_lock(&bch_register_lock);
@@ -2571,20 +2584,14 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
else
err = "device busy";
mutex_unlock(&bch_register_lock);
- if (attr == &ksysfs_register_quiet)
- goto done;
+ if (attr == &ksysfs_register_quiet) {
+ quiet = true;
+ ret = size;
+ }
}
- goto out_free_sb;
+ goto out_free_holder;
}
- err = "failed to set blocksize";
- if (set_blocksize(bdev, 4096))
- goto out_blkdev_put;
-
- err = read_super(sb, bdev, &sb_disk);
- if (err)
- goto out_blkdev_put;
-
err = "failed to register device";
if (async_registration) {
@@ -2595,59 +2602,46 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!args) {
ret = -ENOMEM;
err = "cannot allocate memory";
- goto out_put_sb_page;
+ goto out_free_holder;
}
args->path = path;
args->sb = sb;
args->sb_disk = sb_disk;
args->bdev = bdev;
+ args->holder = holder;
register_device_async(args);
/* No wait and returns to user space */
goto async_done;
}
if (SB_IS_BDEV(sb)) {
- struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
-
- if (!dc) {
- ret = -ENOMEM;
- err = "cannot allocate memory";
- goto out_put_sb_page;
- }
-
mutex_lock(&bch_register_lock);
- ret = register_bdev(sb, sb_disk, bdev, dc);
+ ret = register_bdev(sb, sb_disk, bdev, holder);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
if (ret < 0)
goto out_free_sb;
} else {
- struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
-
- if (!ca) {
- ret = -ENOMEM;
- err = "cannot allocate memory";
- goto out_put_sb_page;
- }
-
/* blkdev_put() will be called in bch_cache_release() */
- ret = register_cache(sb, sb_disk, bdev, ca);
+ ret = register_cache(sb, sb_disk, bdev, holder);
if (ret)
goto out_free_sb;
}
-done:
kfree(sb);
kfree(path);
module_put(THIS_MODULE);
async_done:
return size;
+out_free_holder:
+ kfree(holder);
out_put_sb_page:
put_page(virt_to_page(sb_disk));
out_blkdev_put:
- blkdev_put(bdev, register_bcache);
+ if (bdev)
+ blkdev_put(bdev, holder);
out_free_sb:
kfree(sb);
out_free_path:
@@ -2656,7 +2650,8 @@ out_free_path:
out_module_put:
module_put(THIS_MODULE);
out:
- pr_info("error %s: %s\n", path?path:"", err);
+ if (!quiet)
+ pr_info("error %s: %s\n", path?path:"", err);
return ret;
}
diff --git a/drivers/md/dm-verity-loadpin.c b/drivers/md/dm-verity-loadpin.c
index 4f78cc55c251..0666699b6858 100644
--- a/drivers/md/dm-verity-loadpin.c
+++ b/drivers/md/dm-verity-loadpin.c
@@ -58,6 +58,9 @@ bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
int srcu_idx;
bool trusted = false;
+ if (bdev == NULL)
+ return false;
+
if (list_empty(&dm_verity_loadpin_trusted_root_digests))
return false;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cf3733c90c47..2e38ef421d69 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -643,7 +643,6 @@ void mddev_init(struct mddev *mddev)
{
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
- mutex_init(&mddev->delete_mutex);
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
@@ -749,26 +748,15 @@ static void mddev_free(struct mddev *mddev)
static const struct attribute_group md_redundancy_group;
-static void md_free_rdev(struct mddev *mddev)
+void mddev_unlock(struct mddev *mddev)
{
struct md_rdev *rdev;
struct md_rdev *tmp;
+ LIST_HEAD(delete);
- mutex_lock(&mddev->delete_mutex);
- if (list_empty(&mddev->deleting))
- goto out;
-
- list_for_each_entry_safe(rdev, tmp, &mddev->deleting, same_set) {
- list_del_init(&rdev->same_set);
- kobject_del(&rdev->kobj);
- export_rdev(rdev, mddev);
- }
-out:
- mutex_unlock(&mddev->delete_mutex);
-}
+ if (!list_empty(&mddev->deleting))
+ list_splice_init(&mddev->deleting, &delete);
-void mddev_unlock(struct mddev *mddev)
-{
if (mddev->to_remove) {
/* These cannot be removed under reconfig_mutex as
* an access to the files will try to take reconfig_mutex
@@ -808,7 +796,11 @@ void mddev_unlock(struct mddev *mddev)
} else
mutex_unlock(&mddev->reconfig_mutex);
- md_free_rdev(mddev);
+ list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
+ list_del_init(&rdev->same_set);
+ kobject_del(&rdev->kobj);
+ export_rdev(rdev, mddev);
+ }
md_wakeup_thread(mddev->thread);
wake_up(&mddev->sb_wait);
@@ -2458,7 +2450,7 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
- blkdev_put(rdev->bdev, mddev->major_version == -2 ? &claim_rdev : rdev);
+ blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev);
rdev->bdev = NULL;
kobject_put(&rdev->kobj);
}
@@ -2488,9 +2480,7 @@ static void md_kick_rdev_from_array(struct md_rdev *rdev)
* reconfig_mutex is held, hence it can't be called under
* reconfig_mutex and it's delayed to mddev_unlock().
*/
- mutex_lock(&mddev->delete_mutex);
list_add(&rdev->same_set, &mddev->deleting);
- mutex_unlock(&mddev->delete_mutex);
}
static void export_array(struct mddev *mddev)
@@ -6140,7 +6130,7 @@ static void md_clean(struct mddev *mddev)
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
- mddev->external = 0;
+ /* we still need mddev->external in export_rdev, do not clear it yet */
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index bfd2306bc750..1aef86bf3fc3 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -531,11 +531,9 @@ struct mddev {
/*
* Temporarily store rdev that will be finally removed when
- * reconfig_mutex is unlocked.
+ * reconfig_mutex is unlocked, protected by reconfig_mutex.
*/
struct list_head deleting;
- /* Protect the deleting list */
- struct mutex delete_mutex;
bool has_superblocks:1;
bool fail_last_dev:1;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f8ee9a95e25d..d1ac73fcd852 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -270,6 +270,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
goto abort;
}
+ if (conf->layout == RAID0_ORIG_LAYOUT) {
+ for (i = 1; i < conf->nr_strip_zones; i++) {
+ sector_t first_sector = conf->strip_zone[i-1].zone_end;
+
+ sector_div(first_sector, mddev->chunk_sectors);
+ zone = conf->strip_zone + i;
+ /* disk_shift is first disk index used in the zone */
+ zone->disk_shift = sector_div(first_sector,
+ zone->nb_dev);
+ }
+ }
+
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -431,6 +443,20 @@ exit_acct_set:
return ret;
}
+/*
+ * Convert disk_index to the disk order in which it is read/written.
+ * For example, if we have 4 disks, they are numbered 0,1,2,3. If we
+ * write the disks starting at disk 3, the read/write order is disk 3,
+ * then 0, then 1, and then disk 2, and we want map_disk_shift() to map
+ * the disks as follows: 0,1,2,3 => 1,2,3,0. So disk 0 would map to 1,
+ * 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in that
+ * 'output' space to understand the read/write disk ordering.
+ */
+static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
+{
+ return ((disk_index + num_disks - disk_shift) % num_disks);
+}
+
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
struct r0conf *conf = mddev->private;
@@ -444,7 +470,9 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
sector_t end_disk_offset;
unsigned int end_disk_index;
unsigned int disk;
+ sector_t orig_start, orig_end;
+ orig_start = start;
zone = find_zone(conf, &start);
if (bio_end_sector(bio) > zone->zone_end) {
@@ -458,6 +486,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
} else
end = bio_end_sector(bio);
+ orig_end = end;
if (zone != conf->strip_zone)
end = end - zone[-1].zone_end;
@@ -469,13 +498,26 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
last_stripe_index = end;
sector_div(last_stripe_index, stripe_size);
- start_disk_index = (int)(start - first_stripe_index * stripe_size) /
- mddev->chunk_sectors;
+ /* In the first zone the original and alternate layouts are the same */
+ if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
+ sector_div(orig_start, mddev->chunk_sectors);
+ start_disk_index = sector_div(orig_start, zone->nb_dev);
+ start_disk_index = map_disk_shift(start_disk_index,
+ zone->nb_dev,
+ zone->disk_shift);
+ sector_div(orig_end, mddev->chunk_sectors);
+ end_disk_index = sector_div(orig_end, zone->nb_dev);
+ end_disk_index = map_disk_shift(end_disk_index,
+ zone->nb_dev, zone->disk_shift);
+ } else {
+ start_disk_index = (int)(start - first_stripe_index * stripe_size) /
+ mddev->chunk_sectors;
+ end_disk_index = (int)(end - last_stripe_index * stripe_size) /
+ mddev->chunk_sectors;
+ }
start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
mddev->chunk_sectors) +
first_stripe_index * mddev->chunk_sectors;
- end_disk_index = (int)(end - last_stripe_index * stripe_size) /
- mddev->chunk_sectors;
end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
mddev->chunk_sectors) +
last_stripe_index * mddev->chunk_sectors;
@@ -483,18 +525,22 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
for (disk = 0; disk < zone->nb_dev; disk++) {
sector_t dev_start, dev_end;
struct md_rdev *rdev;
+ int compare_disk;
+
+ compare_disk = map_disk_shift(disk, zone->nb_dev,
+ zone->disk_shift);
- if (disk < start_disk_index)
+ if (compare_disk < start_disk_index)
dev_start = (first_stripe_index + 1) *
mddev->chunk_sectors;
- else if (disk > start_disk_index)
+ else if (compare_disk > start_disk_index)
dev_start = first_stripe_index * mddev->chunk_sectors;
else
dev_start = start_disk_offset;
- if (disk < end_disk_index)
+ if (compare_disk < end_disk_index)
dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
- else if (disk > end_disk_index)
+ else if (compare_disk > end_disk_index)
dev_end = last_stripe_index * mddev->chunk_sectors;
else
dev_end = end_disk_offset;
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 3816e5477db1..8cc761ca7423 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -6,6 +6,7 @@ struct strip_zone {
sector_t zone_end; /* Start of the next zone (in sectors) */
sector_t dev_start; /* Zone offset in real dev (in sectors) */
int nb_dev; /* # of devices attached to the zone */
+ int disk_shift; /* start disk for the original layout */
};
/* Linux 3.14 (20d0189b101) made an unintended change to
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 169ebe296f2d..3f22edec70e7 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -116,7 +116,7 @@ static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
static inline void raid1_submit_write(struct bio *bio)
{
- struct md_rdev *rdev = (struct md_rdev *)bio->bi_bdev;
+ struct md_rdev *rdev = (void *)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d0de8c9fb3cf..5051149e27bb 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -325,7 +325,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
bio->bi_status = BLK_STS_IOERR;
- if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ if (r10_bio->start_time)
bio_end_io_acct(bio, r10_bio->start_time);
bio_endio(bio);
/*
@@ -1118,7 +1118,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
spin_unlock_irq(&conf->device_lock);
- wake_up(&conf->wait_barrier);
+ wake_up_barrier(conf);
md_wakeup_thread(mddev->thread);
kfree(plug);
return;
@@ -1127,7 +1127,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
/* we aren't scheduling, so we can do the write-out directly. */
bio = bio_list_get(&plug->pending);
raid1_prepare_flush_writes(mddev->bitmap);
- wake_up(&conf->wait_barrier);
+ wake_up_barrier(conf);
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
diff --git a/drivers/media/cec/i2c/Kconfig b/drivers/media/cec/i2c/Kconfig
index 70432a1d6918..d912d143fb31 100644
--- a/drivers/media/cec/i2c/Kconfig
+++ b/drivers/media/cec/i2c/Kconfig
@@ -5,6 +5,7 @@
config CEC_CH7322
tristate "Chrontel CH7322 CEC controller"
depends on I2C
+ select REGMAP
select REGMAP_I2C
select CEC_CORE
help
diff --git a/drivers/media/cec/i2c/ch7322.c b/drivers/media/cec/i2c/ch7322.c
index 34fad7123704..439c15bc9e44 100644
--- a/drivers/media/cec/i2c/ch7322.c
+++ b/drivers/media/cec/i2c/ch7322.c
@@ -591,7 +591,7 @@ static struct i2c_driver ch7322_i2c_driver = {
.name = "ch7322",
.of_match_table = of_match_ptr(ch7322_of_match),
},
- .probe_new = ch7322_probe,
+ .probe = ch7322_probe,
.remove = ch7322_remove,
};
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
index bcb957883044..27c53eed8fe3 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/media/common/saa7146/saa7146_core.c
@@ -133,8 +133,8 @@ int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop)
****************************************************************************/
/* this is videobuf_vmalloc_to_sg() from videobuf-dma-sg.c
- make sure virt has been allocated with vmalloc_32(), otherwise the BUG()
- may be triggered on highmem machines */
+ make sure virt has been allocated with vmalloc_32(), otherwise return NULL
+ on highmem machines */
static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
struct scatterlist *sglist;
@@ -150,7 +150,7 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
if (NULL == pg)
goto err;
if (WARN_ON(PageHighMem(pg)))
- return NULL;
+ goto err;
sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
}
return sglist;
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index a4b05e366ccc..305bb21d843c 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -61,21 +61,21 @@ static const char * const dnames[] = {
#define DVB_MAX_IDS 4
static const u8 minor_type[] = {
- [DVB_DEVICE_VIDEO] = 0,
- [DVB_DEVICE_AUDIO] = 1,
- [DVB_DEVICE_SEC] = 2,
- [DVB_DEVICE_FRONTEND] = 3,
- [DVB_DEVICE_DEMUX] = 4,
- [DVB_DEVICE_DVR] = 5,
- [DVB_DEVICE_CA] = 6,
- [DVB_DEVICE_NET] = 7,
- [DVB_DEVICE_OSD] = 8,
+ [DVB_DEVICE_VIDEO] = 0,
+ [DVB_DEVICE_AUDIO] = 1,
+ [DVB_DEVICE_SEC] = 2,
+ [DVB_DEVICE_FRONTEND] = 3,
+ [DVB_DEVICE_DEMUX] = 4,
+ [DVB_DEVICE_DVR] = 5,
+ [DVB_DEVICE_CA] = 6,
+ [DVB_DEVICE_NET] = 7,
+ [DVB_DEVICE_OSD] = 8,
};
#define nums2minor(num, type, id) \
- (((num) << 6) | ((id) << 4) | minor_type[type])
+ (((num) << 6) | ((id) << 4) | minor_type[type])
-#define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64)
+#define MAX_DVB_MINORS (DVB_MAX_ADAPTERS * 64)
#endif
static struct class *dvb_class;
@@ -112,9 +112,7 @@ fail:
return -ENODEV;
}
-
-static const struct file_operations dvb_device_fops =
-{
+static const struct file_operations dvb_device_fops = {
.owner = THIS_MODULE,
.open = dvb_device_open,
.llseek = noop_llseek,
@@ -147,7 +145,6 @@ int dvb_generic_open(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(dvb_generic_open);
-
int dvb_generic_release(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
@@ -155,11 +152,10 @@ int dvb_generic_release(struct inode *inode, struct file *file)
if (!dvbdev)
return -ENODEV;
- if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
dvbdev->readers++;
- } else {
+ else
dvbdev->writers++;
- }
dvbdev->users++;
@@ -169,7 +165,6 @@ int dvb_generic_release(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(dvb_generic_release);
-
long dvb_generic_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -185,13 +180,13 @@ long dvb_generic_ioctl(struct file *file,
}
EXPORT_SYMBOL(dvb_generic_ioctl);
-
-static int dvbdev_get_free_id (struct dvb_adapter *adap, int type)
+static int dvbdev_get_free_id(struct dvb_adapter *adap, int type)
{
u32 id = 0;
while (id < DVB_MAX_IDS) {
struct dvb_device *dev;
+
list_for_each_entry(dev, &adap->device_list, list_head)
if (dev->type == type && dev->id == id)
goto skip;
@@ -245,7 +240,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
static int dvb_create_tsout_entity(struct dvb_device *dvbdev,
- const char *name, int npads)
+ const char *name, int npads)
{
int i;
@@ -387,7 +382,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
static int dvb_register_media_device(struct dvb_device *dvbdev,
int type, int minor,
- unsigned demux_sink_pads)
+ unsigned int demux_sink_pads)
{
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
struct media_link *link;
@@ -462,7 +457,8 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
mutex_lock(&dvbdev_register_lock);
- if ((id = dvbdev_get_free_id (adap, type)) < 0) {
+ id = dvbdev_get_free_id(adap, type);
+ if (id < 0) {
mutex_unlock(&dvbdev_register_lock);
*pdvbdev = NULL;
pr_err("%s: couldn't find free device id\n", __func__);
@@ -470,7 +466,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
}
*pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL);
- if (!dvbdev){
+ if (!dvbdev) {
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
@@ -482,14 +478,13 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
*/
list_for_each_entry(node, &dvbdevfops_list, list_head) {
if (node->fops->owner == adap->module &&
- node->type == type &&
- node->template == template) {
+ node->type == type && node->template == template) {
dvbdevfops = node->fops;
break;
}
}
- if (dvbdevfops == NULL) {
+ if (!dvbdevfops) {
dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
if (!dvbdevfops) {
kfree(dvbdev);
@@ -497,7 +492,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
return -ENOMEM;
}
- new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL);
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node) {
kfree(dvbdevfops);
kfree(dvbdev);
@@ -508,7 +503,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
new_node->fops = dvbdevfops;
new_node->type = type;
new_node->template = template;
- list_add_tail (&new_node->list_head, &dvbdevfops_list);
+ list_add_tail(&new_node->list_head, &dvbdevfops_list);
}
memcpy(dvbdev, template, sizeof(struct dvb_device));
@@ -518,21 +513,21 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvbdev->adapter = adap;
dvbdev->priv = priv;
dvbdev->fops = dvbdevfops;
- init_waitqueue_head (&dvbdev->wait_queue);
+ init_waitqueue_head(&dvbdev->wait_queue);
dvbdevfops->owner = adap->module;
- list_add_tail (&dvbdev->list_head, &adap->device_list);
+ list_add_tail(&dvbdev->list_head, &adap->device_list);
down_write(&minor_rwsem);
#ifdef CONFIG_DVB_DYNAMIC_MINORS
for (minor = 0; minor < MAX_DVB_MINORS; minor++)
- if (dvb_minors[minor] == NULL)
+ if (!dvb_minors[minor])
break;
if (minor == MAX_DVB_MINORS) {
if (new_node) {
- list_del (&new_node->list_head);
+ list_del(&new_node->list_head);
kfree(dvbdevfops);
kfree(new_node);
}
- list_del (&dvbdev->list_head);
+ list_del(&dvbdev->list_head);
kfree(dvbdev);
up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
@@ -547,14 +542,14 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
if (ret) {
pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
- __func__);
+ __func__);
if (new_node) {
- list_del (&new_node->list_head);
+ list_del(&new_node->list_head);
kfree(dvbdevfops);
kfree(new_node);
}
dvb_media_device_free(dvbdev);
- list_del (&dvbdev->list_head);
+ list_del(&dvbdev->list_head);
kfree(dvbdev);
mutex_unlock(&dvbdev_register_lock);
return ret;
@@ -567,12 +562,12 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
__func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
if (new_node) {
- list_del (&new_node->list_head);
+ list_del(&new_node->list_head);
kfree(dvbdevfops);
kfree(new_node);
}
dvb_media_device_free(dvbdev);
- list_del (&dvbdev->list_head);
+ list_del(&dvbdev->list_head);
kfree(dvbdev);
mutex_unlock(&dvbdev_register_lock);
return PTR_ERR(clsdev);
@@ -586,7 +581,6 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
}
EXPORT_SYMBOL(dvb_register_device);
-
void dvb_remove_device(struct dvb_device *dvbdev)
{
if (!dvbdev)
@@ -601,19 +595,17 @@ void dvb_remove_device(struct dvb_device *dvbdev)
device_destroy(dvb_class, MKDEV(DVB_MAJOR, dvbdev->minor));
- list_del (&dvbdev->list_head);
+ list_del(&dvbdev->list_head);
}
EXPORT_SYMBOL(dvb_remove_device);
-
static void dvb_free_device(struct kref *ref)
{
struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
- kfree (dvbdev);
+ kfree(dvbdev);
}
-
struct dvb_device *dvb_device_get(struct dvb_device *dvbdev)
{
kref_get(&dvbdev->ref);
@@ -621,14 +613,12 @@ struct dvb_device *dvb_device_get(struct dvb_device *dvbdev)
}
EXPORT_SYMBOL(dvb_device_get);
-
void dvb_device_put(struct dvb_device *dvbdev)
{
if (dvbdev)
kref_put(&dvbdev->ref, dvb_free_device);
}
-
void dvb_unregister_device(struct dvb_device *dvbdev)
{
dvb_remove_device(dvbdev);
@@ -636,7 +626,6 @@ void dvb_unregister_device(struct dvb_device *dvbdev)
}
EXPORT_SYMBOL(dvb_unregister_device);
-
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
static int dvb_create_io_intf_links(struct dvb_adapter *adap,
@@ -669,9 +658,9 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
struct media_entity *demux = NULL, *ca = NULL;
struct media_link *link;
struct media_interface *intf;
- unsigned demux_pad = 0;
- unsigned dvr_pad = 0;
- unsigned ntuner = 0, ndemod = 0;
+ unsigned int demux_pad = 0;
+ unsigned int dvr_pad = 0;
+ unsigned int ntuner = 0, ndemod = 0;
int ret, pad_source, pad_sink;
static const char *connector_name = "Television";
@@ -741,7 +730,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
MEDIA_LNK_FL_ENABLED,
false);
} else {
- pad_sink = media_get_pad_index(tuner, true,
+ pad_sink = media_get_pad_index(tuner, MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_sink < 0)
return -EINVAL;
@@ -759,7 +748,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
if (ntuner && ndemod) {
/* NOTE: first found tuner source pad presumed correct */
- pad_source = media_get_pad_index(tuner, false,
+ pad_source = media_get_pad_index(tuner, MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_ANALOG);
if (pad_source < 0)
return -EINVAL;
@@ -795,18 +784,18 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
media_device_for_each_entity(entity, mdev) {
if (entity->function == MEDIA_ENT_F_IO_DTV) {
if (!strncmp(entity->name, DVR_TSOUT,
- strlen(DVR_TSOUT))) {
+ strlen(DVR_TSOUT))) {
ret = media_create_pad_link(demux,
- ++dvr_pad,
- entity, 0, 0);
+ ++dvr_pad,
+ entity, 0, 0);
if (ret)
return ret;
}
if (!strncmp(entity->name, DEMUX_TSOUT,
- strlen(DEMUX_TSOUT))) {
+ strlen(DEMUX_TSOUT))) {
ret = media_create_pad_link(demux,
- ++demux_pad,
- entity, 0, 0);
+ ++demux_pad,
+ entity, 0, 0);
if (ret)
return ret;
}
@@ -864,8 +853,10 @@ EXPORT_SYMBOL_GPL(dvb_create_media_graph);
static int dvbdev_check_free_adapter_num(int num)
{
struct list_head *entry;
+
list_for_each(entry, &dvb_adapter_list) {
struct dvb_adapter *adap;
+
adap = list_entry(entry, struct dvb_adapter, list_head);
if (adap->num == num)
return 0;
@@ -873,7 +864,7 @@ static int dvbdev_check_free_adapter_num(int num)
return 1;
}
-static int dvbdev_get_free_adapter_num (void)
+static int dvbdev_get_free_adapter_num(void)
{
int num = 0;
@@ -886,7 +877,6 @@ static int dvbdev_get_free_adapter_num (void)
return -ENFILE;
}
-
int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
struct module *module, struct device *device,
short *adapter_nums)
@@ -913,8 +903,8 @@ int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
return -ENFILE;
}
- memset (adap, 0, sizeof(struct dvb_adapter));
- INIT_LIST_HEAD (&adap->device_list);
+ memset(adap, 0, sizeof(struct dvb_adapter));
+ INIT_LIST_HEAD(&adap->device_list);
pr_info("DVB: registering new adapter (%s)\n", name);
@@ -924,13 +914,13 @@ int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
adap->device = device;
adap->mfe_shared = 0;
adap->mfe_dvbdev = NULL;
- mutex_init (&adap->mfe_lock);
+ mutex_init(&adap->mfe_lock);
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
mutex_init(&adap->mdev_lock);
#endif
- list_add_tail (&adap->list_head, &dvb_adapter_list);
+ list_add_tail(&adap->list_head, &dvb_adapter_list);
mutex_unlock(&dvbdev_register_lock);
@@ -938,25 +928,26 @@ int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
}
EXPORT_SYMBOL(dvb_register_adapter);
-
int dvb_unregister_adapter(struct dvb_adapter *adap)
{
mutex_lock(&dvbdev_register_lock);
- list_del (&adap->list_head);
+ list_del(&adap->list_head);
mutex_unlock(&dvbdev_register_lock);
return 0;
}
EXPORT_SYMBOL(dvb_unregister_adapter);
-/* if the miracle happens and "generic_usercopy()" is included into
- the kernel, then this can vanish. please don't make the mistake and
- define this as video_usercopy(). this will introduce a dependency
- to the v4l "videodev.o" module, which is unnecessary for some
- cards (ie. the budget dvb-cards don't need the v4l module...) */
+/*
+ * if the miracle happens and "generic_usercopy()" is included into
+ * the kernel, then this can vanish. please don't make the mistake and
+ * define this as video_usercopy(). this will introduce a dependency
+ * to the v4l "videodev.o" module, which is unnecessary for some
+ * cards (ie. the budget dvb-cards don't need the v4l module...)
+ */
int dvb_usercopy(struct file *file,
- unsigned int cmd, unsigned long arg,
- int (*func)(struct file *file,
- unsigned int cmd, void *arg))
+ unsigned int cmd, unsigned long arg,
+ int (*func)(struct file *file,
+ unsigned int cmd, void *arg))
{
char sbuf[128];
void *mbuf = NULL;
@@ -970,7 +961,7 @@ int dvb_usercopy(struct file *file,
* For this command, the pointer is actually an integer
* argument.
*/
- parg = (void *) arg;
+ parg = (void *)arg;
break;
case _IOC_READ: /* some v4l ioctls are marked wrong ... */
case _IOC_WRITE:
@@ -980,7 +971,7 @@ int dvb_usercopy(struct file *file,
} else {
/* too big to allocate from stack */
mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
- if (NULL == mbuf)
+ if (!mbuf)
return -ENOMEM;
parg = mbuf;
}
@@ -992,15 +983,15 @@ int dvb_usercopy(struct file *file,
}
/* call driver */
- if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD)
+ err = func(file, cmd, parg);
+ if (err == -ENOIOCTLCMD)
err = -ENOTTY;
if (err < 0)
goto out;
/* Copy results into user buffer */
- switch (_IOC_DIR(cmd))
- {
+ switch (_IOC_DIR(cmd)) {
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
@@ -1080,19 +1071,20 @@ static char *dvb_devnode(const struct device *dev, umode_t *mode)
dvbdev->adapter->num, dnames[dvbdev->type], dvbdev->id);
}
-
static int __init init_dvbdev(void)
{
int retval;
dev_t dev = MKDEV(DVB_MAJOR, 0);
- if ((retval = register_chrdev_region(dev, MAX_DVB_MINORS, "DVB")) != 0) {
+ retval = register_chrdev_region(dev, MAX_DVB_MINORS, "DVB");
+ if (retval != 0) {
pr_err("dvb-core: unable to get major %d\n", DVB_MAJOR);
return retval;
}
cdev_init(&dvb_device_cdev, &dvb_device_fops);
- if ((retval = cdev_add(&dvb_device_cdev, dev, MAX_DVB_MINORS)) != 0) {
+ retval = cdev_add(&dvb_device_cdev, dev, MAX_DVB_MINORS);
+ if (retval != 0) {
pr_err("dvb-core: unable register character device\n");
goto error;
}
@@ -1112,7 +1104,6 @@ error:
return retval;
}
-
static void __exit exit_dvbdev(void)
{
struct dvbdevfops_node *node, *next;
@@ -1122,7 +1113,7 @@ static void __exit exit_dvbdev(void)
unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);
list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
- list_del (&node->list_head);
+ list_del(&node->list_head);
kfree(node->fops);
kfree(node);
}
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index cca7cbdd4c7c..f39887c04978 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -266,7 +266,7 @@ static struct i2c_driver a8293_driver = {
.name = "a8293",
.suppress_bind_attrs = true,
},
- .probe_new = a8293_probe,
+ .probe = a8293_probe,
.remove = a8293_remove,
.id_table = a8293_id_table,
};
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 206758a73ae2..a829c89792a4 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1563,7 +1563,7 @@ static struct i2c_driver af9013_driver = {
.name = "af9013",
.suppress_bind_attrs = true,
},
- .probe_new = af9013_probe,
+ .probe = af9013_probe,
.remove = af9013_remove,
.id_table = af9013_id_table,
};
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index a30773f62006..49b7b04a7899 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1183,7 +1183,7 @@ static struct i2c_driver af9033_driver = {
.name = "af9033",
.suppress_bind_attrs = true,
},
- .probe_new = af9033_probe,
+ .probe = af9033_probe,
.remove = af9033_remove,
.id_table = af9033_id_table,
};
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index 0f748cf46089..acc27376c246 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -776,7 +776,7 @@ static struct i2c_driver au8522_driver = {
.driver = {
.name = "au8522",
},
- .probe_new = au8522_probe,
+ .probe = au8522_probe,
.remove = au8522_remove,
.id_table = au8522_id,
};
diff --git a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c
index c0967ad95220..3f3b85743666 100644
--- a/drivers/media/dvb-frontends/cxd2099.c
+++ b/drivers/media/dvb-frontends/cxd2099.c
@@ -681,7 +681,7 @@ static struct i2c_driver cxd2099_driver = {
.driver = {
.name = "cxd2099",
},
- .probe_new = cxd2099_probe,
+ .probe = cxd2099_probe,
.remove = cxd2099_remove,
.id_table = cxd2099_id,
};
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 47aa40967171..d7ee294c6833 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -733,7 +733,7 @@ static struct i2c_driver cxd2820r_driver = {
.name = "cxd2820r",
.suppress_bind_attrs = true,
},
- .probe_new = cxd2820r_probe,
+ .probe = cxd2820r_probe,
.remove = cxd2820r_remove,
.id_table = cxd2820r_id_table,
};
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index e35e00db7dbb..90cb41eacf98 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -942,7 +942,7 @@ static struct i2c_driver dvb_pll_driver = {
.driver = {
.name = "dvb_pll",
},
- .probe_new = dvb_pll_probe,
+ .probe = dvb_pll_probe,
.remove = dvb_pll_remove,
.id_table = dvb_pll_id,
};
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index e4bbf6a51a2b..68c1a3e0e2ba 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -1110,7 +1110,7 @@ static struct i2c_driver helene_driver = {
.driver = {
.name = "helene",
},
- .probe_new = helene_probe,
+ .probe = helene_probe,
.id_table = helene_id,
};
module_i2c_driver(helene_driver);
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 6bf723b5ffad..70258884126b 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2249,7 +2249,7 @@ static struct i2c_driver lgdt3306a_driver = {
.name = "lgdt3306a",
.suppress_bind_attrs = true,
},
- .probe_new = lgdt3306a_probe,
+ .probe = lgdt3306a_probe,
.remove = lgdt3306a_remove,
.id_table = lgdt3306a_id_table,
};
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 1d6932d8e497..83565209c3b1 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -993,7 +993,7 @@ static struct i2c_driver lgdt330x_driver = {
.name = "lgdt330x",
.suppress_bind_attrs = true,
},
- .probe_new = lgdt330x_probe,
+ .probe = lgdt330x_probe,
.remove = lgdt330x_remove,
.id_table = lgdt330x_id_table,
};
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index f26508b217ee..cf49ac56a37e 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1941,7 +1941,7 @@ static struct i2c_driver m88ds3103_driver = {
.name = "m88ds3103",
.suppress_bind_attrs = true,
},
- .probe_new = m88ds3103_probe,
+ .probe = m88ds3103_probe,
.remove = m88ds3103_remove,
.id_table = m88ds3103_id_table,
};
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index b74b9afed9a2..125fed4891ba 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -1569,7 +1569,7 @@ static int mb86a20s_get_stats(struct dvb_frontend *fe, int status_nr)
u32 t_post_bit_error = 0, t_post_bit_count = 0;
u32 block_error = 0, block_count = 0;
u32 t_block_error = 0, t_block_count = 0;
- int active_layers = 0, pre_ber_layers = 0, post_ber_layers = 0;
+ int pre_ber_layers = 0, post_ber_layers = 0;
int per_layers = 0;
dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
@@ -1589,9 +1589,6 @@ static int mb86a20s_get_stats(struct dvb_frontend *fe, int status_nr)
for (layer = 0; layer < NUM_LAYERS; layer++) {
if (c->isdbt_layer_enabled & (1 << layer)) {
- /* Layer is active and has rc segments */
- active_layers++;
-
/* Handle BER before vterbi */
rc = mb86a20s_get_pre_ber(fe, layer,
&bit_error, &bit_count);
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index 0782f8377eb2..2ce5692bc22c 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -800,7 +800,7 @@ static struct i2c_driver mn88443x_driver = {
.name = "mn88443x",
.of_match_table = mn88443x_of_match,
},
- .probe_new = mn88443x_probe,
+ .probe = mn88443x_probe,
.remove = mn88443x_remove,
.id_table = mn88443x_i2c_id,
};
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 4a71f1c6371a..73d1e52de569 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -718,7 +718,7 @@ static struct i2c_driver mn88472_driver = {
.name = "mn88472",
.suppress_bind_attrs = true,
},
- .probe_new = mn88472_probe,
+ .probe = mn88472_probe,
.remove = mn88472_remove,
.id_table = mn88472_id_table,
};
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index 205b14ae584e..eb50591c0e7a 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -753,7 +753,7 @@ static struct i2c_driver mn88473_driver = {
.name = "mn88473",
.suppress_bind_attrs = true,
},
- .probe_new = mn88473_probe,
+ .probe = mn88473_probe,
.remove = mn88473_remove,
.id_table = mn88473_id_table,
};
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
index 9858e11943a0..2a31bde2630f 100644
--- a/drivers/media/dvb-frontends/mxl692.c
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -1355,7 +1355,7 @@ static struct i2c_driver mxl692_driver = {
.driver = {
.name = "mxl692",
},
- .probe_new = mxl692_probe,
+ .probe = mxl692_probe,
.remove = mxl692_remove,
.id_table = mxl692_id_table,
};
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index db3254950147..35c969fd2cb5 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -886,7 +886,7 @@ static struct i2c_driver rtl2830_driver = {
.name = "rtl2830",
.suppress_bind_attrs = true,
},
- .probe_new = rtl2830_probe,
+ .probe = rtl2830_probe,
.remove = rtl2830_remove,
.id_table = rtl2830_id_table,
};
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 900d4db8b922..601cf45c3935 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -1135,7 +1135,7 @@ static struct i2c_driver rtl2832_driver = {
.name = "rtl2832",
.suppress_bind_attrs = true,
},
- .probe_new = rtl2832_probe,
+ .probe = rtl2832_probe,
.remove = rtl2832_remove,
.id_table = rtl2832_id_table,
};
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index cc07e965c34c..72810efd1a96 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -1292,7 +1292,7 @@ static struct i2c_driver si2165_driver = {
.driver = {
.name = "si2165",
},
- .probe_new = si2165_probe,
+ .probe = si2165_probe,
.remove = si2165_remove,
.id_table = si2165_id_table,
};
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 2a0e108c5eb0..dae1f2153e8b 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -798,7 +798,7 @@ static struct i2c_driver si2168_driver = {
.name = "si2168",
.suppress_bind_attrs = true,
},
- .probe_new = si2168_probe,
+ .probe = si2168_probe,
.remove = si2168_remove,
.id_table = si2168_id_table,
};
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 3395f6b5b948..4d7d0b8b51b4 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -416,7 +416,7 @@ static struct i2c_driver sp2_driver = {
.driver = {
.name = "sp2",
},
- .probe_new = sp2_probe,
+ .probe = sp2_probe,
.remove = sp2_remove,
.id_table = sp2_id,
};
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 9bde0ad6f26e..a07dc5fdeb3d 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -5084,7 +5084,7 @@ static struct i2c_driver stv090x_driver = {
.name = "stv090x",
.suppress_bind_attrs = true,
},
- .probe_new = stv090x_probe,
+ .probe = stv090x_probe,
.remove = stv090x_remove,
.id_table = stv090x_id_table,
};
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index b2f456116c60..11653f846c12 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -480,7 +480,7 @@ static struct i2c_driver stv6110x_driver = {
.name = "stv6110x",
.suppress_bind_attrs = true,
},
- .probe_new = stv6110x_probe,
+ .probe = stv6110x_probe,
.remove = stv6110x_remove,
.id_table = stv6110x_id_table,
};
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 77a991bf4713..879f028f9682 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -840,7 +840,7 @@ static struct i2c_driver tc90522_driver = {
.driver = {
.name = "tc90522",
},
- .probe_new = tc90522_probe,
+ .probe = tc90522_probe,
.remove = tc90522_remove,
.id_table = tc90522_id,
};
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index c8e5617d08c0..6640851d8bbc 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -1240,7 +1240,7 @@ static struct i2c_driver tda10071_driver = {
.name = "tda10071",
.suppress_bind_attrs = true,
},
- .probe_new = tda10071_probe,
+ .probe = tda10071_probe,
.remove = tda10071_remove,
.id_table = tda10071_id_table,
};
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index c28fee7509cd..f5b60f827697 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -720,7 +720,7 @@ static struct i2c_driver ts2020_driver = {
.driver = {
.name = "ts2020",
},
- .probe_new = ts2020_probe,
+ .probe = ts2020_probe,
.remove = ts2020_remove,
.id_table = ts2020_id_table,
};
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 256d55bb2b1d..226454b6a90d 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -338,6 +338,19 @@ config VIDEO_OG01A1B
To compile this driver as a module, choose M here: the
module will be called og01a1b.
+config VIDEO_OV01A10
+ tristate "OmniVision OV01A10 sensor support"
+ depends on VIDEO_DEV && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV01A10 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov01a10.
+
config VIDEO_OV02A10
tristate "OmniVision OV02A10 sensor support"
depends on VIDEO_DEV && I2C
@@ -1292,6 +1305,7 @@ config VIDEO_TC358746
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
select V4L2_FWNODE
+ select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
select REGMAP_I2C
help
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index b44dacf935f4..c743aeb5d1ad 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o
+obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o
obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o
obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o
obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o
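Together, the Kconfig and Makefile hunks above only wire the new ov01a10 object into the build: the entry depends on VIDEO_DEV and I2C and selects its own V4L2 plumbing (MEDIA_CONTROLLER, VIDEO_V4L2_SUBDEV_API, V4L2_FWNODE). A hedged example of a config fragment that builds the sensor as a module; the surrounding symbols are assumptions about a typical setup, not requirements stated by the patch:

CONFIG_MEDIA_SUPPORT=m
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_VIDEO_DEV=m
CONFIG_I2C=y
CONFIG_VIDEO_OV01A10=m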
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 44c26af49071..5f605b9be3b1 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -370,7 +370,7 @@ static struct i2c_driver ad5820_i2c_driver = {
.pm = &ad5820_pm,
.of_match_table = ad5820_of_table,
},
- .probe_new = ad5820_probe,
+ .probe = ad5820_probe,
.remove = ad5820_remove,
.id_table = ad5820_id_table,
};
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index a61a77de6eee..98ca417b8004 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -535,7 +535,7 @@ static struct i2c_driver adp1653_i2c_driver = {
.name = ADP1653_NAME,
.pm = &adp1653_pm_ops,
},
- .probe_new = adp1653_probe,
+ .probe = adp1653_probe,
.remove = adp1653_remove,
.id_table = adp1653_id_table,
};
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index aa0f80e299b3..4a2b9fd9e2da 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -387,7 +387,7 @@ static struct i2c_driver adv7170_driver = {
.driver = {
.name = "adv7170",
},
- .probe_new = adv7170_probe,
+ .probe = adv7170_probe,
.remove = adv7170_remove,
.id_table = adv7170_id,
};
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index d9bea2b9ec33..e454cba4b026 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -442,7 +442,7 @@ static struct i2c_driver adv7175_driver = {
.driver = {
.name = "adv7175",
},
- .probe_new = adv7175_probe,
+ .probe = adv7175_probe,
.remove = adv7175_remove,
.id_table = adv7175_id,
};
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index a22402b7acff..99ba925e8ec8 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1610,7 +1610,7 @@ static struct i2c_driver adv7180_driver = {
.pm = ADV7180_PM_OPS,
.of_match_table = of_match_ptr(adv7180_of_id),
},
- .probe_new = adv7180_probe,
+ .probe = adv7180_probe,
.remove = adv7180_remove,
.id_table = adv7180_id,
};
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 98b63d79d33d..3659feafac69 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -631,7 +631,7 @@ static struct i2c_driver adv7183_driver = {
.driver = {
.name = "adv7183",
},
- .probe_new = adv7183_probe,
+ .probe = adv7183_probe,
.remove = adv7183_remove,
.id_table = adv7183_id,
};
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 7e84869d2434..ff21cd4744d3 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -521,7 +521,7 @@ static struct i2c_driver adv7343_driver = {
.of_match_table = of_match_ptr(adv7343_of_match),
.name = "adv7343",
},
- .probe_new = adv7343_probe,
+ .probe = adv7343_probe,
.remove = adv7343_remove,
.id_table = adv7343_id,
};
diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c
index 61e916cbe651..7638af455cef 100644
--- a/drivers/media/i2c/adv7393.c
+++ b/drivers/media/i2c/adv7393.c
@@ -455,7 +455,7 @@ static struct i2c_driver adv7393_driver = {
.driver = {
.name = "adv7393",
},
- .probe_new = adv7393_probe,
+ .probe = adv7393_probe,
.remove = adv7393_remove,
.id_table = adv7393_id,
};
diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c
index 4498d78a2357..3eb6d5e8f082 100644
--- a/drivers/media/i2c/adv748x/adv748x-core.c
+++ b/drivers/media/i2c/adv748x/adv748x-core.c
@@ -847,7 +847,7 @@ static struct i2c_driver adv748x_driver = {
.of_match_table = adv748x_of_table,
.pm = &adv748x_pm_ops,
},
- .probe_new = adv748x_probe,
+ .probe = adv748x_probe,
.remove = adv748x_remove,
};
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 3999fa524cab..a9183d9282fd 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1957,7 +1957,7 @@ static struct i2c_driver adv7511_driver = {
.driver = {
.name = "adv7511-v4l2",
},
- .probe_new = adv7511_probe,
+ .probe = adv7511_probe,
.remove = adv7511_remove,
.id_table = adv7511_id,
};
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 3d0898c4175e..b202a85fbeaa 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3689,7 +3689,7 @@ static struct i2c_driver adv76xx_driver = {
.name = "adv7604",
.of_match_table = of_match_ptr(adv76xx_of_id),
},
- .probe_new = adv76xx_probe,
+ .probe = adv76xx_probe,
.remove = adv76xx_remove,
.id_table = adv76xx_i2c_id,
};
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index cb8655574119..c1664a3620c8 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3619,7 +3619,7 @@ static struct i2c_driver adv7842_driver = {
.driver = {
.name = "adv7842",
},
- .probe_new = adv7842_probe,
+ .probe = adv7842_probe,
.remove = adv7842_remove,
.id_table = adv7842_id,
};
diff --git a/drivers/media/i2c/ak7375.c b/drivers/media/i2c/ak7375.c
index e7cec45bc271..463b51d46320 100644
--- a/drivers/media/i2c/ak7375.c
+++ b/drivers/media/i2c/ak7375.c
@@ -306,7 +306,7 @@ static struct i2c_driver ak7375_i2c_driver = {
.pm = &ak7375_pm_ops,
.of_match_table = ak7375_of_table,
},
- .probe_new = ak7375_probe,
+ .probe = ak7375_probe,
.remove = ak7375_remove,
};
module_i2c_driver(ak7375_i2c_driver);
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index 7c9ab76e2448..ce840adc2aa7 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -314,7 +314,7 @@ static struct i2c_driver ak881x_i2c_driver = {
.driver = {
.name = "ak881x",
},
- .probe_new = ak881x_probe,
+ .probe = ak881x_probe,
.remove = ak881x_remove,
.id_table = ak881x_id,
};
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index 77f597571167..a4e39871e8f7 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -1198,7 +1198,7 @@ static struct i2c_driver ar0521_i2c_driver = {
.pm = &ar0521_pm_ops,
.of_match_table = ar0521_dt_ids,
},
- .probe_new = ar0521_probe,
+ .probe = ar0521_probe,
.remove = ar0521_remove,
};
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 39f8a5361166..b4a25cc996dc 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -468,7 +468,7 @@ static struct i2c_driver bt819_driver = {
.driver = {
.name = "bt819",
},
- .probe_new = bt819_probe,
+ .probe = bt819_probe,
.remove = bt819_remove,
.id_table = bt819_id,
};
diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c
index d1d397b15b85..814acbd6a5a8 100644
--- a/drivers/media/i2c/bt856.c
+++ b/drivers/media/i2c/bt856.c
@@ -239,7 +239,7 @@ static struct i2c_driver bt856_driver = {
.driver = {
.name = "bt856",
},
- .probe_new = bt856_probe,
+ .probe = bt856_probe,
.remove = bt856_remove,
.id_table = bt856_id,
};
diff --git a/drivers/media/i2c/bt866.c b/drivers/media/i2c/bt866.c
index d632d9a07f04..dada059cbce4 100644
--- a/drivers/media/i2c/bt866.c
+++ b/drivers/media/i2c/bt866.c
@@ -206,7 +206,7 @@ static struct i2c_driver bt866_driver = {
.driver = {
.name = "bt866",
},
- .probe_new = bt866_probe,
+ .probe = bt866_probe,
.remove = bt866_remove,
.id_table = bt866_id,
};
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 559a415fd827..49e0d9a09530 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -3731,7 +3731,7 @@ static struct i2c_driver ccs_i2c_driver = {
.name = CCS_NAME,
.pm = &ccs_pm_ops,
},
- .probe_new = ccs_probe,
+ .probe = ccs_probe,
.remove = ccs_remove,
};
diff --git a/drivers/media/i2c/cs3308.c b/drivers/media/i2c/cs3308.c
index a0b66c04fe25..61afa3d799d2 100644
--- a/drivers/media/i2c/cs3308.c
+++ b/drivers/media/i2c/cs3308.c
@@ -118,7 +118,7 @@ static struct i2c_driver cs3308_driver = {
.driver = {
.name = "cs3308",
},
- .probe_new = cs3308_probe,
+ .probe = cs3308_probe,
.remove = cs3308_remove,
.id_table = cs3308_id,
};
diff --git a/drivers/media/i2c/cs5345.c b/drivers/media/i2c/cs5345.c
index ac4b5632fc46..3019a132e079 100644
--- a/drivers/media/i2c/cs5345.c
+++ b/drivers/media/i2c/cs5345.c
@@ -198,7 +198,7 @@ static struct i2c_driver cs5345_driver = {
.driver = {
.name = "cs5345",
},
- .probe_new = cs5345_probe,
+ .probe = cs5345_probe,
.remove = cs5345_remove,
.id_table = cs5345_id,
};
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index 670f89de32d4..82881b79e730 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -209,7 +209,7 @@ static struct i2c_driver cs53l32a_driver = {
.driver = {
.name = "cs53l32a",
},
- .probe_new = cs53l32a_probe,
+ .probe = cs53l32a_probe,
.remove = cs53l32a_remove,
.id_table = cs53l32a_id,
};
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 46cf422270b2..5aec25289062 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -6045,7 +6045,7 @@ static struct i2c_driver cx25840_driver = {
.driver = {
.name = "cx25840",
},
- .probe_new = cx25840_probe,
+ .probe = cx25840_probe,
.remove = cx25840_remove,
.id_table = cx25840_id,
};
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index af59687383aa..cc09b32ede60 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -299,7 +299,7 @@ static struct i2c_driver dw9714_i2c_driver = {
.pm = &dw9714_pm_ops,
.of_match_table = dw9714_of_table,
},
- .probe_new = dw9714_probe,
+ .probe = dw9714_probe,
.remove = dw9714_remove,
.id_table = dw9714_id_table,
};
diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
index 83a3ee275bbe..daabbece8c7e 100644
--- a/drivers/media/i2c/dw9768.c
+++ b/drivers/media/i2c/dw9768.c
@@ -549,7 +549,7 @@ static struct i2c_driver dw9768_i2c_driver = {
.pm = &dw9768_pm_ops,
.of_match_table = dw9768_of_table,
},
- .probe_new = dw9768_probe,
+ .probe = dw9768_probe,
.remove = dw9768_remove,
};
module_i2c_driver(dw9768_i2c_driver);
diff --git a/drivers/media/i2c/dw9807-vcm.c b/drivers/media/i2c/dw9807-vcm.c
index 3599720db7e9..4148009e0e01 100644
--- a/drivers/media/i2c/dw9807-vcm.c
+++ b/drivers/media/i2c/dw9807-vcm.c
@@ -310,7 +310,7 @@ static struct i2c_driver dw9807_i2c_driver = {
.pm = &dw9807_pm_ops,
.of_match_table = dw9807_of_table,
},
- .probe_new = dw9807_probe,
+ .probe = dw9807_probe,
.remove = dw9807_remove,
};
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index ff9bb9fc97dd..d6fc843f9368 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1501,7 +1501,7 @@ static struct i2c_driver et8ek8_i2c_driver = {
.pm = &et8ek8_pm_ops,
.of_match_table = et8ek8_of_table,
},
- .probe_new = et8ek8_probe,
+ .probe = et8ek8_probe,
.remove = __exit_p(et8ek8_remove),
.id_table = et8ek8_id_table,
};
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 7daefab35cf0..50e78f5b058c 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -1350,7 +1350,7 @@ static struct i2c_driver hi556_i2c_driver = {
.pm = &hi556_pm_ops,
.acpi_match_table = ACPI_PTR(hi556_acpi_ids),
},
- .probe_new = hi556_probe,
+ .probe = hi556_probe,
.remove = hi556_remove,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index 306dc35e925f..fa0038749a3b 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -1353,7 +1353,8 @@ static int hi846_set_ctrl(struct v4l2_ctrl *ctrl)
exposure_max);
}
- if (!pm_runtime_get_if_in_use(&client->dev))
+ ret = pm_runtime_get_if_in_use(&client->dev);
+ if (!ret || ret == -EAGAIN)
return 0;
switch (ctrl->id) {
@@ -2189,7 +2190,7 @@ static struct i2c_driver hi846_i2c_driver = {
.pm = &hi846_pm_ops,
.of_match_table = hi846_of_match,
},
- .probe_new = hi846_probe,
+ .probe = hi846_probe,
.remove = hi846_remove,
};
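The first hi846 hunk tightens the runtime-PM check in hi846_set_ctrl(): pm_runtime_get_if_in_use() does not only return 0 ("not in use") or a positive value ("usage count taken"), it can also return a negative errno, and the old negated test treated such an error as "in use", went on to write registers, and dropped a reference it never took. A hedged sketch of the corrected pattern, mirroring the hunk with an illustrative helper name:

#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int demo_apply_ctrl(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_if_in_use(dev);
	if (!ret || ret == -EAGAIN)
		return 0;	/* not powered/in use: apply the value on resume instead */

	/* the usage count is held here: program the sensor registers */

	pm_runtime_put(dev);
	return 0;
}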
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
index 5a82b15a9513..7cdce392e137 100644
--- a/drivers/media/i2c/hi847.c
+++ b/drivers/media/i2c/hi847.c
@@ -2999,7 +2999,7 @@ static struct i2c_driver hi847_i2c_driver = {
.pm = &hi847_pm_ops,
.acpi_match_table = ACPI_PTR(hi847_acpi_ids),
},
- .probe_new = hi847_probe,
+ .probe = hi847_probe,
.remove = hi847_remove,
};
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index 64c70ebf9869..3e870fa9ff79 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -1100,7 +1100,7 @@ static struct i2c_driver imx208_i2c_driver = {
.pm = &imx208_pm_ops,
.acpi_match_table = ACPI_PTR(imx208_acpi_ids),
},
- .probe_new = imx208_probe,
+ .probe = imx208_probe,
.remove = imx208_remove,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 710c9fb515fd..2f9c8582f940 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -1112,7 +1112,7 @@ static struct i2c_driver imx214_i2c_driver = {
.pm = &imx214_pm_ops,
.name = "imx214",
},
- .probe_new = imx214_probe,
+ .probe = imx214_probe,
.remove = imx214_remove,
};
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index f9471c9e3a74..d737d5e9a4a6 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -1583,7 +1583,7 @@ static struct i2c_driver imx219_i2c_driver = {
.of_match_table = imx219_dt_ids,
.pm = &imx219_pm_ops,
},
- .probe_new = imx219_probe,
+ .probe = imx219_probe,
.remove = imx219_remove,
};
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index 85d73b186111..e196565e846e 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1395,7 +1395,7 @@ static struct i2c_driver imx258_i2c_driver = {
.acpi_match_table = ACPI_PTR(imx258_acpi_ids),
.of_match_table = imx258_dt_ids,
},
- .probe_new = imx258_probe,
+ .probe = imx258_probe,
.remove = imx258_remove,
};
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 9219f3c9594b..f33b692e6951 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -2168,7 +2168,7 @@ static struct i2c_driver imx274_i2c_driver = {
.pm = &imx274_pm_ops,
.of_match_table = imx274_of_id_table,
},
- .probe_new = imx274_probe,
+ .probe = imx274_probe,
.remove = imx274_remove,
.id_table = imx274_id,
};
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 5ea25b7acc55..b3f832e9d7e1 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -1716,10 +1716,10 @@ static const struct of_device_id imx290_of_match[] = {
MODULE_DEVICE_TABLE(of, imx290_of_match);
static struct i2c_driver imx290_i2c_driver = {
- .probe_new = imx290_probe,
+ .probe = imx290_probe,
.remove = imx290_remove,
.driver = {
- .name = "imx290",
+ .name = "imx290",
.pm = pm_ptr(&imx290_pm_ops),
.of_match_table = imx290_of_match,
},
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index 4f22c0515ef8..c0b9a5349668 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -922,10 +922,12 @@ static int imx296_read_temperature(struct imx296 *sensor, int *temp)
if (ret < 0)
return ret;
- tmdout = imx296_read(sensor, IMX296_TMDOUT) & IMX296_TMDOUT_MASK;
+ tmdout = imx296_read(sensor, IMX296_TMDOUT);
if (tmdout < 0)
return tmdout;
+ tmdout &= IMX296_TMDOUT_MASK;
+
/* T(°C) = 246.312 - 0.304 * TMDOUT */;
*temp = 246312 - 304 * tmdout;
@@ -1152,7 +1154,7 @@ static struct i2c_driver imx296_i2c_driver = {
.name = "imx296",
.pm = &imx296_pm_ops
},
- .probe_new = imx296_probe,
+ .probe = imx296_probe,
.remove = imx296_remove,
};
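The first imx296 hunk reorders an error check: imx296_read() returns either the register value or a negative errno, and the old code applied the TMDOUT mask to that return value before testing it, so an I2C error could be masked into a plausible-looking temperature instead of being propagated. A sketch of the corrected ordering with illustrative names (demo_read() stands in for any read helper with the same value-or-errno convention; the register and mask values are not taken from the driver):

#define DEMO_TMDOUT_REG		0x00
#define DEMO_TMDOUT_MASK	0x3ff

struct demo_sensor;
static int demo_read(struct demo_sensor *sensor, unsigned int reg);

static int demo_read_temperature(struct demo_sensor *sensor, int *temp)
{
	int tmdout;

	tmdout = demo_read(sensor, DEMO_TMDOUT_REG);
	if (tmdout < 0)
		return tmdout;		/* propagate the error before touching the bits */

	tmdout &= DEMO_TMDOUT_MASK;	/* mask the payload only after the check */

	/* same fixed-point conversion as the driver: result is in milli-degrees C */
	*temp = 246312 - 304 * tmdout;

	return 0;
}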
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index 45b1b61b2880..a2140848d0d6 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -2558,7 +2558,7 @@ static struct i2c_driver imx319_i2c_driver = {
.pm = &imx319_pm_ops,
.acpi_match_table = ACPI_PTR(imx319_acpi_ids),
},
- .probe_new = imx319_probe,
+ .probe = imx319_probe,
.remove = imx319_remove,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index 309c706114d2..d722c9b7cd31 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -49,7 +49,8 @@
#define IMX334_INCLK_RATE 24000000
/* CSI2 HW configuration */
-#define IMX334_LINK_FREQ 891000000
+#define IMX334_LINK_FREQ_891M 891000000
+#define IMX334_LINK_FREQ_445M 445500000
#define IMX334_NUM_DATA_LANES 4
#define IMX334_REG_MIN 0x00
@@ -117,6 +118,7 @@ struct imx334_mode {
* @vblank: Vertical blanking in lines
* @cur_mode: Pointer to current selected sensor mode
* @mutex: Mutex for serializing sensor controls
+ * @menu_skip_mask: Menu skip mask for link_freq_ctrl
* @cur_code: current selected format code
* @streaming: Flag indicating streaming state
*/
@@ -139,12 +141,14 @@ struct imx334 {
u32 vblank;
const struct imx334_mode *cur_mode;
struct mutex mutex;
+ unsigned long menu_skip_mask;
u32 cur_code;
bool streaming;
};
static const s64 link_freq[] = {
- IMX334_LINK_FREQ,
+ IMX334_LINK_FREQ_891M,
+ IMX334_LINK_FREQ_445M,
};
/* Sensor mode registers for 1920x1080@30fps */
@@ -468,7 +472,7 @@ static const struct imx334_mode supported_modes[] = {
.vblank_min = 45,
.vblank_max = 132840,
.pclk = 297000000,
- .link_freq_idx = 0,
+ .link_freq_idx = 1,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_1920x1080_regs),
.regs = mode_1920x1080_regs,
@@ -598,13 +602,22 @@ static int imx334_update_controls(struct imx334 *imx334,
if (ret)
return ret;
+ ret = __v4l2_ctrl_modify_range(imx334->pclk_ctrl, mode->pclk,
+ mode->pclk, 1, mode->pclk);
+ if (ret)
+ return ret;
+
ret = __v4l2_ctrl_modify_range(imx334->hblank_ctrl, mode->hblank,
mode->hblank, 1, mode->hblank);
if (ret)
return ret;
- return __v4l2_ctrl_modify_range(imx334->vblank_ctrl, mode->vblank_min,
+ ret = __v4l2_ctrl_modify_range(imx334->vblank_ctrl, mode->vblank_min,
mode->vblank_max, 1, mode->vblank);
+ if (ret)
+ return ret;
+
+ return __v4l2_ctrl_s_ctrl(imx334->vblank_ctrl, mode->vblank);
}
/**
@@ -698,6 +711,8 @@ static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
pm_runtime_put(imx334->dev);
break;
+ case V4L2_CID_PIXEL_RATE:
+ case V4L2_CID_LINK_FREQ:
case V4L2_CID_HBLANK:
ret = 0;
break;
@@ -885,7 +900,17 @@ static int imx334_init_pad_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_format fmt = { 0 };
fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- imx334_fill_pad_format(imx334, &supported_modes[0], &fmt);
+
+ mutex_lock(&imx334->mutex);
+
+ imx334_fill_pad_format(imx334, imx334->cur_mode, &fmt);
+
+ __v4l2_ctrl_modify_range(imx334->link_freq_ctrl, 0,
+ __fls(imx334->menu_skip_mask),
+ ~(imx334->menu_skip_mask),
+ __ffs(imx334->menu_skip_mask));
+
+ mutex_unlock(&imx334->mutex);
return imx334_set_pad_format(sd, sd_state, &fmt);
}
@@ -1046,8 +1071,8 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
};
struct fwnode_handle *ep;
unsigned long rate;
+ unsigned int i, j;
int ret;
- int i;
if (!fwnode)
return -ENXIO;
@@ -1097,11 +1122,20 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
goto done_endpoint_free;
}
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
- if (bus_cfg.link_frequencies[i] == IMX334_LINK_FREQ)
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
+ for (j = 0; j < ARRAY_SIZE(link_freq); j++) {
+ if (bus_cfg.link_frequencies[i] == link_freq[j]) {
+ set_bit(j, &imx334->menu_skip_mask);
+ break;
+ }
+ }
+
+ if (j == ARRAY_SIZE(link_freq)) {
+ ret = dev_err_probe(imx334->dev, -EINVAL,
+ "no supported link freq found\n");
goto done_endpoint_free;
-
- ret = -EINVAL;
+ }
+ }
done_endpoint_free:
v4l2_fwnode_endpoint_free(&bus_cfg);
@@ -1232,10 +1266,10 @@ static int imx334_init_controls(struct imx334 *imx334)
imx334->link_freq_ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr,
&imx334_ctrl_ops,
V4L2_CID_LINK_FREQ,
- ARRAY_SIZE(link_freq) -
- 1,
- mode->link_freq_idx,
+ __fls(imx334->menu_skip_mask),
+ __ffs(imx334->menu_skip_mask),
link_freq);
+
if (imx334->link_freq_ctrl)
imx334->link_freq_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
@@ -1302,7 +1336,7 @@ static int imx334_probe(struct i2c_client *client)
}
/* Set default mode to max resolution */
- imx334->cur_mode = &supported_modes[0];
+ imx334->cur_mode = &supported_modes[__ffs(imx334->menu_skip_mask)];
imx334->cur_code = imx334_mbus_codes[0];
imx334->vblank = imx334->cur_mode->vblank;
@@ -1382,7 +1416,7 @@ static const struct of_device_id imx334_of_match[] = {
MODULE_DEVICE_TABLE(of, imx334_of_match);
static struct i2c_driver imx334_driver = {
- .probe_new = imx334_probe,
+ .probe = imx334_probe,
.remove = imx334_remove,
.driver = {
.name = "imx334",
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 078ede2b7a00..482a0b7f040a 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -1112,7 +1112,7 @@ static const struct of_device_id imx335_of_match[] = {
MODULE_DEVICE_TABLE(of, imx335_of_match);
static struct i2c_driver imx335_driver = {
- .probe_new = imx335_probe,
+ .probe = imx335_probe,
.remove = imx335_remove,
.driver = {
.name = "imx335",
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 25d4dbb6041e..6571a98b1e9e 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1845,7 +1845,7 @@ static struct i2c_driver imx355_i2c_driver = {
.pm = &imx355_pm_ops,
.acpi_match_table = ACPI_PTR(imx355_acpi_ids),
},
- .probe_new = imx355_probe,
+ .probe = imx355_probe,
.remove = imx355_remove,
};
module_i2c_driver(imx355_i2c_driver);
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index e1e986dc8856..c7e862ae4040 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -1293,7 +1293,7 @@ static const struct of_device_id imx412_of_match[] = {
MODULE_DEVICE_TABLE(of, imx412_of_match);
static struct i2c_driver imx412_driver = {
- .probe_new = imx412_probe,
+ .probe = imx412_probe,
.remove = imx412_remove,
.driver = {
.name = "imx412",
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index d90392df98c7..4b5d1ee9cc6b 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -1283,7 +1283,7 @@ static const struct of_device_id imx415_of_match[] = {
MODULE_DEVICE_TABLE(of, imx415_of_match);
static struct i2c_driver imx415_driver = {
- .probe_new = imx415_probe,
+ .probe = imx415_probe,
.remove = imx415_remove,
.driver = {
.name = "imx415",
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 51921068931d..b37a2aaf8ac0 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -988,7 +988,7 @@ static struct i2c_driver ir_kbd_driver = {
.driver = {
.name = "ir-kbd-i2c",
},
- .probe_new = ir_probe,
+ .probe = ir_probe,
.remove = ir_remove,
.id_table = ir_kbd_id,
};
diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c
index ae7af2cc94f5..92e49d95363d 100644
--- a/drivers/media/i2c/isl7998x.c
+++ b/drivers/media/i2c/isl7998x.c
@@ -1614,7 +1614,7 @@ static struct i2c_driver isl7998x_i2c_driver = {
.of_match_table = of_match_ptr(isl7998x_of_match),
.pm = &isl7998x_pm_ops,
},
- .probe_new = isl7998x_probe,
+ .probe = isl7998x_probe,
.remove = isl7998x_remove,
.id_table = isl7998x_id,
};
diff --git a/drivers/media/i2c/ks0127.c b/drivers/media/i2c/ks0127.c
index 0d86f2db7ad2..5c583f57e3f3 100644
--- a/drivers/media/i2c/ks0127.c
+++ b/drivers/media/i2c/ks0127.c
@@ -696,7 +696,7 @@ static struct i2c_driver ks0127_driver = {
.driver = {
.name = "ks0127",
},
- .probe_new = ks0127_probe,
+ .probe = ks0127_probe,
.remove = ks0127_remove,
.id_table = ks0127_id,
};
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 5ef613604be7..05283ac68f2d 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -467,7 +467,7 @@ static struct i2c_driver lm3560_i2c_driver = {
.name = LM3560_NAME,
.pm = NULL,
},
- .probe_new = lm3560_probe,
+ .probe = lm3560_probe,
.remove = lm3560_remove,
.id_table = lm3560_id_table,
};
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index 2a0cf74d2bed..fab3a7e05f92 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -396,7 +396,7 @@ static struct i2c_driver lm3646_i2c_driver = {
.driver = {
.name = LM3646_NAME,
},
- .probe_new = lm3646_probe,
+ .probe = lm3646_probe,
.remove = lm3646_remove,
.id_table = lm3646_id_table,
};
diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c
index 0e6507ab7e08..f8a69142aae9 100644
--- a/drivers/media/i2c/m52790.c
+++ b/drivers/media/i2c/m52790.c
@@ -172,7 +172,7 @@ static struct i2c_driver m52790_driver = {
.driver = {
.name = "m52790",
},
- .probe_new = m52790_probe,
+ .probe = m52790_probe,
.remove = m52790_remove,
.id_table = m52790_id,
};
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 1019020f3a37..70c2a2948fd4 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1429,7 +1429,7 @@ static struct i2c_driver max2175_driver = {
.name = DRIVER_NAME,
.of_match_table = max2175_of_ids,
},
- .probe_new = max2175_probe,
+ .probe = max2175_probe,
.remove = max2175_remove,
.id_table = max2175_id,
};
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 13a986b88588..88c58e0c49aa 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -1716,7 +1716,7 @@ static struct i2c_driver max9286_i2c_driver = {
.name = "max9286",
.of_match_table = of_match_ptr(max9286_dt_ids),
},
- .probe_new = max9286_probe,
+ .probe = max9286_probe,
.remove = max9286_remove,
};
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index dbd2f0bd3651..5b72d4434224 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -433,7 +433,7 @@ static struct i2c_driver ml86v7667_i2c_driver = {
.driver = {
.name = DRV_NAME,
},
- .probe_new = ml86v7667_probe,
+ .probe = ml86v7667_probe,
.remove = ml86v7667_remove,
.id_table = ml86v7667_id,
};
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 12032e28b428..bec76801487a 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -892,7 +892,7 @@ static struct i2c_driver msp_driver = {
.name = "msp3400",
.pm = &msp3400_pm_ops,
},
- .probe_new = msp_probe,
+ .probe = msp_probe,
.remove = msp_remove,
.id_table = msp_id,
};
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index ebf9cf1e1bce..ce9568e8391c 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -877,7 +877,7 @@ static struct i2c_driver mt9m001_i2c_driver = {
.pm = &mt9m001_pm_ops,
.of_match_table = mt9m001_of_match,
},
- .probe_new = mt9m001_probe,
+ .probe = mt9m001_probe,
.remove = mt9m001_remove,
.id_table = mt9m001_id,
};
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index f5fe272d1205..2878d328fc01 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1384,7 +1384,7 @@ static struct i2c_driver mt9m111_i2c_driver = {
.name = "mt9m111",
.of_match_table = of_match_ptr(mt9m111_of_match),
},
- .probe_new = mt9m111_probe,
+ .probe = mt9m111_probe,
.remove = mt9m111_remove,
.id_table = mt9m111_id,
};
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 9e023a4b9bd1..348f1e1098fb 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -1248,7 +1248,7 @@ static struct i2c_driver mt9p031_i2c_driver = {
.of_match_table = of_match_ptr(mt9p031_of_match),
.name = "mt9p031",
},
- .probe_new = mt9p031_probe,
+ .probe = mt9p031_probe,
.remove = mt9p031_remove,
.id_table = mt9p031_id,
};
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index a82f056787b8..93f34b767027 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -1119,7 +1119,7 @@ static struct i2c_driver mt9t112_i2c_driver = {
.driver = {
.name = "mt9t112",
},
- .probe_new = mt9t112_probe,
+ .probe = mt9t112_probe,
.remove = mt9t112_remove,
.id_table = mt9t112_id,
};
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index c54c7fbf0963..774861ba7747 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -585,7 +585,7 @@ static struct i2c_driver mt9v011_driver = {
.driver = {
.name = "mt9v011",
},
- .probe_new = mt9v011_probe,
+ .probe = mt9v011_probe,
.remove = mt9v011_remove,
.id_table = mt9v011_id,
};
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 7cfd4ebdd2e6..00e7bc6e3235 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -1296,7 +1296,7 @@ static struct i2c_driver mt9v032_driver = {
.name = "mt9v032",
.of_match_table = of_match_ptr(mt9v032_of_match),
},
- .probe_new = mt9v032_probe,
+ .probe = mt9v032_probe,
.remove = mt9v032_remove,
.id_table = mt9v032_id,
};
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 46d91cd0870c..1f7edc0f5b1a 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1265,7 +1265,7 @@ static struct i2c_driver mt9v111_driver = {
.name = "mt9v111",
.of_match_table = mt9v111_of_match,
},
- .probe_new = mt9v111_probe,
+ .probe = mt9v111_probe,
.remove = mt9v111_remove,
};
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index 35663c10fcd9..b5948759342e 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -1115,7 +1115,7 @@ static struct i2c_driver og01a1b_i2c_driver = {
.pm = &og01a1b_pm_ops,
.acpi_match_table = ACPI_PTR(og01a1b_acpi_ids),
},
- .probe_new = og01a1b_probe,
+ .probe = og01a1b_probe,
.remove = og01a1b_remove,
};
diff --git a/drivers/media/i2c/ov01a10.c b/drivers/media/i2c/ov01a10.c
new file mode 100644
index 000000000000..de5bc19e715b
--- /dev/null
+++ b/drivers/media/i2c/ov01a10.c
@@ -0,0 +1,1004 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Intel Corporation.
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV01A10_LINK_FREQ_400MHZ 400000000ULL
+#define OV01A10_SCLK 40000000LL
+#define OV01A10_DATA_LANES 1
+
+#define OV01A10_REG_CHIP_ID 0x300a
+#define OV01A10_CHIP_ID 0x560141
+
+#define OV01A10_REG_MODE_SELECT 0x0100
+#define OV01A10_MODE_STANDBY 0x00
+#define OV01A10_MODE_STREAMING 0x01
+
+/* pixel array */
+#define OV01A10_PIXEL_ARRAY_WIDTH 1296
+#define OV01A10_PIXEL_ARRAY_HEIGHT 816
+#define OV01A10_ACITVE_WIDTH 1280
+#define OV01A10_ACITVE_HEIGHT 800
+
+/* vertical and horizontal timings */
+#define OV01A10_REG_VTS 0x380e
+#define OV01A10_VTS_DEF 0x0380
+#define OV01A10_VTS_MIN 0x0380
+#define OV01A10_VTS_MAX 0xffff
+#define OV01A10_HTS_DEF 1488
+
+/* exposure controls */
+#define OV01A10_REG_EXPOSURE 0x3501
+#define OV01A10_EXPOSURE_MIN 4
+#define OV01A10_EXPOSURE_MAX_MARGIN 8
+#define OV01A10_EXPOSURE_STEP 1
+
+/* analog gain controls */
+#define OV01A10_REG_ANALOG_GAIN 0x3508
+#define OV01A10_ANAL_GAIN_MIN 0x100
+#define OV01A10_ANAL_GAIN_MAX 0xffff
+#define OV01A10_ANAL_GAIN_STEP 1
+
+/* digital gain controls */
+#define OV01A10_REG_DIGITAL_GAIN_B 0x350a
+#define OV01A10_REG_DIGITAL_GAIN_GB 0x3510
+#define OV01A10_REG_DIGITAL_GAIN_GR 0x3513
+#define OV01A10_REG_DIGITAL_GAIN_R 0x3516
+#define OV01A10_DGTL_GAIN_MIN 0
+#define OV01A10_DGTL_GAIN_MAX 0x3ffff
+#define OV01A10_DGTL_GAIN_STEP 1
+#define OV01A10_DGTL_GAIN_DEFAULT 1024
+
+/* test pattern control */
+#define OV01A10_REG_TEST_PATTERN 0x4503
+#define OV01A10_TEST_PATTERN_ENABLE BIT(7)
+#define OV01A10_LINK_FREQ_400MHZ_INDEX 0
+
+/* flip and mirror control */
+#define OV01A10_REG_FORMAT1 0x3820
+#define OV01A10_VFLIP_MASK BIT(4)
+#define OV01A10_HFLIP_MASK BIT(3)
+
+/* window offset */
+#define OV01A10_REG_X_WIN 0x3811
+#define OV01A10_REG_Y_WIN 0x3813
+
+struct ov01a10_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov01a10_reg_list {
+ u32 num_of_regs;
+ const struct ov01a10_reg *regs;
+};
+
+struct ov01a10_link_freq_config {
+ const struct ov01a10_reg_list reg_list;
+};
+
+struct ov01a10_mode {
+ u32 width;
+ u32 height;
+ u32 hts;
+ u32 vts_def;
+ u32 vts_min;
+ u32 link_freq_index;
+
+ const struct ov01a10_reg_list reg_list;
+};
+
+static const struct ov01a10_reg mipi_data_rate_720mbps[] = {
+ {0x0103, 0x01},
+ {0x0302, 0x00},
+ {0x0303, 0x06},
+ {0x0304, 0x01},
+ {0x0305, 0xe0},
+ {0x0306, 0x00},
+ {0x0308, 0x01},
+ {0x0309, 0x00},
+ {0x030c, 0x01},
+ {0x0322, 0x01},
+ {0x0323, 0x06},
+ {0x0324, 0x01},
+ {0x0325, 0x68},
+};
+
+static const struct ov01a10_reg sensor_1280x800_setting[] = {
+ {0x3002, 0xa1},
+ {0x301e, 0xf0},
+ {0x3022, 0x01},
+ {0x3501, 0x03},
+ {0x3502, 0x78},
+ {0x3504, 0x0c},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3601, 0xc0},
+ {0x3603, 0x71},
+ {0x3610, 0x68},
+ {0x3611, 0x86},
+ {0x3640, 0x10},
+ {0x3641, 0x80},
+ {0x3642, 0xdc},
+ {0x3646, 0x55},
+ {0x3647, 0x57},
+ {0x364b, 0x00},
+ {0x3653, 0x10},
+ {0x3655, 0x00},
+ {0x3656, 0x00},
+ {0x365f, 0x0f},
+ {0x3661, 0x45},
+ {0x3662, 0x24},
+ {0x3663, 0x11},
+ {0x3664, 0x07},
+ {0x3709, 0x34},
+ {0x370b, 0x6f},
+ {0x3714, 0x22},
+ {0x371b, 0x27},
+ {0x371c, 0x67},
+ {0x371d, 0xa7},
+ {0x371e, 0xe7},
+ {0x3730, 0x81},
+ {0x3733, 0x10},
+ {0x3734, 0x40},
+ {0x3737, 0x04},
+ {0x3739, 0x1c},
+ {0x3767, 0x00},
+ {0x376c, 0x81},
+ {0x3772, 0x14},
+ {0x37c2, 0x04},
+ {0x37d8, 0x03},
+ {0x37d9, 0x0c},
+ {0x37e0, 0x00},
+ {0x37e1, 0x08},
+ {0x37e2, 0x10},
+ {0x37e3, 0x04},
+ {0x37e4, 0x04},
+ {0x37e5, 0x03},
+ {0x37e6, 0x04},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x05},
+ {0x3805, 0x0f},
+ {0x3806, 0x03},
+ {0x3807, 0x2f},
+ {0x3808, 0x05},
+ {0x3809, 0x00},
+ {0x380a, 0x03},
+ {0x380b, 0x20},
+ {0x380c, 0x02},
+ {0x380d, 0xe8},
+ {0x380e, 0x03},
+ {0x380f, 0x80},
+ {0x3810, 0x00},
+ {0x3811, 0x08},
+ {0x3812, 0x00},
+ {0x3813, 0x08},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+ {0x3820, 0xa0},
+ {0x3822, 0x13},
+ {0x3832, 0x28},
+ {0x3833, 0x10},
+ {0x3b00, 0x00},
+ {0x3c80, 0x00},
+ {0x3c88, 0x02},
+ {0x3c8c, 0x07},
+ {0x3c8d, 0x40},
+ {0x3cc7, 0x80},
+ {0x4000, 0xc3},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x02},
+ {0x4009, 0x19},
+ {0x400a, 0x01},
+ {0x400b, 0x6c},
+ {0x4011, 0x00},
+ {0x4041, 0x00},
+ {0x4300, 0xff},
+ {0x4301, 0x00},
+ {0x4302, 0x0f},
+ {0x4503, 0x00},
+ {0x4601, 0x50},
+ {0x4800, 0x64},
+ {0x481f, 0x34},
+ {0x4825, 0x33},
+ {0x4837, 0x11},
+ {0x4881, 0x40},
+ {0x4883, 0x01},
+ {0x4890, 0x00},
+ {0x4901, 0x00},
+ {0x4902, 0x00},
+ {0x4b00, 0x2a},
+ {0x4b0d, 0x00},
+ {0x450a, 0x04},
+ {0x450b, 0x00},
+ {0x5000, 0x65},
+ {0x5200, 0x18},
+ {0x5004, 0x00},
+ {0x5080, 0x40},
+ {0x0305, 0xf4},
+ {0x0325, 0xc2},
+};
+
+static const char * const ov01a10_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Color Bar type 4",
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV01A10_LINK_FREQ_400MHZ,
+};
+
+static const struct ov01a10_link_freq_config link_freq_configs[] = {
+ [OV01A10_LINK_FREQ_400MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps),
+ .regs = mipi_data_rate_720mbps,
+ }
+ },
+};
+
+static const struct ov01a10_mode supported_modes[] = {
+ {
+ .width = OV01A10_ACITVE_WIDTH,
+ .height = OV01A10_ACITVE_HEIGHT,
+ .hts = OV01A10_HTS_DEF,
+ .vts_def = OV01A10_VTS_DEF,
+ .vts_min = OV01A10_VTS_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(sensor_1280x800_setting),
+ .regs = sensor_1280x800_setting,
+ },
+ .link_freq_index = OV01A10_LINK_FREQ_400MHZ_INDEX,
+ },
+};
+
+struct ov01a10 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* v4l2 controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ const struct ov01a10_mode *cur_mode;
+
+ /* streaming state */
+ bool streaming;
+};
+
+static inline struct ov01a10 *to_ov01a10(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ov01a10, sd);
+}
+
+static int ov01a10_read_reg(struct ov01a10 *ov01a10, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret = 0;
+
+ if (len > sizeof(data_buf))
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[sizeof(data_buf) - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+
+ if (ret != ARRAY_SIZE(msgs))
+ return ret < 0 ? ret : -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int ov01a10_write_reg(struct ov01a10 *ov01a10, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ u8 buf[6];
+ int ret = 0;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
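+
+/*
+ * The two helpers above share one convention: reads land right-aligned in a
+ * zeroed 4-byte buffer, so get_unaligned_be32() yields the value for any
+ * len <= 4; writes shift the value up so that the len most significant bytes
+ * of the 32-bit big-endian representation follow the 16-bit register address.
+ */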
+
+static int ov01a10_write_reg_list(struct ov01a10 *ov01a10,
+ const struct ov01a10_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = ov01a10_write_reg(ov01a10, r_list->regs[i].address, 1,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "write reg 0x%4.4x err = %d\n",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov01a10_update_digital_gain(struct ov01a10 *ov01a10, u32 d_gain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ u32 real = d_gain << 6;
+ int ret = 0;
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_B, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set DIGITAL_GAIN_B\n");
+ return ret;
+ }
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_GB, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set DIGITAL_GAIN_GB\n");
+ return ret;
+ }
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_GR, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set DIGITAL_GAIN_GR\n");
+ return ret;
+ }
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_R, 3, real);
+ if (ret)
+ dev_err(&client->dev, "failed to set DIGITAL_GAIN_R\n");
+
+ return ret;
+}
+
+static int ov01a10_test_pattern(struct ov01a10 *ov01a10, u32 pattern)
+{
+ if (!pattern)
+ return 0;
+
+ pattern = (pattern - 1) | OV01A10_TEST_PATTERN_ENABLE;
+
+ return ov01a10_write_reg(ov01a10, OV01A10_REG_TEST_PATTERN, 1, pattern);
+}
+
+/* for vflip and hflip, use 0x9 as window offset to keep the bayer */
+static int ov01a10_set_hflip(struct ov01a10 *ov01a10, u32 hflip)
+{
+ int ret;
+ u32 val, offset;
+
+ offset = hflip ? 0x9 : 0x8;
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_X_WIN, 1, offset);
+ if (ret)
+ return ret;
+
+ ret = ov01a10_read_reg(ov01a10, OV01A10_REG_FORMAT1, 1, &val);
+ if (ret)
+ return ret;
+
+ val = hflip ? val | FIELD_PREP(OV01A10_HFLIP_MASK, 0x1) :
+ val & ~OV01A10_HFLIP_MASK;
+
+ return ov01a10_write_reg(ov01a10, OV01A10_REG_FORMAT1, 1, val);
+}
+
+static int ov01a10_set_vflip(struct ov01a10 *ov01a10, u32 vflip)
+{
+ int ret;
+ u32 val, offset;
+
+ offset = vflip ? 0x9 : 0x8;
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_Y_WIN, 1, offset);
+ if (ret)
+ return ret;
+
+ ret = ov01a10_read_reg(ov01a10, OV01A10_REG_FORMAT1, 1, &val);
+ if (ret)
+ return ret;
+
+ val = vflip ? val | FIELD_PREP(OV01A10_VFLIP_MASK, 0x1) :
+ val & ~OV01A10_VFLIP_MASK;
+
+ return ov01a10_write_reg(ov01a10, OV01A10_REG_FORMAT1, 1, val);
+}
+
+static int ov01a10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov01a10 *ov01a10 = container_of(ctrl->handler,
+ struct ov01a10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ exposure_max = ov01a10->cur_mode->height + ctrl->val -
+ OV01A10_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(ov01a10->exposure,
+ ov01a10->exposure->minimum,
+ exposure_max, ov01a10->exposure->step,
+ exposure_max);
+ }
+
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_ANALOG_GAIN, 2,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov01a10_update_digital_gain(ov01a10, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_EXPOSURE, 2,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_VTS, 2,
+ ov01a10->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov01a10_test_pattern(ov01a10, ctrl->val);
+ break;
+
+ case V4L2_CID_HFLIP:
+ ov01a10_set_hflip(ov01a10, ctrl->val);
+ break;
+
+ case V4L2_CID_VFLIP:
+ ov01a10_set_vflip(ov01a10, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov01a10_ctrl_ops = {
+ .s_ctrl = ov01a10_set_ctrl,
+};
+
+static int ov01a10_init_controls(struct ov01a10 *ov01a10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ struct v4l2_fwnode_device_properties props;
+ u32 vblank_min, vblank_max, vblank_default;
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ const struct ov01a10_mode *cur_mode;
+ s64 exposure_max, h_blank;
+ int ret = 0;
+ int size;
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr = &ov01a10->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12);
+ if (ret)
+ return ret;
+
+ cur_mode = ov01a10->cur_mode;
+ size = ARRAY_SIZE(link_freq_menu_items);
+
+ ov01a10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov01a10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ size - 1, 0,
+ link_freq_menu_items);
+ if (ov01a10->link_freq)
+ ov01a10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ov01a10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ OV01A10_SCLK, 1, OV01A10_SCLK);
+
+ vblank_min = cur_mode->vts_min - cur_mode->height;
+ vblank_max = OV01A10_VTS_MAX - cur_mode->height;
+ vblank_default = cur_mode->vts_def - cur_mode->height;
+ ov01a10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_default);
+
+ h_blank = cur_mode->hts - cur_mode->width;
+ ov01a10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank,
+ 1, h_blank);
+ if (ov01a10->hblank)
+ ov01a10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV01A10_ANAL_GAIN_MIN, OV01A10_ANAL_GAIN_MAX,
+ OV01A10_ANAL_GAIN_STEP, OV01A10_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV01A10_DGTL_GAIN_MIN, OV01A10_DGTL_GAIN_MAX,
+ OV01A10_DGTL_GAIN_STEP, OV01A10_DGTL_GAIN_DEFAULT);
+
+ exposure_max = cur_mode->vts_def - OV01A10_EXPOSURE_MAX_MARGIN;
+ ov01a10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV01A10_EXPOSURE_MIN,
+ exposure_max,
+ OV01A10_EXPOSURE_STEP,
+ exposure_max);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov01a10_test_pattern_menu) - 1,
+ 0, 0, ov01a10_test_pattern_menu);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov01a10_ctrl_ops,
+ &props);
+ if (ret)
+ goto fail;
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ goto fail;
+ }
+
+ ov01a10->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+fail:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static void ov01a10_update_pad_format(const struct ov01a10_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+}
+
+static int ov01a10_start_streaming(struct ov01a10 *ov01a10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ const struct ov01a10_reg_list *reg_list;
+ int link_freq_index;
+ int ret = 0;
+
+ link_freq_index = ov01a10->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov01a10_write_reg_list(ov01a10, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls\n");
+ return ret;
+ }
+
+ reg_list = &ov01a10->cur_mode->reg_list;
+ ret = ov01a10_write_reg_list(ov01a10, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode\n");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov01a10->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_MODE_SELECT, 1,
+ OV01A10_MODE_STREAMING);
+ if (ret)
+ dev_err(&client->dev, "failed to start streaming\n");
+
+ return ret;
+}
+
+static void ov01a10_stop_streaming(struct ov01a10 *ov01a10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ int ret = 0;
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_MODE_SELECT, 1,
+ OV01A10_MODE_STANDBY);
+ if (ret)
+ dev_err(&client->dev, "failed to stop streaming\n");
+}
+
+static int ov01a10_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ if (ov01a10->streaming == enable)
+ goto unlock;
+
+ if (enable) {
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ goto unlock;
+
+ ret = ov01a10_start_streaming(ov01a10);
+ if (ret) {
+ pm_runtime_put(&client->dev);
+ goto unlock;
+ }
+
+ goto done;
+ }
+
+ ov01a10_stop_streaming(ov01a10);
+ pm_runtime_put(&client->dev);
+done:
+ ov01a10->streaming = enable;
+unlock:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int __maybe_unused ov01a10_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ if (ov01a10->streaming)
+ ov01a10_stop_streaming(ov01a10);
+
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+}
+
+static int __maybe_unused ov01a10_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ if (!ov01a10->streaming)
+ goto exit;
+
+ ret = ov01a10_start_streaming(ov01a10);
+ if (ret) {
+ ov01a10->streaming = false;
+ ov01a10_stop_streaming(ov01a10);
+ }
+
+exit:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ov01a10_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+ const struct ov01a10_mode *mode;
+ struct v4l2_mbus_framefmt *format;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ ov01a10_update_pad_format(mode, &fmt->format);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ ov01a10->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov01a10->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(ov01a10->pixel_rate, OV01A10_SCLK);
+
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(ov01a10->vblank,
+ mode->vts_min - mode->height,
+ OV01A10_VTS_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov01a10->vblank, vblank_def);
+ h_blank = mode->hts - mode->width;
+ __v4l2_ctrl_modify_range(ov01a10->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ format = v4l2_subdev_get_pad_format(sd, sd_state, fmt->stream);
+ *format = fmt->format;
+
+ return 0;
+}
+
+static int ov01a10_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .format = {
+ .width = OV01A10_ACITVE_WIDTH,
+ .height = OV01A10_ACITVE_HEIGHT,
+ },
+ };
+
+ ov01a10_set_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static int ov01a10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov01a10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes) ||
+ fse->code != MEDIA_BUS_FMT_SBGGR10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov01a10_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV01A10_PIXEL_ARRAY_WIDTH;
+ sel->r.height = OV01A10_PIXEL_ARRAY_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = (OV01A10_PIXEL_ARRAY_HEIGHT -
+ OV01A10_ACITVE_HEIGHT) / 2;
+ sel->r.left = (OV01A10_PIXEL_ARRAY_WIDTH -
+ OV01A10_ACITVE_WIDTH) / 2;
+ sel->r.width = OV01A10_ACITVE_WIDTH;
+ sel->r.height = OV01A10_ACITVE_HEIGHT;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_subdev_core_ops ov01a10_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops ov01a10_video_ops = {
+ .s_stream = ov01a10_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov01a10_pad_ops = {
+ .init_cfg = ov01a10_init_cfg,
+ .set_fmt = ov01a10_set_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .get_selection = ov01a10_get_selection,
+ .enum_mbus_code = ov01a10_enum_mbus_code,
+ .enum_frame_size = ov01a10_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov01a10_subdev_ops = {
+ .core = &ov01a10_core_ops,
+ .video = &ov01a10_video_ops,
+ .pad = &ov01a10_pad_ops,
+};
+
+static const struct media_entity_operations ov01a10_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int ov01a10_identify_module(struct ov01a10 *ov01a10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ int ret;
+ u32 val;
+
+ ret = ov01a10_read_reg(ov01a10, OV01A10_REG_CHIP_ID, 3, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV01A10_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV01A10_CHIP_ID, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ov01a10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
+}
+
+static int ov01a10_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ov01a10 *ov01a10;
+ int ret = 0;
+
+ ov01a10 = devm_kzalloc(dev, sizeof(*ov01a10), GFP_KERNEL);
+ if (!ov01a10)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ov01a10->sd, client, &ov01a10_subdev_ops);
+
+ ret = ov01a10_identify_module(ov01a10);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to find sensor\n");
+
+ ov01a10->cur_mode = &supported_modes[0];
+
+ ret = ov01a10_init_controls(ov01a10);
+ if (ret) {
+ dev_err(dev, "failed to init controls: %d\n", ret);
+ return ret;
+ }
+
+ ov01a10->sd.state_lock = ov01a10->ctrl_handler.lock;
+ ov01a10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ ov01a10->sd.entity.ops = &ov01a10_subdev_entity_ops;
+ ov01a10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov01a10->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&ov01a10->sd.entity, 1, &ov01a10->pad);
+ if (ret) {
+ dev_err(dev, "Failed to init entity pads: %d\n", ret);
+ goto err_handler_free;
+ }
+
+ ret = v4l2_subdev_init_finalize(&ov01a10->sd);
+ if (ret) {
+ dev_err(dev, "Failed to allocate subdev state: %d\n", ret);
+ goto err_media_entity_cleanup;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov01a10->sd);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdev: %d\n", ret);
+ goto err_media_entity_cleanup;
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ return 0;
+
+err_media_entity_cleanup:
+ media_entity_cleanup(&ov01a10->sd.entity);
+
+err_handler_free:
+ v4l2_ctrl_handler_free(ov01a10->sd.ctrl_handler);
+
+ return ret;
+}
+
+static const struct dev_pm_ops ov01a10_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov01a10_suspend, ov01a10_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov01a10_acpi_ids[] = {
+ { "OVTI01A0" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov01a10_acpi_ids);
+#endif
+
+static struct i2c_driver ov01a10_i2c_driver = {
+ .driver = {
+ .name = "ov01a10",
+ .pm = &ov01a10_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov01a10_acpi_ids),
+ },
+ .probe_new = ov01a10_probe,
+ .remove = ov01a10_remove,
+};
+
+module_i2c_driver(ov01a10_i2c_driver);
+
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Wang Yating <yating.wang@intel.com>");
+MODULE_DESCRIPTION("OmniVision OV01A10 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index 2c1eb724d8e5..741d977a76f3 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -1002,8 +1002,8 @@ static struct i2c_driver ov02a10_i2c_driver = {
.pm = &ov02a10_pm_ops,
.of_match_table = ov02a10_of_match,
},
- .probe_new = &ov02a10_probe,
- .remove = &ov02a10_remove,
+ .probe = ov02a10_probe,
+ .remove = ov02a10_remove,
};
module_i2c_driver(ov02a10_i2c_driver);
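
The remaining hunks in this series are the same mechanical conversion: the temporary .probe_new callback becomes .probe, which now takes only the client (no struct i2c_device_id argument), and stray address-of operators on the callbacks are dropped. For reference, a minimal sketch of an I2C driver using the post-conversion prototypes; the "foo" names are placeholders, not part of any patch here.

#include <linux/i2c.h>
#include <linux/module.h>

static int foo_probe(struct i2c_client *client)
{
	/* .probe now has the prototype .probe_new used to have */
	return 0;
}

static void foo_remove(struct i2c_client *client)
{
}

static const struct i2c_device_id foo_id[] = {
	{ "foo" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe = foo_probe,	/* was .probe_new before the conversion */
	.remove = foo_remove,
	.id_table = foo_id,
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");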
diff --git a/drivers/media/i2c/ov08d10.c b/drivers/media/i2c/ov08d10.c
index a39e086a51c5..7d55d4ca24de 100644
--- a/drivers/media/i2c/ov08d10.c
+++ b/drivers/media/i2c/ov08d10.c
@@ -1520,7 +1520,7 @@ static struct i2c_driver ov08d10_i2c_driver = {
.pm = &ov08d10_pm_ops,
.acpi_match_table = ACPI_PTR(ov08d10_acpi_ids),
},
- .probe_new = ov08d10_probe,
+ .probe = ov08d10_probe,
.remove = ov08d10_remove,
};
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index 72ae7fba94eb..77bcdcd0824c 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -3313,7 +3313,7 @@ static struct i2c_driver ov08x40_i2c_driver = {
.pm = &ov08x40_pm_ops,
.acpi_match_table = ACPI_PTR(ov08x40_acpi_ids),
},
- .probe_new = ov08x40_probe,
+ .probe = ov08x40_probe,
.remove = ov08x40_remove,
};
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 69a7a2c590db..3db3e64fa3ff 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1806,7 +1806,7 @@ static struct i2c_driver ov13858_i2c_driver = {
.pm = &ov13858_pm_ops,
.acpi_match_table = ACPI_PTR(ov13858_acpi_ids),
},
- .probe_new = ov13858_probe,
+ .probe = ov13858_probe,
.remove = ov13858_remove,
.id_table = ov13858_id_table,
};
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index c1430044fb1e..6110fb1e6bc6 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -1496,7 +1496,7 @@ static struct i2c_driver ov13b10_i2c_driver = {
.pm = &ov13b10_pm_ops,
.acpi_match_table = ACPI_PTR(ov13b10_acpi_ids),
},
- .probe_new = ov13b10_probe,
+ .probe = ov13b10_probe,
.remove = ov13b10_remove,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 39d56838a4ef..ec801a81c2d0 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -1298,7 +1298,7 @@ static struct i2c_driver ov2640_i2c_driver = {
.name = "ov2640",
.of_match_table = of_match_ptr(ov2640_of_match),
},
- .probe_new = ov2640_probe,
+ .probe = ov2640_probe,
.remove = ov2640_remove,
.id_table = ov2640_id,
};
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 42fc64ada08c..5429bd2eb053 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1584,7 +1584,7 @@ static struct i2c_driver ov2659_i2c_driver = {
.pm = &ov2659_pm_ops,
.of_match_table = of_match_ptr(ov2659_of_match),
},
- .probe_new = ov2659_probe,
+ .probe = ov2659_probe,
.remove = ov2659_remove,
.id_table = ov2659_id,
};
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 54153bf66bdd..d06e9fc37f77 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -1158,7 +1158,7 @@ static struct i2c_driver ov2680_i2c_driver = {
.pm = &ov2680_pm_ops,
.of_match_table = of_match_ptr(ov2680_dt_ids),
},
- .probe_new = ov2680_probe,
+ .probe = ov2680_probe,
.remove = ov2680_remove,
};
module_i2c_driver(ov2680_i2c_driver);
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index f119a93e7c64..303793e1f97d 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -903,8 +903,8 @@ static struct i2c_driver ov2685_i2c_driver = {
.pm = &ov2685_pm_ops,
.of_match_table = of_match_ptr(ov2685_of_match),
},
- .probe_new = &ov2685_probe,
- .remove = &ov2685_remove,
+ .probe = ov2685_probe,
+ .remove = ov2685_remove,
};
module_i2c_driver(ov2685_i2c_driver);
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 89d126240c34..158d934733c3 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1215,7 +1215,7 @@ static struct i2c_driver ov2740_i2c_driver = {
.pm = pm_sleep_ptr(&ov2740_pm_ops),
.acpi_match_table = ov2740_acpi_ids,
},
- .probe_new = ov2740_probe,
+ .probe = ov2740_probe,
.remove = ov2740_remove,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c
index c602e507d42b..fda217d2cb10 100644
--- a/drivers/media/i2c/ov4689.c
+++ b/drivers/media/i2c/ov4689.c
@@ -1008,7 +1008,7 @@ static struct i2c_driver ov4689_i2c_driver = {
.pm = &ov4689_pm_ops,
.of_match_table = ov4689_of_match,
},
- .probe_new = ov4689_probe,
+ .probe = ov4689_probe,
.remove = ov4689_remove,
};
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 1536649b9e90..36b509714c8c 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2815,7 +2815,6 @@ static int ov5640_get_fmt(struct v4l2_subdev *sd,
static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt,
- enum ov5640_frame_rate fr,
const struct ov5640_mode_info **new_mode)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
@@ -2927,19 +2926,6 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
hblank, hblank, 1, hblank);
vblank = timings->vblank_def;
-
- if (sensor->current_fr != mode->def_fps) {
- /*
- * Compute the vertical blanking according to the framerate
- * configured with s_frame_interval.
- */
- int fie_num = sensor->frame_interval.numerator;
- int fie_denom = sensor->frame_interval.denominator;
-
- vblank = ((fie_num * pixel_rate / fie_denom) / timings->htot) -
- mode->height;
- }
-
__v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
OV5640_MAX_VTS - mode->height, 1, vblank);
__v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
@@ -2975,8 +2961,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- ret = ov5640_try_fmt_internal(sd, mbus_fmt,
- sensor->current_fr, &new_mode);
+ ret = ov5640_try_fmt_internal(sd, mbus_fmt, &new_mode);
if (ret)
goto out;
@@ -3851,7 +3836,7 @@ static int ov5640_probe(struct i2c_client *client)
/*
* default init sequence initialize sensor to
- * YUV422 UYVY VGA@30fps
+ * YUV422 UYVY VGA (30 fps in parallel mode, 60 fps in MIPI CSI-2 mode)
*/
sensor->frame_interval.numerator = 1;
sensor->frame_interval.denominator = ov5640_framerates[OV5640_30_FPS];
@@ -4011,7 +3996,7 @@ static struct i2c_driver ov5640_i2c_driver = {
.pm = &ov5640_pm_ops,
},
.id_table = ov5640_id,
- .probe_new = ov5640_probe,
+ .probe = ov5640_probe,
.remove = ov5640_remove,
};
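
The ov5640 hunks above drop the vertical-blanking computation that derived the total line count from the frame interval set through s_frame_interval; vblank now comes from the mode's default and is only adjusted through V4L2_CID_VBLANK. For reference, a sketch of the arithmetic the removed block performed; names and types are assumed and this is not driver code.

static long vblank_from_interval(long pixel_rate, long htot, long height,
				 long num, long denom)
{
	/* total lines per frame = frame period / line period */
	long vtot = (num * pixel_rate / denom) / htot;

	return vtot - height;	/* vertical blanking in lines */
}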
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index c8999fc4f26f..a70db7e601a4 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -1286,7 +1286,7 @@ static struct i2c_driver ov5645_i2c_driver = {
.name = "ov5645",
.pm = &ov5645_pm_ops,
},
- .probe_new = ov5645_probe,
+ .probe = ov5645_probe,
.remove = ov5645_remove,
.id_table = ov5645_id,
};
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 233576ee9503..8de398423b7c 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1515,7 +1515,7 @@ static struct i2c_driver ov5647_driver = {
.name = "ov5647",
.pm = &ov5647_pm_ops,
},
- .probe_new = ov5647_probe,
+ .probe = ov5647_probe,
.remove = ov5647_remove,
.id_table = ov5647_id,
};
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index 17465fcf28e3..aa10eb4e3991 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -2616,8 +2616,8 @@ static struct i2c_driver ov5648_driver = {
.of_match_table = ov5648_of_match,
.pm = &ov5648_pm_ops,
},
- .probe_new = ov5648_probe,
- .remove = ov5648_remove,
+ .probe = ov5648_probe,
+ .remove = ov5648_remove,
};
module_i2c_driver(ov5648_driver);
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index c026610d0f31..d722348b938b 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -2853,7 +2853,7 @@ static struct i2c_driver ov5670_i2c_driver = {
.acpi_match_table = ACPI_PTR(ov5670_acpi_ids),
.of_match_table = ov5670_of_ids,
},
- .probe_new = ov5670_probe,
+ .probe = ov5670_probe,
.remove = ov5670_remove,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index d55180b3b7aa..700c4b69846f 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1435,7 +1435,7 @@ static struct i2c_driver ov5675_i2c_driver = {
.acpi_match_table = ACPI_PTR(ov5675_acpi_ids),
.of_match_table = ov5675_of_match,
},
- .probe_new = ov5675_probe,
+ .probe = ov5675_probe,
.remove = ov5675_remove,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index e3c3bed69ad6..7f9212cce239 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -404,8 +404,8 @@ static int ov5693_read_reg(struct ov5693_device *ov5693, u32 addr, u32 *value)
ret = i2c_transfer(client->adapter, msg, 2);
if (ret < 0)
return dev_err_probe(&client->dev, ret,
- "Failed to read register 0x%04x: %d\n",
- addr & OV5693_REG_ADDR_MASK, ret);
+ "Failed to read register 0x%04x\n",
+ addr & OV5693_REG_ADDR_MASK);
*value = 0;
for (i = 0; i < len; ++i) {
@@ -1554,7 +1554,7 @@ static struct i2c_driver ov5693_driver = {
.of_match_table = ov5693_of_match,
.pm = &ov5693_pm_ops,
},
- .probe_new = ov5693_probe,
+ .probe = ov5693_probe,
.remove = ov5693_remove,
};
module_i2c_driver(ov5693_driver);
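
The ov5693 hunk drops the trailing error code from the message because dev_err_probe() already appends it to the log line (and stays quiet for -EPROBE_DEFER) while returning the error for the caller. A minimal sketch of the idiom; the surrounding helper is hypothetical.

#include <linux/device.h>

static int report_read_failure(struct device *dev, int ret, unsigned int addr)
{
	/* dev_err_probe() logs the message plus the errno and returns ret */
	return dev_err_probe(dev, ret,
			     "Failed to read register 0x%04x\n", addr);
}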
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index b287c28920a6..3023b7254167 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1392,8 +1392,8 @@ static struct i2c_driver ov5695_i2c_driver = {
.pm = &ov5695_pm_ops,
.of_match_table = of_match_ptr(ov5695_of_match),
},
- .probe_new = &ov5695_probe,
- .remove = &ov5695_remove,
+ .probe = ov5695_probe,
+ .remove = ov5695_remove,
};
module_i2c_driver(ov5695_i2c_driver);
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 4c0ea2ae671b..1ad07935f046 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -1113,7 +1113,7 @@ static struct i2c_driver ov6650_i2c_driver = {
.driver = {
.name = "ov6650",
},
- .probe_new = ov6650_probe,
+ .probe = ov6650_probe,
.remove = ov6650_remove,
.id_table = ov6650_id,
};
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 88e987435285..675fb37a6fea 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -1806,7 +1806,7 @@ static struct i2c_driver ov7251_i2c_driver = {
.name = "ov7251",
.pm = &ov7251_pm_ops,
},
- .probe_new = ov7251_probe,
+ .probe = ov7251_probe,
.remove = ov7251_remove,
};
diff --git a/drivers/media/i2c/ov7640.c b/drivers/media/i2c/ov7640.c
index e6751d5cc64b..293f5f404358 100644
--- a/drivers/media/i2c/ov7640.c
+++ b/drivers/media/i2c/ov7640.c
@@ -86,7 +86,7 @@ static struct i2c_driver ov7640_driver = {
.driver = {
.name = "ov7640",
},
- .probe_new = ov7640_probe,
+ .probe = ov7640_probe,
.remove = ov7640_remove,
.id_table = ov7640_id,
};
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index ecbded4f0765..2f55491ef571 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -2033,7 +2033,7 @@ static struct i2c_driver ov7670_driver = {
.name = "ov7670",
.of_match_table = of_match_ptr(ov7670_of_match),
},
- .probe_new = ov7670_probe,
+ .probe = ov7670_probe,
.remove = ov7670_remove,
.id_table = ov7670_id,
};
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index a238e63425f8..386d69c8e074 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1551,7 +1551,7 @@ static struct i2c_driver ov772x_i2c_driver = {
.name = "ov772x",
.of_match_table = ov772x_of_match,
},
- .probe_new = ov772x_probe,
+ .probe = ov772x_probe,
.remove = ov772x_remove,
.id_table = ov772x_id,
};
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index c9fd9b0bc54a..10e47c7d4e0c 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1212,7 +1212,7 @@ static struct i2c_driver ov7740_i2c_driver = {
.pm = &ov7740_pm_ops,
.of_match_table = of_match_ptr(ov7740_of_match),
},
- .probe_new = ov7740_probe,
+ .probe = ov7740_probe,
.remove = ov7740_remove,
.id_table = ov7740_id,
};
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index b5c7881383ca..f053c3a7676a 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -2527,7 +2527,7 @@ static struct i2c_driver ov8856_i2c_driver = {
.acpi_match_table = ACPI_PTR(ov8856_acpi_ids),
.of_match_table = ov8856_of_match,
},
- .probe_new = ov8856_probe,
+ .probe = ov8856_probe,
.remove = ov8856_remove,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c
index 9ca8a17bfbb9..3af6125a2eee 100644
--- a/drivers/media/i2c/ov8858.c
+++ b/drivers/media/i2c/ov8858.c
@@ -1998,8 +1998,8 @@ static struct i2c_driver ov8858_i2c_driver = {
.pm = &ov8858_pm_ops,
.of_match_table = ov8858_of_match,
},
- .probe_new = &ov8858_probe,
- .remove = &ov8858_remove,
+ .probe = ov8858_probe,
+ .remove = ov8858_remove,
};
module_i2c_driver(ov8858_i2c_driver);
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index cae1866134a0..f2213c6158d3 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -3158,8 +3158,8 @@ static struct i2c_driver ov8865_driver = {
.acpi_match_table = ov8865_acpi_match,
.pm = &ov8865_pm_ops,
},
- .probe_new = ov8865_probe,
- .remove = ov8865_remove,
+ .probe = ov8865_probe,
+ .remove = ov8865_remove,
};
module_i2c_driver(ov8865_driver);
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index 7f46cac38aab..068c7449f50e 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -1512,7 +1512,7 @@ static const struct of_device_id ov9282_of_match[] = {
MODULE_DEVICE_TABLE(of, ov9282_of_match);
static struct i2c_driver ov9282_driver = {
- .probe_new = ov9282_probe,
+ .probe = ov9282_probe,
.remove = ov9282_remove,
.driver = {
.name = "ov9282",
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index a80fa59bf2ae..cbaea049531d 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -762,7 +762,7 @@ static struct i2c_driver ov9640_i2c_driver = {
.driver = {
.name = "ov9640",
},
- .probe_new = ov9640_probe,
+ .probe = ov9640_probe,
.remove = ov9640_remove,
.id_table = ov9640_id,
};
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 7e7cb1e4520e..da1ab5135eaa 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1571,7 +1571,7 @@ static struct i2c_driver ov965x_i2c_driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ov965x_of_match),
},
- .probe_new = ov965x_probe,
+ .probe = ov965x_probe,
.remove = ov965x_remove,
.id_table = ov965x_id,
};
diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
index 8b0a158cb297..b6244772bc59 100644
--- a/drivers/media/i2c/ov9734.c
+++ b/drivers/media/i2c/ov9734.c
@@ -1028,7 +1028,7 @@ static struct i2c_driver ov9734_i2c_driver = {
.pm = &ov9734_pm_ops,
.acpi_match_table = ov9734_acpi_ids,
},
- .probe_new = ov9734_probe,
+ .probe = ov9734_probe,
.remove = ov9734_remove,
};
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index a2263fa825b5..01a2596282f0 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -676,7 +676,7 @@ static struct i2c_driver rdacm20_i2c_driver = {
.name = "rdacm20",
.of_match_table = rdacm20_of_ids,
},
- .probe_new = rdacm20_probe,
+ .probe = rdacm20_probe,
.remove = rdacm20_remove,
.shutdown = rdacm20_shutdown,
};
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
index 9ccc56c30d3b..043fec778a5e 100644
--- a/drivers/media/i2c/rdacm21.c
+++ b/drivers/media/i2c/rdacm21.c
@@ -635,7 +635,7 @@ static struct i2c_driver rdacm21_i2c_driver = {
.name = "rdacm21",
.of_match_table = rdacm21_of_ids,
},
- .probe_new = rdacm21_probe,
+ .probe = rdacm21_probe,
.remove = rdacm21_remove,
};
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index 9db5473daba0..b430046f9e2a 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1421,7 +1421,7 @@ static struct i2c_driver rj54n1_i2c_driver = {
.driver = {
.name = "rj54n1cb0c",
},
- .probe_new = rj54n1_probe,
+ .probe = rj54n1_probe,
.remove = rj54n1_remove,
.id_table = rj54n1_id,
};
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 7938a3327d3e..ed5b10731a14 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1729,7 +1729,7 @@ static struct i2c_driver s5c73m3_i2c_driver = {
.of_match_table = of_match_ptr(s5c73m3_of_match),
.name = DRIVER_NAME,
},
- .probe_new = s5c73m3_probe,
+ .probe = s5c73m3_probe,
.remove = s5c73m3_remove,
.id_table = s5c73m3_id,
};
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 960fbf6428ea..67da2045f543 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -2021,7 +2021,7 @@ static struct i2c_driver s5k5baf_i2c_driver = {
.of_match_table = s5k5baf_of_match,
.name = S5K5BAF_DRIVER_NAME
},
- .probe_new = s5k5baf_probe,
+ .probe = s5k5baf_probe,
.remove = s5k5baf_remove,
.id_table = s5k5baf_id,
};
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index ef6673b10580..b3560c8f8b41 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -373,7 +373,7 @@ static struct i2c_driver s5k6a3_driver = {
.of_match_table = of_match_ptr(s5k6a3_of_match),
.name = S5K6A3_DRV_NAME,
},
- .probe_new = s5k6a3_probe,
+ .probe = s5k6a3_probe,
.remove = s5k6a3_remove,
.id_table = s5k6a3_ids,
};
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index 8752f7cff611..dea9fc09356f 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -505,7 +505,7 @@ static struct i2c_driver saa6588_driver = {
.driver = {
.name = "saa6588",
},
- .probe_new = saa6588_probe,
+ .probe = saa6588_probe,
.remove = saa6588_remove,
.id_table = saa6588_id,
};
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 892d64fe6e81..c106e7a7d1f4 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -781,7 +781,7 @@ static struct i2c_driver saa6752hs_driver = {
.driver = {
.name = "saa6752hs",
},
- .probe_new = saa6752hs_probe,
+ .probe = saa6752hs_probe,
.remove = saa6752hs_remove,
.id_table = saa6752hs_id,
};
diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c
index b58e71517376..1520790338ce 100644
--- a/drivers/media/i2c/saa7110.c
+++ b/drivers/media/i2c/saa7110.c
@@ -448,7 +448,7 @@ static struct i2c_driver saa7110_driver = {
.driver = {
.name = "saa7110",
},
- .probe_new = saa7110_probe,
+ .probe = saa7110_probe,
.remove = saa7110_remove,
.id_table = saa7110_id,
};
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index efeda3956f81..a1c71187e773 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1951,7 +1951,7 @@ static struct i2c_driver saa711x_driver = {
.driver = {
.name = "saa7115",
},
- .probe_new = saa711x_probe,
+ .probe = saa711x_probe,
.remove = saa711x_remove,
.id_table = saa711x_id,
};
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index f98f3a1c38a9..818ed19cf37b 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -810,7 +810,7 @@ static struct i2c_driver saa7127_driver = {
.driver = {
.name = "saa7127",
},
- .probe_new = saa7127_probe,
+ .probe = saa7127_probe,
.remove = saa7127_remove,
.id_table = saa7127_id,
};
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index df01059076fa..933ec0171430 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -1343,7 +1343,7 @@ static struct i2c_driver saa717x_driver = {
.driver = {
.name = "saa717x",
},
- .probe_new = saa717x_probe,
+ .probe = saa717x_probe,
.remove = saa717x_remove,
.id_table = saa717x_id,
};
diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c
index c78f2e95ba37..5535d71f4860 100644
--- a/drivers/media/i2c/saa7185.c
+++ b/drivers/media/i2c/saa7185.c
@@ -343,7 +343,7 @@ static struct i2c_driver saa7185_driver = {
.driver = {
.name = "saa7185",
},
- .probe_new = saa7185_probe,
+ .probe = saa7185_probe,
.remove = saa7185_remove,
.id_table = saa7185_id,
};
diff --git a/drivers/media/i2c/sony-btf-mpx.c b/drivers/media/i2c/sony-btf-mpx.c
index eef6c8a7c9c9..0f53834f3ae4 100644
--- a/drivers/media/i2c/sony-btf-mpx.c
+++ b/drivers/media/i2c/sony-btf-mpx.c
@@ -375,7 +375,7 @@ static struct i2c_driver sony_btf_mpx_driver = {
.driver = {
.name = "sony-btf-mpx",
},
- .probe_new = sony_btf_mpx_probe,
+ .probe = sony_btf_mpx_probe,
.remove = sony_btf_mpx_remove,
.id_table = sony_btf_mpx_id,
};
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 31b89aff0e86..906553a28676 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -736,8 +736,13 @@ static void mipid02_set_fmt_source(struct v4l2_subdev *sd,
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
- /* source pad mirror active sink pad */
- format->format = bridge->fmt;
+ /* the source pad mirrors the sink pad */
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ format->format = bridge->fmt;
+ else
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ MIPID02_SINK_0);
+
/* but code may need to be converted */
format->format.code = serial_to_parallel_code(format->format.code);
@@ -745,7 +750,8 @@ static void mipid02_set_fmt_source(struct v4l2_subdev *sd,
if (format->which != V4L2_SUBDEV_FORMAT_TRY)
return;
- *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = format->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, MIPID02_SOURCE) =
+ format->format;
}
static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
@@ -763,6 +769,9 @@ static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
fmt = &bridge->fmt;
*fmt = format->format;
+
+ /* Propagate the format change to the source pad */
+ mipid02_set_fmt_source(sd, sd_state, format);
}
static int mipid02_set_fmt(struct v4l2_subdev *sd,
@@ -1091,7 +1100,7 @@ static struct i2c_driver mipid02_i2c_driver = {
.name = "st-mipid02",
.of_match_table = mipid02_dt_ids,
},
- .probe_new = mipid02_probe,
+ .probe = mipid02_probe,
.remove = mipid02_remove,
};
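
The st-mipid02 change makes the source-pad format honour the TRY/ACTIVE distinction and propagates sink-format changes to the source pad instead of always returning the active bridge format. A sketch of the selection logic, assuming a fixed sink pad index; it is not the bridge's actual code.

#include <media/v4l2-subdev.h>

static struct v4l2_mbus_framefmt *
pick_sink_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state,
	      struct v4l2_mbus_framefmt *active_fmt, unsigned int sink_pad,
	      u32 which)
{
	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
		return active_fmt;	/* device (active) state */

	/* TRY formats live in the file-handle state */
	return v4l2_subdev_get_try_format(sd, sd_state, sink_pad);
}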
diff --git a/drivers/media/i2c/st-vgxy61.c b/drivers/media/i2c/st-vgxy61.c
index adbd093ad190..30f82ca344c4 100644
--- a/drivers/media/i2c/st-vgxy61.c
+++ b/drivers/media/i2c/st-vgxy61.c
@@ -1951,7 +1951,7 @@ static struct i2c_driver vgxy61_i2c_driver = {
.of_match_table = vgxy61_dt_ids,
.pm = &vgxy61_pm_ops,
},
- .probe_new = vgxy61_probe,
+ .probe = vgxy61_probe,
.remove = vgxy61_remove,
};
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 9197fa0b1bc2..15f8163be9bf 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2206,7 +2206,7 @@ static struct i2c_driver tc358743_driver = {
.name = "tc358743",
.of_match_table = of_match_ptr(tc358743_of_match),
},
- .probe_new = tc358743_probe,
+ .probe = tc358743_probe,
.remove = tc358743_remove,
.id_table = tc358743_id,
};
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index ec1a193ba161..e9b2d906c177 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -1686,7 +1686,7 @@ static struct i2c_driver tc358746_driver = {
.pm = pm_ptr(&tc358746_pm_ops),
.of_match_table = tc358746_of_match,
},
- .probe_new = tc358746_probe,
+ .probe = tc358746_probe,
.remove = tc358746_remove,
};
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 27f6393dc327..325e99125941 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2834,7 +2834,7 @@ static struct i2c_driver tda1997x_i2c_driver = {
.name = "tda1997x",
.of_match_table = of_match_ptr(tda1997x_of_id),
},
- .probe_new = tda1997x_probe,
+ .probe = tda1997x_probe,
.remove = tda1997x_remove,
.id_table = tda1997x_i2c_id,
};
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index bbceaac8e0b3..6ecdc8e2e0c6 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -409,7 +409,7 @@ static struct i2c_driver tda7432_driver = {
.driver = {
.name = "tda7432",
},
- .probe_new = tda7432_probe,
+ .probe = tda7432_probe,
.remove = tda7432_remove,
.id_table = tda7432_id,
};
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index 25fbd7e3950e..1911ef2126be 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -191,7 +191,7 @@ static struct i2c_driver tda9840_driver = {
.driver = {
.name = "tda9840",
},
- .probe_new = tda9840_probe,
+ .probe = tda9840_probe,
.remove = tda9840_remove,
.id_table = tda9840_id,
};
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index d375d2d24354..3ed6e441d515 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -150,7 +150,7 @@ static struct i2c_driver tea6415c_driver = {
.driver = {
.name = "tea6415c",
},
- .probe_new = tea6415c_probe,
+ .probe = tea6415c_probe,
.remove = tea6415c_remove,
.id_table = tea6415c_id,
};
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 9da1f3b02c57..63f23784bb41 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -132,7 +132,7 @@ static struct i2c_driver tea6420_driver = {
.driver = {
.name = "tea6420",
},
- .probe_new = tea6420_probe,
+ .probe = tea6420_probe,
.remove = tea6420_remove,
.id_table = tea6420_id,
};
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 67de90cf696e..ea70c1c13872 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -376,7 +376,7 @@ static struct i2c_driver ths7303_driver = {
.driver = {
.name = "ths73x3",
},
- .probe_new = ths7303_probe,
+ .probe = ths7303_probe,
.remove = ths7303_remove,
.id_table = ths7303_id,
};
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 081ef5a4b950..0e0f676cd221 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -499,7 +499,7 @@ static struct i2c_driver ths8200_driver = {
.name = "ths8200",
.of_match_table = of_match_ptr(ths8200_of_match),
},
- .probe_new = ths8200_probe,
+ .probe = ths8200_probe,
.remove = ths8200_remove,
.id_table = ths8200_id,
};
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index 47198e803817..d800ff8af1ff 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -197,7 +197,7 @@ static struct i2c_driver tlv320aic23b_driver = {
.driver = {
.name = "tlv320aic23b",
},
- .probe_new = tlv320aic23b_probe,
+ .probe = tlv320aic23b_probe,
.remove = tlv320aic23b_remove,
.id_table = tlv320aic23b_id,
};
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index a54c76d9e23b..ba20f35cafd5 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -2095,7 +2095,7 @@ static struct i2c_driver tvaudio_driver = {
.driver = {
.name = "tvaudio",
},
- .probe_new = tvaudio_probe,
+ .probe = tvaudio_probe,
.remove = tvaudio_remove,
.id_table = tvaudio_id,
};
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index f294cae72b01..aa6d4b67b6d5 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -1208,7 +1208,7 @@ static struct i2c_driver tvp514x_driver = {
.of_match_table = of_match_ptr(tvp514x_of_match),
.name = TVP514X_MODULE_NAME,
},
- .probe_new = tvp514x_probe,
+ .probe = tvp514x_probe,
.remove = tvp514x_remove,
.id_table = tvp514x_id,
};
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 859f1cb2fa74..c7fb35ee3f9d 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -2280,7 +2280,7 @@ static struct i2c_driver tvp5150_driver = {
.name = "tvp5150",
.pm = &tvp5150_pm_ops,
},
- .probe_new = tvp5150_probe,
+ .probe = tvp5150_probe,
.remove = tvp5150_remove,
.id_table = tvp5150_id,
};
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 4ccd218f5584..a2d7bc799849 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1079,7 +1079,7 @@ static struct i2c_driver tvp7002_driver = {
.of_match_table = of_match_ptr(tvp7002_of_match),
.name = TVP7002_MODULE_NAME,
},
- .probe_new = tvp7002_probe,
+ .probe = tvp7002_probe,
.remove = tvp7002_remove,
.id_table = tvp7002_id,
};
diff --git a/drivers/media/i2c/tw2804.c b/drivers/media/i2c/tw2804.c
index 710790ece11b..6a2521e3a25c 100644
--- a/drivers/media/i2c/tw2804.c
+++ b/drivers/media/i2c/tw2804.c
@@ -423,7 +423,7 @@ static struct i2c_driver tw2804_driver = {
.driver = {
.name = "tw2804",
},
- .probe_new = tw2804_probe,
+ .probe = tw2804_probe,
.remove = tw2804_remove,
.id_table = tw2804_id,
};
diff --git a/drivers/media/i2c/tw9903.c b/drivers/media/i2c/tw9903.c
index 428ee55787e1..996be3960af3 100644
--- a/drivers/media/i2c/tw9903.c
+++ b/drivers/media/i2c/tw9903.c
@@ -254,7 +254,7 @@ static struct i2c_driver tw9903_driver = {
.driver = {
.name = "tw9903",
},
- .probe_new = tw9903_probe,
+ .probe = tw9903_probe,
.remove = tw9903_remove,
.id_table = tw9903_id,
};
diff --git a/drivers/media/i2c/tw9906.c b/drivers/media/i2c/tw9906.c
index 7824ed9b04ed..25c625f6d6e4 100644
--- a/drivers/media/i2c/tw9906.c
+++ b/drivers/media/i2c/tw9906.c
@@ -222,7 +222,7 @@ static struct i2c_driver tw9906_driver = {
.driver = {
.name = "tw9906",
},
- .probe_new = tw9906_probe,
+ .probe = tw9906_probe,
.remove = tw9906_remove,
.id_table = tw9906_id,
};
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index 459fa22f4341..477a64d8f8ab 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -1012,7 +1012,7 @@ static struct i2c_driver tw9910_i2c_driver = {
.driver = {
.name = "tw9910",
},
- .probe_new = tw9910_probe,
+ .probe = tw9910_probe,
.remove = tw9910_remove,
.id_table = tw9910_id,
};
diff --git a/drivers/media/i2c/uda1342.c b/drivers/media/i2c/uda1342.c
index b6873d866272..da7bc4700bed 100644
--- a/drivers/media/i2c/uda1342.c
+++ b/drivers/media/i2c/uda1342.c
@@ -88,7 +88,7 @@ static struct i2c_driver uda1342_driver = {
.driver = {
.name = "uda1342",
},
- .probe_new = uda1342_probe,
+ .probe = uda1342_probe,
.remove = uda1342_remove,
.id_table = uda1342_id,
};
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index 47eed3aab060..54c2ba0ba375 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -228,7 +228,7 @@ static struct i2c_driver upd64031a_driver = {
.driver = {
.name = "upd64031a",
},
- .probe_new = upd64031a_probe,
+ .probe = upd64031a_probe,
.remove = upd64031a_remove,
.id_table = upd64031a_id,
};
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index 3f5a7d4853a1..2a820589a4cb 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -199,7 +199,7 @@ static struct i2c_driver upd64083_driver = {
.driver = {
.name = "upd64083",
},
- .probe_new = upd64083_probe,
+ .probe = upd64083_probe,
.remove = upd64083_remove,
.id_table = upd64083_id,
};
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index dddf9827b314..6f98abc7ccc1 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -274,7 +274,7 @@ static const struct hwmon_channel_info amg88xx_temp = {
.config = amg88xx_temp_config,
};
-static const struct hwmon_channel_info *amg88xx_info[] = {
+static const struct hwmon_channel_info * const amg88xx_info[] = {
&amg88xx_temp,
NULL
};
@@ -959,7 +959,7 @@ static struct i2c_driver video_i2c_driver = {
.of_match_table = video_i2c_of_match,
.pm = &video_i2c_pm_ops,
},
- .probe_new = video_i2c_probe,
+ .probe = video_i2c_probe,
.remove = video_i2c_remove,
.id_table = video_i2c_id_table,
};
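
The video-i2c hunk adds the extra const because the hwmon core now declares the channel-info table as an array of const pointers. A sketch of a matching table, assuming a single temperature-input channel; the names are illustrative.

#include <linux/hwmon.h>

static const struct hwmon_channel_info *const example_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info example_chip_info = {
	.info = example_info,	/* type matches the updated hwmon API */
};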
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index ed1c58ea8ed3..0ba3c2b68037 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -181,7 +181,7 @@ static struct i2c_driver vp27smpx_driver = {
.driver = {
.name = "vp27smpx",
},
- .probe_new = vp27smpx_probe,
+ .probe = vp27smpx_probe,
.remove = vp27smpx_remove,
.id_table = vp27smpx_id,
};
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index aa73d5dcc3e7..1eaae886f217 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -546,7 +546,7 @@ static struct i2c_driver vpx3220_driver = {
.driver = {
.name = "vpx3220",
},
- .probe_new = vpx3220_probe,
+ .probe = vpx3220_probe,
.remove = vpx3220_remove,
.id_table = vpx3220_id,
};
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index 8b34a673ffd3..19bf7a00dff9 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -252,7 +252,7 @@ static struct i2c_driver wm8739_driver = {
.driver = {
.name = "wm8739",
},
- .probe_new = wm8739_probe,
+ .probe = wm8739_probe,
.remove = wm8739_remove,
.id_table = wm8739_id,
};
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 56d98518f7eb..d1b716fd6f11 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -298,7 +298,7 @@ static struct i2c_driver wm8775_driver = {
.driver = {
.name = "wm8775",
},
- .probe_new = wm8775_probe,
+ .probe = wm8775_probe,
.remove = wm8775_remove,
.id_table = wm8775_id,
};
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index e7216a985ba6..83468d4a440b 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -1052,25 +1052,19 @@ static void __media_entity_remove_link(struct media_entity *entity,
kfree(link);
}
-int media_get_pad_index(struct media_entity *entity, bool is_sink,
+int media_get_pad_index(struct media_entity *entity, u32 pad_type,
enum media_pad_signal_type sig_type)
{
- int i;
- bool pad_is_sink;
+ unsigned int i;
if (!entity)
return -EINVAL;
for (i = 0; i < entity->num_pads; i++) {
- if (entity->pads[i].flags & MEDIA_PAD_FL_SINK)
- pad_is_sink = true;
- else if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
- pad_is_sink = false;
- else
- continue; /* This is an error! */
-
- if (pad_is_sink != is_sink)
+ if ((entity->pads[i].flags &
+ (MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE)) != pad_type)
continue;
+
if (entity->pads[i].sig_type == sig_type)
return i;
}
@@ -1416,7 +1410,7 @@ struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad)
EXPORT_SYMBOL_GPL(media_pad_remote_pad_unique);
int media_entity_get_fwnode_pad(struct media_entity *entity,
- struct fwnode_handle *fwnode,
+ const struct fwnode_handle *fwnode,
unsigned long direction_flags)
{
struct fwnode_endpoint endpoint;
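
After this change media_get_pad_index() takes the pad flag itself rather than a bool, so callers pass MEDIA_PAD_FL_SINK or MEDIA_PAD_FL_SOURCE directly and the flag test collapses to a single mask comparison. A sketch of a caller; the entity and signal type are assumed.

#include <media/media-entity.h>

static int find_dv_sink_pad(struct media_entity *entity)
{
	/* returns the pad index, or a negative errno if none matches */
	return media_get_pad_index(entity, MEDIA_PAD_FL_SINK, PAD_SIGNAL_DV);
}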
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index 85fcdc59f0d1..d234a0f404d6 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -534,7 +534,7 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct
mutex_lock(&dst_ca_mutex);
dvbdev = file->private_data;
- state = (struct dst_state *)dvbdev->priv;
+ state = dvbdev->priv;
p_ca_message = kmalloc(sizeof (struct ca_msg), GFP_KERNEL);
p_ca_slot_info = kmalloc(sizeof (struct ca_slot_info), GFP_KERNEL);
p_ca_caps = kmalloc(sizeof (struct ca_caps), GFP_KERNEL);
diff --git a/drivers/media/pci/cx18/cx18-av-vbi.c b/drivers/media/pci/cx18/cx18-av-vbi.c
index a0d465924e75..65281d40c681 100644
--- a/drivers/media/pci/cx18/cx18-av-vbi.c
+++ b/drivers/media/pci/cx18/cx18-av-vbi.c
@@ -51,7 +51,7 @@ struct vbi_anc_data {
u8 sdid;
u8 data_count;
u8 idid[2];
- u8 payload[1]; /* data_count of payload */
+ u8 payload[]; /* data_count of payload */
/* u8 checksum; */
/* u8 fill[]; Variable number of fill bytes */
};
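
The cx18 hunk turns the one-element payload array into a C99 flexible array member, so the trailing bytes no longer count toward sizeof() and allocations size them explicitly. A sketch of the idiom with struct_size(); the structure below is illustrative, not the driver's.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct anc_example {
	u8 data_count;
	u8 payload[];			/* data_count bytes follow */
};

static struct anc_example *anc_alloc(u8 count)
{
	struct anc_example *anc;

	/* struct_size() accounts for the header plus count payload bytes */
	anc = kzalloc(struct_size(anc, payload, count), GFP_KERNEL);
	if (anc)
		anc->data_count = count;

	return anc;
}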
diff --git a/drivers/media/pci/cx18/cx18-dvb.c b/drivers/media/pci/cx18/cx18-dvb.c
index 33e5a5b5fab4..cf82360a503d 100644
--- a/drivers/media/pci/cx18/cx18-dvb.c
+++ b/drivers/media/pci/cx18/cx18-dvb.c
@@ -234,7 +234,7 @@ static int dvb_register(struct cx18_stream *stream);
static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct cx18_stream *stream = (struct cx18_stream *) demux->priv;
+ struct cx18_stream *stream = demux->priv;
struct cx18 *cx;
int ret;
u32 v;
@@ -305,7 +305,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
static int cx18_dvb_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct cx18_stream *stream = (struct cx18_stream *)demux->priv;
+ struct cx18_stream *stream = demux->priv;
struct cx18 *cx;
int ret = -EINVAL;
diff --git a/drivers/media/pci/dm1105/Kconfig b/drivers/media/pci/dm1105/Kconfig
index e0e3af67c99c..4498c37f4990 100644
--- a/drivers/media/pci/dm1105/Kconfig
+++ b/drivers/media/pci/dm1105/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DVB_DM1105
tristate "SDMC DM1105 based PCI cards"
- depends on DVB_CORE && PCI && I2C && I2C_ALGOBIT
+ depends on DVB_CORE && PCI && I2C && I2C_ALGOBIT && HAS_IOPORT
select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0288 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
index 3c84cb121632..34984a7474ed 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
@@ -1375,7 +1375,8 @@ struct sensor_async_subdev {
struct csi2_bus_info csi2;
};
-#define to_sensor_asd(asd) container_of(asd, struct sensor_async_subdev, asd)
+#define to_sensor_asd(__asd) \
+ container_of_const(__asd, struct sensor_async_subdev, asd)
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
@@ -1417,31 +1418,27 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
struct sensor_async_subdev *s_asd;
struct v4l2_async_subdev *asd;
struct cio2_queue *q;
- unsigned int pad;
int ret;
list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
s_asd = to_sensor_asd(asd);
q = &cio2->queue[s_asd->csi2.port];
- for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
- if (q->sensor->entity.pads[pad].flags &
- MEDIA_PAD_FL_SOURCE)
- break;
-
- if (pad == q->sensor->entity.num_pads) {
- dev_err(dev, "failed to find src pad for %s\n",
- q->sensor->name);
- return -ENXIO;
+ ret = media_entity_get_fwnode_pad(&q->sensor->entity,
+ s_asd->asd.match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "no pad for endpoint %pfw (%d)\n",
+ s_asd->asd.match.fwnode, ret);
+ return ret;
}
- ret = media_create_pad_link(
- &q->sensor->entity, pad,
- &q->subdev.entity, CIO2_PAD_SINK,
- 0);
+ ret = media_create_pad_link(&q->sensor->entity, ret,
+ &q->subdev.entity, CIO2_PAD_SINK,
+ 0);
if (ret) {
- dev_err(dev, "failed to create link for %s\n",
- q->sensor->name);
+ dev_err(dev, "failed to create link for %s (endpoint %pfw, error %d)\n",
+ q->sensor->name, s_asd->asd.match.fwnode, ret);
return ret;
}
}
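
The ipu3-cio2 notifier now resolves the sensor's source pad from the endpoint fwnode via media_entity_get_fwnode_pad() instead of scanning pad flags by hand, and reports the endpoint in its error messages. A sketch of the resulting link-creation step; the helper and its arguments are assumed.

#include <media/media-entity.h>

static int link_sensor_to_csi2(struct media_entity *sensor,
			       struct fwnode_handle *endpoint,
			       struct media_entity *csi2, u16 sink_pad)
{
	int src_pad;

	/* returns the matching pad index or a negative errno */
	src_pad = media_entity_get_fwnode_pad(sensor, endpoint,
					      MEDIA_PAD_FL_SOURCE);
	if (src_pad < 0)
		return src_pad;

	return media_create_pad_link(sensor, src_pad, csi2, sink_pad, 0);
}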
diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c
index 24421c116b0b..3eb749db1ca7 100644
--- a/drivers/media/pci/saa7164/saa7164-dvb.c
+++ b/drivers/media/pci/saa7164/saa7164-dvb.c
@@ -280,7 +280,7 @@ out:
static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct saa7164_port *port = (struct saa7164_port *) demux->priv;
+ struct saa7164_port *port = demux->priv;
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
int ret = 0;
@@ -307,7 +307,7 @@ static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed)
static int saa7164_dvb_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct saa7164_port *port = (struct saa7164_port *) demux->priv;
+ struct saa7164_port *port = demux->priv;
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
int ret = 0;
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index 5d5796f24469..710595987522 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -308,7 +308,7 @@ int ttpci_budget_debiwrite(struct budget *budget, u32 config, int addr,
static int budget_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct budget *budget = (struct budget *) demux->priv;
+ struct budget *budget = demux->priv;
int status = 0;
dprintk(2, "budget: %p\n", budget);
@@ -327,7 +327,7 @@ static int budget_start_feed(struct dvb_demux_feed *feed)
static int budget_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct budget *budget = (struct budget *) demux->priv;
+ struct budget *budget = demux->priv;
int status = 0;
dprintk(2, "budget: %p\n", budget);
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 74cba1368cfa..1ae3845b6743 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -59,7 +59,7 @@ void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
}
spin_unlock_irqrestore(&ac->lock, flags);
- if (!done || !next)
+ if (!done)
continue;
/*
* Checking for a non-nil dma_desc[pb]->virt buffer is
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 3fa1a74a2e20..6515f3cdb7a7 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -279,6 +279,7 @@ static void vdec_handle_resolution_change(struct vpu_inst *inst)
vdec->source_change--;
vpu_notify_source_change(inst);
+ vpu_set_last_buffer_dequeued(inst, false);
}
static int vdec_update_state(struct vpu_inst *inst, enum vpu_codec_state state, u32 force)
@@ -314,7 +315,7 @@ static void vdec_set_last_buffer_dequeued(struct vpu_inst *inst)
return;
if (vdec->eos_received) {
- if (!vpu_set_last_buffer_dequeued(inst)) {
+ if (!vpu_set_last_buffer_dequeued(inst, true)) {
vdec->eos_received--;
vdec_update_state(inst, VPU_CODEC_STATE_DRAIN, 0);
}
@@ -569,7 +570,7 @@ static int vdec_drain(struct vpu_inst *inst)
return 0;
if (!vdec->params.frame_count) {
- vpu_set_last_buffer_dequeued(inst);
+ vpu_set_last_buffer_dequeued(inst, true);
return 0;
}
@@ -608,7 +609,7 @@ static int vdec_cmd_stop(struct vpu_inst *inst)
vpu_trace(inst->dev, "[%d]\n", inst->id);
if (inst->state == VPU_CODEC_STATE_DEINIT) {
- vpu_set_last_buffer_dequeued(inst);
+ vpu_set_last_buffer_dequeued(inst, true);
} else {
vdec->drain = 1;
vdec_drain(inst);
diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
index e6e8fe45fc7c..58480e2755ec 100644
--- a/drivers/media/platform/amphion/venc.c
+++ b/drivers/media/platform/amphion/venc.c
@@ -458,7 +458,7 @@ static int venc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd
vpu_inst_lock(inst);
if (cmd->cmd == V4L2_ENC_CMD_STOP) {
if (inst->state == VPU_CODEC_STATE_DEINIT)
- vpu_set_last_buffer_dequeued(inst);
+ vpu_set_last_buffer_dequeued(inst, true);
else
venc_request_eos(inst);
}
@@ -878,7 +878,7 @@ static void venc_set_last_buffer_dequeued(struct vpu_inst *inst)
struct venc_t *venc = inst->priv;
if (venc->stopped && list_empty(&venc->frames))
- vpu_set_last_buffer_dequeued(inst);
+ vpu_set_last_buffer_dequeued(inst, true);
}
static void venc_stop_done(struct vpu_inst *inst)
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index ef44bff9fbaf..c1d6606ad7e5 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -1313,6 +1313,15 @@ static int vpu_malone_insert_scode_pic(struct malone_scode_t *scode, u32 codec_i
return sizeof(hdr);
}
+static int vpu_malone_insert_scode_vc1_g_seq(struct malone_scode_t *scode)
+{
+ if (!scode->inst->total_input_count)
+ return 0;
+ if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb)))
+ scode->need_data = 0;
+ return 0;
+}
+
static int vpu_malone_insert_scode_vc1_g_pic(struct malone_scode_t *scode)
{
struct vb2_v4l2_buffer *vbuf;
@@ -1344,6 +1353,8 @@ static int vpu_malone_insert_scode_vc1_l_seq(struct malone_scode_t *scode)
int size = 0;
u8 rcv_seqhdr[MALONE_VC1_RCV_SEQ_HEADER_LEN];
+ if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb)))
+ scode->need_data = 0;
if (scode->inst->total_input_count)
return 0;
scode->need_data = 0;
@@ -1458,6 +1469,7 @@ static const struct malone_scode_handler scode_handlers[] = {
},
{
.pixelformat = V4L2_PIX_FMT_VC1_ANNEX_G,
+ .insert_scode_seq = vpu_malone_insert_scode_vc1_g_seq,
.insert_scode_pic = vpu_malone_insert_scode_vc1_g_pic,
},
{
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
index a48edb445eea..021235e1c144 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.c
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -100,7 +100,7 @@ int vpu_notify_source_change(struct vpu_inst *inst)
return 0;
}
-int vpu_set_last_buffer_dequeued(struct vpu_inst *inst)
+int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos)
{
struct vb2_queue *q;
@@ -116,7 +116,8 @@ int vpu_set_last_buffer_dequeued(struct vpu_inst *inst)
vpu_trace(inst->dev, "last buffer dequeued\n");
q->last_buffer_dequeued = true;
wake_up(&q->done_wq);
- vpu_notify_eos(inst);
+ if (eos)
+ vpu_notify_eos(inst);
return 0;
}
diff --git a/drivers/media/platform/amphion/vpu_v4l2.h b/drivers/media/platform/amphion/vpu_v4l2.h
index ef5de6b66e47..60f43056a7a2 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.h
+++ b/drivers/media/platform/amphion/vpu_v4l2.h
@@ -27,7 +27,7 @@ struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32
void vpu_v4l2_set_error(struct vpu_inst *inst);
int vpu_notify_eos(struct vpu_inst *inst);
int vpu_notify_source_change(struct vpu_inst *inst);
-int vpu_set_last_buffer_dequeued(struct vpu_inst *inst);
+int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos);
void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state);
int vpu_get_num_buffers(struct vpu_inst *inst, u32 type);
bool vpu_is_source_empty(struct vpu_inst *inst);
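
The amphion change gives vpu_set_last_buffer_dequeued() an eos argument so a dynamic resolution change can flush the capture queue without also signalling end-of-stream, while drain and stop paths keep passing true. A sketch of the callers' intent; the helper wrapping it is hypothetical.

#include <linux/types.h>

struct vpu_inst;
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos);

static void flush_capture_queue(struct vpu_inst *inst, bool resolution_change)
{
	/* drain/stop: also notify EOS; resolution change: flush only */
	vpu_set_last_buffer_dequeued(inst, !resolution_change);
}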
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 0051f372a66c..4768156181c9 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -28,6 +28,7 @@
#include "mtk_jpeg_core.h"
#include "mtk_jpeg_dec_parse.h"
+#if defined(CONFIG_OF)
static struct mtk_jpeg_fmt mtk_jpeg_enc_formats[] = {
{
.fourcc = V4L2_PIX_FMT_JPEG,
@@ -101,6 +102,7 @@ static struct mtk_jpeg_fmt mtk_jpeg_dec_formats[] = {
.flags = MTK_JPEG_FMT_FLAG_CAPTURE,
},
};
+#endif
#define MTK_JPEG_ENC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_enc_formats)
#define MTK_JPEG_DEC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_dec_formats)
@@ -936,148 +938,6 @@ static int mtk_jpeg_set_dec_dst(struct mtk_jpeg_ctx *ctx,
return 0;
}
-static int mtk_jpegenc_get_hw(struct mtk_jpeg_ctx *ctx)
-{
- struct mtk_jpegenc_comp_dev *comp_jpeg;
- struct mtk_jpeg_dev *jpeg = ctx->jpeg;
- unsigned long flags;
- int hw_id = -1;
- int i;
-
- spin_lock_irqsave(&jpeg->hw_lock, flags);
- for (i = 0; i < MTK_JPEGENC_HW_MAX; i++) {
- comp_jpeg = jpeg->enc_hw_dev[i];
- if (comp_jpeg->hw_state == MTK_JPEG_HW_IDLE) {
- hw_id = i;
- comp_jpeg->hw_state = MTK_JPEG_HW_BUSY;
- break;
- }
- }
- spin_unlock_irqrestore(&jpeg->hw_lock, flags);
-
- return hw_id;
-}
-
-static int mtk_jpegenc_set_hw_param(struct mtk_jpeg_ctx *ctx,
- int hw_id,
- struct vb2_v4l2_buffer *src_buf,
- struct vb2_v4l2_buffer *dst_buf)
-{
- struct mtk_jpegenc_comp_dev *jpeg = ctx->jpeg->enc_hw_dev[hw_id];
-
- jpeg->hw_param.curr_ctx = ctx;
- jpeg->hw_param.src_buffer = src_buf;
- jpeg->hw_param.dst_buffer = dst_buf;
-
- return 0;
-}
-
-static int mtk_jpegenc_put_hw(struct mtk_jpeg_dev *jpeg, int hw_id)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&jpeg->hw_lock, flags);
- jpeg->enc_hw_dev[hw_id]->hw_state = MTK_JPEG_HW_IDLE;
- spin_unlock_irqrestore(&jpeg->hw_lock, flags);
-
- return 0;
-}
-
-static void mtk_jpegenc_worker(struct work_struct *work)
-{
- struct mtk_jpegenc_comp_dev *comp_jpeg[MTK_JPEGENC_HW_MAX];
- enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
- struct mtk_jpeg_src_buf *jpeg_dst_buf;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
- int ret, i, hw_id = 0;
- unsigned long flags;
-
- struct mtk_jpeg_ctx *ctx = container_of(work,
- struct mtk_jpeg_ctx,
- jpeg_work);
- struct mtk_jpeg_dev *jpeg = ctx->jpeg;
-
- for (i = 0; i < MTK_JPEGENC_HW_MAX; i++)
- comp_jpeg[i] = jpeg->enc_hw_dev[i];
- i = 0;
-
-retry_select:
- hw_id = mtk_jpegenc_get_hw(ctx);
- if (hw_id < 0) {
- ret = wait_event_interruptible(jpeg->hw_wq,
- atomic_read(&jpeg->hw_rdy) > 0);
- if (ret != 0 || (i++ > MTK_JPEG_MAX_RETRY_TIME)) {
- dev_err(jpeg->dev, "%s : %d, all HW are busy\n",
- __func__, __LINE__);
- v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
- return;
- }
-
- goto retry_select;
- }
-
- atomic_dec(&jpeg->hw_rdy);
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- if (!src_buf)
- goto getbuf_fail;
-
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- if (!dst_buf)
- goto getbuf_fail;
-
- v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
-
- mtk_jpegenc_set_hw_param(ctx, hw_id, src_buf, dst_buf);
- ret = pm_runtime_get_sync(comp_jpeg[hw_id]->dev);
- if (ret < 0) {
- dev_err(jpeg->dev, "%s : %d, pm_runtime_get_sync fail !!!\n",
- __func__, __LINE__);
- goto enc_end;
- }
-
- ret = clk_prepare_enable(comp_jpeg[hw_id]->venc_clk.clks->clk);
- if (ret) {
- dev_err(jpeg->dev, "%s : %d, jpegenc clk_prepare_enable fail\n",
- __func__, __LINE__);
- goto enc_end;
- }
-
- v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
- schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
- msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
-
- spin_lock_irqsave(&comp_jpeg[hw_id]->hw_lock, flags);
- jpeg_dst_buf = mtk_jpeg_vb2_to_srcbuf(&dst_buf->vb2_buf);
- jpeg_dst_buf->curr_ctx = ctx;
- jpeg_dst_buf->frame_num = ctx->total_frame_num;
- ctx->total_frame_num++;
- mtk_jpeg_enc_reset(comp_jpeg[hw_id]->reg_base);
- mtk_jpeg_set_enc_dst(ctx,
- comp_jpeg[hw_id]->reg_base,
- &dst_buf->vb2_buf);
- mtk_jpeg_set_enc_src(ctx,
- comp_jpeg[hw_id]->reg_base,
- &src_buf->vb2_buf);
- mtk_jpeg_set_enc_params(ctx, comp_jpeg[hw_id]->reg_base);
- mtk_jpeg_enc_start(comp_jpeg[hw_id]->reg_base);
- v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
- spin_unlock_irqrestore(&comp_jpeg[hw_id]->hw_lock, flags);
-
- return;
-
-enc_end:
- v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_buf_done(src_buf, buf_state);
- v4l2_m2m_buf_done(dst_buf, buf_state);
-getbuf_fail:
- atomic_inc(&jpeg->hw_rdy);
- mtk_jpegenc_put_hw(jpeg, hw_id);
- v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
-}
-
static void mtk_jpeg_enc_device_run(void *priv)
{
struct mtk_jpeg_ctx *ctx = priv;
@@ -1128,173 +988,6 @@ static void mtk_jpeg_multicore_enc_device_run(void *priv)
queue_work(jpeg->workqueue, &ctx->jpeg_work);
}
-static int mtk_jpegdec_get_hw(struct mtk_jpeg_ctx *ctx)
-{
- struct mtk_jpegdec_comp_dev *comp_jpeg;
- struct mtk_jpeg_dev *jpeg = ctx->jpeg;
- unsigned long flags;
- int hw_id = -1;
- int i;
-
- spin_lock_irqsave(&jpeg->hw_lock, flags);
- for (i = 0; i < MTK_JPEGDEC_HW_MAX; i++) {
- comp_jpeg = jpeg->dec_hw_dev[i];
- if (comp_jpeg->hw_state == MTK_JPEG_HW_IDLE) {
- hw_id = i;
- comp_jpeg->hw_state = MTK_JPEG_HW_BUSY;
- break;
- }
- }
- spin_unlock_irqrestore(&jpeg->hw_lock, flags);
-
- return hw_id;
-}
-
-static int mtk_jpegdec_put_hw(struct mtk_jpeg_dev *jpeg, int hw_id)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&jpeg->hw_lock, flags);
- jpeg->dec_hw_dev[hw_id]->hw_state =
- MTK_JPEG_HW_IDLE;
- spin_unlock_irqrestore(&jpeg->hw_lock, flags);
-
- return 0;
-}
-
-static int mtk_jpegdec_set_hw_param(struct mtk_jpeg_ctx *ctx,
- int hw_id,
- struct vb2_v4l2_buffer *src_buf,
- struct vb2_v4l2_buffer *dst_buf)
-{
- struct mtk_jpegdec_comp_dev *jpeg =
- ctx->jpeg->dec_hw_dev[hw_id];
-
- jpeg->hw_param.curr_ctx = ctx;
- jpeg->hw_param.src_buffer = src_buf;
- jpeg->hw_param.dst_buffer = dst_buf;
-
- return 0;
-}
-
-static void mtk_jpegdec_worker(struct work_struct *work)
-{
- struct mtk_jpeg_ctx *ctx = container_of(work, struct mtk_jpeg_ctx,
- jpeg_work);
- struct mtk_jpegdec_comp_dev *comp_jpeg[MTK_JPEGDEC_HW_MAX];
- enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
- struct mtk_jpeg_src_buf *jpeg_src_buf, *jpeg_dst_buf;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct mtk_jpeg_dev *jpeg = ctx->jpeg;
- int ret, i, hw_id = 0;
- struct mtk_jpeg_bs bs;
- struct mtk_jpeg_fb fb;
- unsigned long flags;
-
- for (i = 0; i < MTK_JPEGDEC_HW_MAX; i++)
- comp_jpeg[i] = jpeg->dec_hw_dev[i];
- i = 0;
-
-retry_select:
- hw_id = mtk_jpegdec_get_hw(ctx);
- if (hw_id < 0) {
- ret = wait_event_interruptible_timeout(jpeg->hw_wq,
- atomic_read(&jpeg->hw_rdy) > 0,
- MTK_JPEG_HW_TIMEOUT_MSEC);
- if (ret != 0 || (i++ > MTK_JPEG_MAX_RETRY_TIME)) {
- dev_err(jpeg->dev, "%s : %d, all HW are busy\n",
- __func__, __LINE__);
- v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
- return;
- }
-
- goto retry_select;
- }
-
- atomic_dec(&jpeg->hw_rdy);
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- if (!src_buf)
- goto getbuf_fail;
-
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- if (!dst_buf)
- goto getbuf_fail;
-
- v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
- jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
- jpeg_dst_buf = mtk_jpeg_vb2_to_srcbuf(&dst_buf->vb2_buf);
-
- if (mtk_jpeg_check_resolution_change(ctx,
- &jpeg_src_buf->dec_param)) {
- mtk_jpeg_queue_src_chg_event(ctx);
- ctx->state = MTK_JPEG_SOURCE_CHANGE;
- goto getbuf_fail;
- }
-
- jpeg_src_buf->curr_ctx = ctx;
- jpeg_src_buf->frame_num = ctx->total_frame_num;
- jpeg_dst_buf->curr_ctx = ctx;
- jpeg_dst_buf->frame_num = ctx->total_frame_num;
-
- mtk_jpegdec_set_hw_param(ctx, hw_id, src_buf, dst_buf);
- ret = pm_runtime_get_sync(comp_jpeg[hw_id]->dev);
- if (ret < 0) {
- dev_err(jpeg->dev, "%s : %d, pm_runtime_get_sync fail !!!\n",
- __func__, __LINE__);
- goto dec_end;
- }
-
- ret = clk_prepare_enable(comp_jpeg[hw_id]->jdec_clk.clks->clk);
- if (ret) {
- dev_err(jpeg->dev, "%s : %d, jpegdec clk_prepare_enable fail\n",
- __func__, __LINE__);
- goto clk_end;
- }
-
- v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
- schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
- msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
-
- mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
- if (mtk_jpeg_set_dec_dst(ctx,
- &jpeg_src_buf->dec_param,
- &dst_buf->vb2_buf, &fb)) {
- dev_err(jpeg->dev, "%s : %d, mtk_jpeg_set_dec_dst fail\n",
- __func__, __LINE__);
- goto setdst_end;
- }
-
- spin_lock_irqsave(&comp_jpeg[hw_id]->hw_lock, flags);
- ctx->total_frame_num++;
- mtk_jpeg_dec_reset(comp_jpeg[hw_id]->reg_base);
- mtk_jpeg_dec_set_config(comp_jpeg[hw_id]->reg_base,
- &jpeg_src_buf->dec_param,
- jpeg_src_buf->bs_size,
- &bs,
- &fb);
- mtk_jpeg_dec_start(comp_jpeg[hw_id]->reg_base);
- v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
- spin_unlock_irqrestore(&comp_jpeg[hw_id]->hw_lock, flags);
-
- return;
-
-setdst_end:
- clk_disable_unprepare(comp_jpeg[hw_id]->jdec_clk.clks->clk);
-clk_end:
- pm_runtime_put(comp_jpeg[hw_id]->dev);
-dec_end:
- v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_buf_done(src_buf, buf_state);
- v4l2_m2m_buf_done(dst_buf, buf_state);
-getbuf_fail:
- atomic_inc(&jpeg->hw_rdy);
- mtk_jpegdec_put_hw(jpeg, hw_id);
- v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
-}
-
static void mtk_jpeg_multicore_dec_device_run(void *priv)
{
struct mtk_jpeg_ctx *ctx = priv;
@@ -1430,101 +1123,6 @@ static void mtk_jpeg_clk_off(struct mtk_jpeg_dev *jpeg)
jpeg->variant->clks);
}
-static irqreturn_t mtk_jpeg_enc_done(struct mtk_jpeg_dev *jpeg)
-{
- struct mtk_jpeg_ctx *ctx;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
- enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
- u32 result_size;
-
- ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
- if (!ctx) {
- v4l2_err(&jpeg->v4l2_dev, "Context is NULL\n");
- return IRQ_HANDLED;
- }
-
- src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
- result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base);
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0, result_size);
-
- buf_state = VB2_BUF_STATE_DONE;
-
- v4l2_m2m_buf_done(src_buf, buf_state);
- v4l2_m2m_buf_done(dst_buf, buf_state);
- v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
- pm_runtime_put(ctx->jpeg->dev);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mtk_jpeg_enc_irq(int irq, void *priv)
-{
- struct mtk_jpeg_dev *jpeg = priv;
- u32 irq_status;
- irqreturn_t ret = IRQ_NONE;
-
- cancel_delayed_work(&jpeg->job_timeout_work);
-
- irq_status = readl(jpeg->reg_base + JPEG_ENC_INT_STS) &
- JPEG_ENC_INT_STATUS_MASK_ALLIRQ;
- if (irq_status)
- writel(0, jpeg->reg_base + JPEG_ENC_INT_STS);
-
- if (!(irq_status & JPEG_ENC_INT_STATUS_DONE))
- return ret;
-
- ret = mtk_jpeg_enc_done(jpeg);
- return ret;
-}
-
-static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
-{
- struct mtk_jpeg_dev *jpeg = priv;
- struct mtk_jpeg_ctx *ctx;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct mtk_jpeg_src_buf *jpeg_src_buf;
- enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
- u32 dec_irq_ret;
- u32 dec_ret;
- int i;
-
- cancel_delayed_work(&jpeg->job_timeout_work);
-
- dec_ret = mtk_jpeg_dec_get_int_status(jpeg->reg_base);
- dec_irq_ret = mtk_jpeg_dec_enum_result(dec_ret);
- ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
- if (!ctx) {
- v4l2_err(&jpeg->v4l2_dev, "Context is NULL\n");
- return IRQ_HANDLED;
- }
-
- src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
-
- if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
- mtk_jpeg_dec_reset(jpeg->reg_base);
-
- if (dec_irq_ret != MTK_JPEG_DEC_RESULT_EOF_DONE) {
- dev_err(jpeg->dev, "decode failed\n");
- goto dec_end;
- }
-
- for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
- vb2_set_plane_payload(&dst_buf->vb2_buf, i,
- jpeg_src_buf->dec_param.comp_size[i]);
-
- buf_state = VB2_BUF_STATE_DONE;
-
-dec_end:
- v4l2_m2m_buf_done(src_buf, buf_state);
- v4l2_m2m_buf_done(dst_buf, buf_state);
- v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
- pm_runtime_put(ctx->jpeg->dev);
- return IRQ_HANDLED;
-}
-
static void mtk_jpeg_set_default_params(struct mtk_jpeg_ctx *ctx)
{
struct mtk_jpeg_q_data *q = &ctx->out_q;
@@ -1637,15 +1235,6 @@ static const struct v4l2_file_operations mtk_jpeg_fops = {
.mmap = v4l2_m2m_fop_mmap,
};
-static struct clk_bulk_data mt8173_jpeg_dec_clocks[] = {
- { .id = "jpgdec-smi" },
- { .id = "jpgdec" },
-};
-
-static struct clk_bulk_data mtk_jpeg_clocks[] = {
- { .id = "jpgenc" },
-};
-
static void mtk_jpeg_job_timeout_work(struct work_struct *work)
{
struct mtk_jpeg_dev *jpeg = container_of(work, struct mtk_jpeg_dev,
@@ -1867,6 +1456,419 @@ static const struct dev_pm_ops mtk_jpeg_pm_ops = {
};
#if defined(CONFIG_OF)
+static int mtk_jpegenc_get_hw(struct mtk_jpeg_ctx *ctx)
+{
+ struct mtk_jpegenc_comp_dev *comp_jpeg;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ unsigned long flags;
+ int hw_id = -1;
+ int i;
+
+ spin_lock_irqsave(&jpeg->hw_lock, flags);
+ for (i = 0; i < MTK_JPEGENC_HW_MAX; i++) {
+ comp_jpeg = jpeg->enc_hw_dev[i];
+ if (comp_jpeg->hw_state == MTK_JPEG_HW_IDLE) {
+ hw_id = i;
+ comp_jpeg->hw_state = MTK_JPEG_HW_BUSY;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&jpeg->hw_lock, flags);
+
+ return hw_id;
+}
+
+static int mtk_jpegenc_set_hw_param(struct mtk_jpeg_ctx *ctx,
+ int hw_id,
+ struct vb2_v4l2_buffer *src_buf,
+ struct vb2_v4l2_buffer *dst_buf)
+{
+ struct mtk_jpegenc_comp_dev *jpeg = ctx->jpeg->enc_hw_dev[hw_id];
+
+ jpeg->hw_param.curr_ctx = ctx;
+ jpeg->hw_param.src_buffer = src_buf;
+ jpeg->hw_param.dst_buffer = dst_buf;
+
+ return 0;
+}
+
+static int mtk_jpegenc_put_hw(struct mtk_jpeg_dev *jpeg, int hw_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&jpeg->hw_lock, flags);
+ jpeg->enc_hw_dev[hw_id]->hw_state = MTK_JPEG_HW_IDLE;
+ spin_unlock_irqrestore(&jpeg->hw_lock, flags);
+
+ return 0;
+}
+
+static int mtk_jpegdec_get_hw(struct mtk_jpeg_ctx *ctx)
+{
+ struct mtk_jpegdec_comp_dev *comp_jpeg;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ unsigned long flags;
+ int hw_id = -1;
+ int i;
+
+ spin_lock_irqsave(&jpeg->hw_lock, flags);
+ for (i = 0; i < MTK_JPEGDEC_HW_MAX; i++) {
+ comp_jpeg = jpeg->dec_hw_dev[i];
+ if (comp_jpeg->hw_state == MTK_JPEG_HW_IDLE) {
+ hw_id = i;
+ comp_jpeg->hw_state = MTK_JPEG_HW_BUSY;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&jpeg->hw_lock, flags);
+
+ return hw_id;
+}
+
+static int mtk_jpegdec_put_hw(struct mtk_jpeg_dev *jpeg, int hw_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&jpeg->hw_lock, flags);
+ jpeg->dec_hw_dev[hw_id]->hw_state =
+ MTK_JPEG_HW_IDLE;
+ spin_unlock_irqrestore(&jpeg->hw_lock, flags);
+
+ return 0;
+}
+
+static int mtk_jpegdec_set_hw_param(struct mtk_jpeg_ctx *ctx,
+ int hw_id,
+ struct vb2_v4l2_buffer *src_buf,
+ struct vb2_v4l2_buffer *dst_buf)
+{
+ struct mtk_jpegdec_comp_dev *jpeg =
+ ctx->jpeg->dec_hw_dev[hw_id];
+
+ jpeg->hw_param.curr_ctx = ctx;
+ jpeg->hw_param.src_buffer = src_buf;
+ jpeg->hw_param.dst_buffer = dst_buf;
+
+ return 0;
+}
+
+static irqreturn_t mtk_jpeg_enc_done(struct mtk_jpeg_dev *jpeg)
+{
+ struct mtk_jpeg_ctx *ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ u32 result_size;
+
+ ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&jpeg->v4l2_dev, "Context is NULL\n");
+ return IRQ_HANDLED;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, result_size);
+
+ buf_state = VB2_BUF_STATE_DONE;
+
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ pm_runtime_put(ctx->jpeg->dev);
+ return IRQ_HANDLED;
+}
+
+static void mtk_jpegenc_worker(struct work_struct *work)
+{
+ struct mtk_jpegenc_comp_dev *comp_jpeg[MTK_JPEGENC_HW_MAX];
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ struct mtk_jpeg_src_buf *jpeg_dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ int ret, i, hw_id = 0;
+ unsigned long flags;
+
+ struct mtk_jpeg_ctx *ctx = container_of(work,
+ struct mtk_jpeg_ctx,
+ jpeg_work);
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+
+ for (i = 0; i < MTK_JPEGENC_HW_MAX; i++)
+ comp_jpeg[i] = jpeg->enc_hw_dev[i];
+ i = 0;
+
+retry_select:
+ hw_id = mtk_jpegenc_get_hw(ctx);
+ if (hw_id < 0) {
+ ret = wait_event_interruptible(jpeg->hw_wq,
+ atomic_read(&jpeg->hw_rdy) > 0);
+ if (ret != 0 || (i++ > MTK_JPEG_MAX_RETRY_TIME)) {
+ dev_err(jpeg->dev, "%s : %d, all HW are busy\n",
+ __func__, __LINE__);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+ }
+
+ goto retry_select;
+ }
+
+ atomic_dec(&jpeg->hw_rdy);
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!src_buf)
+ goto getbuf_fail;
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ if (!dst_buf)
+ goto getbuf_fail;
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
+
+ mtk_jpegenc_set_hw_param(ctx, hw_id, src_buf, dst_buf);
+ ret = pm_runtime_get_sync(comp_jpeg[hw_id]->dev);
+ if (ret < 0) {
+ dev_err(jpeg->dev, "%s : %d, pm_runtime_get_sync fail !!!\n",
+ __func__, __LINE__);
+ goto enc_end;
+ }
+
+ ret = clk_prepare_enable(comp_jpeg[hw_id]->venc_clk.clks->clk);
+ if (ret) {
+ dev_err(jpeg->dev, "%s : %d, jpegenc clk_prepare_enable fail\n",
+ __func__, __LINE__);
+ goto enc_end;
+ }
+
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
+ msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+
+ spin_lock_irqsave(&comp_jpeg[hw_id]->hw_lock, flags);
+ jpeg_dst_buf = mtk_jpeg_vb2_to_srcbuf(&dst_buf->vb2_buf);
+ jpeg_dst_buf->curr_ctx = ctx;
+ jpeg_dst_buf->frame_num = ctx->total_frame_num;
+ ctx->total_frame_num++;
+ mtk_jpeg_enc_reset(comp_jpeg[hw_id]->reg_base);
+ mtk_jpeg_set_enc_dst(ctx,
+ comp_jpeg[hw_id]->reg_base,
+ &dst_buf->vb2_buf);
+ mtk_jpeg_set_enc_src(ctx,
+ comp_jpeg[hw_id]->reg_base,
+ &src_buf->vb2_buf);
+ mtk_jpeg_set_enc_params(ctx, comp_jpeg[hw_id]->reg_base);
+ mtk_jpeg_enc_start(comp_jpeg[hw_id]->reg_base);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ spin_unlock_irqrestore(&comp_jpeg[hw_id]->hw_lock, flags);
+
+ return;
+
+enc_end:
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
+getbuf_fail:
+ atomic_inc(&jpeg->hw_rdy);
+ mtk_jpegenc_put_hw(jpeg, hw_id);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void mtk_jpegdec_worker(struct work_struct *work)
+{
+ struct mtk_jpeg_ctx *ctx = container_of(work, struct mtk_jpeg_ctx,
+ jpeg_work);
+ struct mtk_jpegdec_comp_dev *comp_jpeg[MTK_JPEGDEC_HW_MAX];
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ struct mtk_jpeg_src_buf *jpeg_src_buf, *jpeg_dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ int ret, i, hw_id = 0;
+ struct mtk_jpeg_bs bs;
+ struct mtk_jpeg_fb fb;
+ unsigned long flags;
+
+ for (i = 0; i < MTK_JPEGDEC_HW_MAX; i++)
+ comp_jpeg[i] = jpeg->dec_hw_dev[i];
+ i = 0;
+
+retry_select:
+ hw_id = mtk_jpegdec_get_hw(ctx);
+ if (hw_id < 0) {
+ ret = wait_event_interruptible_timeout(jpeg->hw_wq,
+ atomic_read(&jpeg->hw_rdy) > 0,
+ MTK_JPEG_HW_TIMEOUT_MSEC);
+ if (ret != 0 || (i++ > MTK_JPEG_MAX_RETRY_TIME)) {
+ dev_err(jpeg->dev, "%s : %d, all HW are busy\n",
+ __func__, __LINE__);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+ }
+
+ goto retry_select;
+ }
+
+ atomic_dec(&jpeg->hw_rdy);
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!src_buf)
+ goto getbuf_fail;
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ if (!dst_buf)
+ goto getbuf_fail;
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
+ jpeg_dst_buf = mtk_jpeg_vb2_to_srcbuf(&dst_buf->vb2_buf);
+
+ if (mtk_jpeg_check_resolution_change(ctx,
+ &jpeg_src_buf->dec_param)) {
+ mtk_jpeg_queue_src_chg_event(ctx);
+ ctx->state = MTK_JPEG_SOURCE_CHANGE;
+ goto getbuf_fail;
+ }
+
+ jpeg_src_buf->curr_ctx = ctx;
+ jpeg_src_buf->frame_num = ctx->total_frame_num;
+ jpeg_dst_buf->curr_ctx = ctx;
+ jpeg_dst_buf->frame_num = ctx->total_frame_num;
+
+ mtk_jpegdec_set_hw_param(ctx, hw_id, src_buf, dst_buf);
+ ret = pm_runtime_get_sync(comp_jpeg[hw_id]->dev);
+ if (ret < 0) {
+ dev_err(jpeg->dev, "%s : %d, pm_runtime_get_sync fail !!!\n",
+ __func__, __LINE__);
+ goto dec_end;
+ }
+
+ ret = clk_prepare_enable(comp_jpeg[hw_id]->jdec_clk.clks->clk);
+ if (ret) {
+ dev_err(jpeg->dev, "%s : %d, jpegdec clk_prepare_enable fail\n",
+ __func__, __LINE__);
+ goto clk_end;
+ }
+
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
+ msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+
+ mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
+ if (mtk_jpeg_set_dec_dst(ctx,
+ &jpeg_src_buf->dec_param,
+ &dst_buf->vb2_buf, &fb)) {
+ dev_err(jpeg->dev, "%s : %d, mtk_jpeg_set_dec_dst fail\n",
+ __func__, __LINE__);
+ goto setdst_end;
+ }
+
+ spin_lock_irqsave(&comp_jpeg[hw_id]->hw_lock, flags);
+ ctx->total_frame_num++;
+ mtk_jpeg_dec_reset(comp_jpeg[hw_id]->reg_base);
+ mtk_jpeg_dec_set_config(comp_jpeg[hw_id]->reg_base,
+ &jpeg_src_buf->dec_param,
+ jpeg_src_buf->bs_size,
+ &bs,
+ &fb);
+ mtk_jpeg_dec_start(comp_jpeg[hw_id]->reg_base);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ spin_unlock_irqrestore(&comp_jpeg[hw_id]->hw_lock, flags);
+
+ return;
+
+setdst_end:
+ clk_disable_unprepare(comp_jpeg[hw_id]->jdec_clk.clks->clk);
+clk_end:
+ pm_runtime_put(comp_jpeg[hw_id]->dev);
+dec_end:
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
+getbuf_fail:
+ atomic_inc(&jpeg->hw_rdy);
+ mtk_jpegdec_put_hw(jpeg, hw_id);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static irqreturn_t mtk_jpeg_enc_irq(int irq, void *priv)
+{
+ struct mtk_jpeg_dev *jpeg = priv;
+ u32 irq_status;
+ irqreturn_t ret = IRQ_NONE;
+
+ cancel_delayed_work(&jpeg->job_timeout_work);
+
+ irq_status = readl(jpeg->reg_base + JPEG_ENC_INT_STS) &
+ JPEG_ENC_INT_STATUS_MASK_ALLIRQ;
+ if (irq_status)
+ writel(0, jpeg->reg_base + JPEG_ENC_INT_STS);
+
+ if (!(irq_status & JPEG_ENC_INT_STATUS_DONE))
+ return ret;
+
+ ret = mtk_jpeg_enc_done(jpeg);
+ return ret;
+}
+
+static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
+{
+ struct mtk_jpeg_dev *jpeg = priv;
+ struct mtk_jpeg_ctx *ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct mtk_jpeg_src_buf *jpeg_src_buf;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ u32 dec_irq_ret;
+ u32 dec_ret;
+ int i;
+
+ cancel_delayed_work(&jpeg->job_timeout_work);
+
+ dec_ret = mtk_jpeg_dec_get_int_status(jpeg->reg_base);
+ dec_irq_ret = mtk_jpeg_dec_enum_result(dec_ret);
+ ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&jpeg->v4l2_dev, "Context is NULL\n");
+ return IRQ_HANDLED;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
+
+ if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
+ mtk_jpeg_dec_reset(jpeg->reg_base);
+
+ if (dec_irq_ret != MTK_JPEG_DEC_RESULT_EOF_DONE) {
+ dev_err(jpeg->dev, "decode failed\n");
+ goto dec_end;
+ }
+
+ for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, i,
+ jpeg_src_buf->dec_param.comp_size[i]);
+
+ buf_state = VB2_BUF_STATE_DONE;
+
+dec_end:
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ pm_runtime_put(ctx->jpeg->dev);
+ return IRQ_HANDLED;
+}
+
+static struct clk_bulk_data mtk_jpeg_clocks[] = {
+ { .id = "jpgenc" },
+};
+
+static struct clk_bulk_data mt8173_jpeg_dec_clocks[] = {
+ { .id = "jpgdec-smi" },
+ { .id = "jpgdec" },
+};
+
static const struct mtk_jpeg_variant mt8173_jpeg_drvdata = {
.clks = mt8173_jpeg_dec_clocks,
.num_clks = ARRAY_SIZE(mt8173_jpeg_dec_clocks),
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.c
index b95c45791c29..bb9cdc9e0e90 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_parse.c
@@ -7,15 +7,10 @@
#include <linux/kernel.h>
#include <linux/videodev2.h>
+#include <media/jpeg.h>
#include "mtk_jpeg_dec_parse.h"
-#define TEM 0x01
-#define SOF0 0xc0
-#define RST 0xd0
-#define SOI 0xd8
-#define EOI 0xd9
-
struct mtk_jpeg_stream {
u8 *addr;
u32 size;
@@ -83,7 +78,7 @@ static bool mtk_jpeg_do_parse(struct mtk_jpeg_dec_param *param, u8 *src_addr_va,
length = 0;
switch (byte) {
- case SOF0:
+ case JPEG_MARKER_SOF0:
/* length */
if (read_word_be(&stream, &word))
break;
@@ -123,10 +118,10 @@ static bool mtk_jpeg_do_parse(struct mtk_jpeg_dec_param *param, u8 *src_addr_va,
notfound = !(i == param->comp_num);
break;
- case RST ... RST + 7:
- case SOI:
- case EOI:
- case TEM:
+ case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:
+ case JPEG_MARKER_SOI:
+ case JPEG_MARKER_EOI:
+ case JPEG_MARKER_TEM:
break;
default:
if (read_word_be(&stream, &word))
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 19a4a085f73a..a605e80c7dc3 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -1035,6 +1035,7 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
struct device *dev = &mdp->pdev->dev;
struct device_node *node, *parent;
+ int ret = 0;
parent = dev->of_node->parent;
@@ -1060,16 +1061,22 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
dev_err(dev,
"Fail to get sub comp. id: type %d alias %d\n",
type, alias_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_free_node;
}
mdp_comp_alias_id[type]++;
comp = mdp_comp_create(mdp, node, id);
- if (IS_ERR(comp))
- return PTR_ERR(comp);
+ if (IS_ERR(comp)) {
+ ret = PTR_ERR(comp);
+ goto err_free_node;
+ }
}
+ return ret;
- return 0;
+err_free_node:
+ of_node_put(node);
+ return ret;
}
void mdp_comp_destroy(struct mdp_dev *mdp)
diff --git a/drivers/media/platform/mediatek/vcodec/Makefile b/drivers/media/platform/mediatek/vcodec/Makefile
index 93e7a343b5b0..5f4c30fec85a 100644
--- a/drivers/media/platform/mediatek/vcodec/Makefile
+++ b/drivers/media/platform/mediatek/vcodec/Makefile
@@ -10,9 +10,11 @@ mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
vdec/vdec_vp8_req_if.o \
vdec/vdec_vp9_if.o \
vdec/vdec_vp9_req_lat_if.o \
+ vdec/vdec_av1_req_lat_if.o \
vdec/vdec_h264_req_if.o \
vdec/vdec_h264_req_common.o \
vdec/vdec_h264_req_multi_if.o \
+ vdec/vdec_hevc_req_multi_if.o \
mtk_vcodec_dec_drv.o \
vdec_drv_if.o \
vdec_vpu_if.o \
@@ -44,3 +46,9 @@ endif
ifneq ($(CONFIG_VIDEO_MEDIATEK_VCODEC_SCP),)
mtk-vcodec-common-y += mtk_vcodec_fw_scp.o
endif
+
+ifneq ($(CONFIG_DEBUG_FS),)
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dbgfs.o
+
+mtk-vcodec-dbgfs-y := mtk_vcodec_dbgfs.o
+endif
\ No newline at end of file
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.c
new file mode 100644
index 000000000000..b5cdbbfcc388
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "mtk_vcodec_dbgfs.h"
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_util.h"
+
+static void mtk_vdec_dbgfs_get_format_type(struct mtk_vcodec_ctx *ctx, char *buf,
+ int *used, int total)
+{
+ int curr_len;
+
+ switch (ctx->current_codec) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ curr_len = snprintf(buf + *used, total - *used,
+ "\toutput format: h264 slice\n");
+ break;
+ case V4L2_PIX_FMT_VP8_FRAME:
+ curr_len = snprintf(buf + *used, total - *used,
+ "\toutput format: vp8 slice\n");
+ break;
+ case V4L2_PIX_FMT_VP9_FRAME:
+ curr_len = snprintf(buf + *used, total - *used,
+ "\toutput format: vp9 slice\n");
+ break;
+ default:
+ curr_len = snprintf(buf + *used, total - *used,
+ "\tunsupported output format: 0x%x\n",
+ ctx->current_codec);
+ }
+ *used += curr_len;
+
+ switch (ctx->capture_fourcc) {
+ case V4L2_PIX_FMT_MM21:
+ curr_len = snprintf(buf + *used, total - *used,
+ "\tcapture format: MM21\n");
+ break;
+ case V4L2_PIX_FMT_MT21C:
+ curr_len = snprintf(buf + *used, total - *used,
+ "\tcapture format: MT21C\n");
+ break;
+ default:
+ curr_len = snprintf(buf + *used, total - *used,
+ "\tunsupported capture format: 0x%x\n",
+ ctx->capture_fourcc);
+ }
+ *used += curr_len;
+}
+
+static void mtk_vdec_dbgfs_get_help(char *buf, int *used, int total)
+{
+ int curr_len;
+
+ curr_len = snprintf(buf + *used, total - *used,
+ "help: (1: echo -'info' > vdec 2: cat vdec)\n");
+ *used += curr_len;
+
+ curr_len = snprintf(buf + *used, total - *used,
+ "\t-picinfo: get resolution\n");
+ *used += curr_len;
+
+ curr_len = snprintf(buf + *used, total - *used,
+ "\t-format: get output & capture queue format\n");
+ *used += curr_len;
+}
+
+static ssize_t mtk_vdec_dbgfs_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mtk_vcodec_dev *vcodec_dev = filp->private_data;
+ struct mtk_vcodec_dbgfs *dbgfs = &vcodec_dev->dbgfs;
+
+ mutex_lock(&dbgfs->dbgfs_lock);
+ dbgfs->buf_size = simple_write_to_buffer(dbgfs->dbgfs_buf, sizeof(dbgfs->dbgfs_buf),
+ ppos, ubuf, count);
+ mutex_unlock(&dbgfs->dbgfs_lock);
+ if (dbgfs->buf_size > 0)
+ return count;
+
+ return dbgfs->buf_size;
+}
+
+static ssize_t mtk_vdec_dbgfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mtk_vcodec_dev *vcodec_dev = filp->private_data;
+ struct mtk_vcodec_dbgfs *dbgfs = &vcodec_dev->dbgfs;
+ struct mtk_vcodec_dbgfs_inst *dbgfs_inst;
+ struct mtk_vcodec_ctx *ctx;
+ int total_len = 200 * (dbgfs->inst_count == 0 ? 1 : dbgfs->inst_count);
+ int used_len = 0, curr_len, ret;
+ bool dbgfs_index[MTK_VDEC_DBGFS_MAX] = {0};
+ char *buf = kmalloc(total_len, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (strstr(dbgfs->dbgfs_buf, "-help") || dbgfs->buf_size == 1) {
+ mtk_vdec_dbgfs_get_help(buf, &used_len, total_len);
+ goto read_buffer;
+ }
+
+ if (strstr(dbgfs->dbgfs_buf, "-picinfo"))
+ dbgfs_index[MTK_VDEC_DBGFS_PICINFO] = true;
+
+ if (strstr(dbgfs->dbgfs_buf, "-format"))
+ dbgfs_index[MTK_VDEC_DBGFS_FORMAT] = true;
+
+ mutex_lock(&dbgfs->dbgfs_lock);
+ list_for_each_entry(dbgfs_inst, &dbgfs->dbgfs_head, node) {
+ ctx = dbgfs_inst->vcodec_ctx;
+
+ curr_len = snprintf(buf + used_len, total_len - used_len,
+ "inst[%d]:\n ", ctx->id);
+ used_len += curr_len;
+
+ if (dbgfs_index[MTK_VDEC_DBGFS_PICINFO]) {
+ curr_len = snprintf(buf + used_len, total_len - used_len,
+ "\treal(%dx%d)=>align(%dx%d)\n",
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h);
+ used_len += curr_len;
+ }
+
+ if (dbgfs_index[MTK_VDEC_DBGFS_FORMAT])
+ mtk_vdec_dbgfs_get_format_type(ctx, buf, &used_len, total_len);
+ }
+ mutex_unlock(&dbgfs->dbgfs_lock);
+read_buffer:
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, used_len);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations vdec_fops = {
+ .open = simple_open,
+ .write = mtk_vdec_dbgfs_write,
+ .read = mtk_vdec_dbgfs_read,
+};
+
+void mtk_vcodec_dbgfs_create(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_vcodec_dbgfs_inst *dbgfs_inst;
+ struct mtk_vcodec_dev *vcodec_dev = ctx->dev;
+
+ dbgfs_inst = kzalloc(sizeof(*dbgfs_inst), GFP_KERNEL);
+ if (!dbgfs_inst)
+ return;
+
+ list_add_tail(&dbgfs_inst->node, &vcodec_dev->dbgfs.dbgfs_head);
+
+ vcodec_dev->dbgfs.inst_count++;
+
+ dbgfs_inst->inst_id = ctx->id;
+ dbgfs_inst->vcodec_ctx = ctx;
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_dbgfs_create);
+
+void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dev *vcodec_dev, int ctx_id)
+{
+ struct mtk_vcodec_dbgfs_inst *dbgfs_inst;
+
+ list_for_each_entry(dbgfs_inst, &vcodec_dev->dbgfs.dbgfs_head, node) {
+ if (dbgfs_inst->inst_id == ctx_id) {
+ vcodec_dev->dbgfs.inst_count--;
+ break;
+ }
+ }
+
+ if (dbgfs_inst) {
+ list_del(&dbgfs_inst->node);
+ kfree(dbgfs_inst);
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_dbgfs_remove);
+
+void mtk_vcodec_dbgfs_init(struct mtk_vcodec_dev *vcodec_dev, bool is_encode)
+{
+ struct dentry *vcodec_root;
+
+ if (is_encode)
+ vcodec_dev->dbgfs.vcodec_root = debugfs_create_dir("vcodec-enc", NULL);
+ else
+ vcodec_dev->dbgfs.vcodec_root = debugfs_create_dir("vcodec-dec", NULL);
+ if (IS_ERR(vcodec_dev->dbgfs.vcodec_root))
+ dev_err(&vcodec_dev->plat_dev->dev, "create vcodec dir err:%d\n",
+ IS_ERR(vcodec_dev->dbgfs.vcodec_root));
+
+ vcodec_root = vcodec_dev->dbgfs.vcodec_root;
+ debugfs_create_x32("mtk_v4l2_dbg_level", 0644, vcodec_root, &mtk_v4l2_dbg_level);
+ debugfs_create_x32("mtk_vcodec_dbg", 0644, vcodec_root, &mtk_vcodec_dbg);
+
+ vcodec_dev->dbgfs.inst_count = 0;
+ if (is_encode)
+ return;
+
+ INIT_LIST_HEAD(&vcodec_dev->dbgfs.dbgfs_head);
+ debugfs_create_file("vdec", 0200, vcodec_root, vcodec_dev, &vdec_fops);
+ mutex_init(&vcodec_dev->dbgfs.dbgfs_lock);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_dbgfs_init);
+
+void mtk_vcodec_dbgfs_deinit(struct mtk_vcodec_dev *vcodec_dev)
+{
+ debugfs_remove_recursive(vcodec_dev->dbgfs.vcodec_root);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_dbgfs_deinit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video codec driver");
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.h
new file mode 100644
index 000000000000..241ff8197e73
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#ifndef __MTK_VCODEC_DBGFS_H__
+#define __MTK_VCODEC_DBGFS_H__
+
+struct mtk_vcodec_dev;
+struct mtk_vcodec_ctx;
+
+/*
+ * enum mtk_vdec_dbgfs_log_index - used to get different debug information
+ */
+enum mtk_vdec_dbgfs_log_index {
+ MTK_VDEC_DBGFS_PICINFO,
+ MTK_VDEC_DBGFS_FORMAT,
+ MTK_VDEC_DBGFS_MAX,
+};
+
+/**
+ * struct mtk_vcodec_dbgfs_inst - debugfs information for each inst
+ * @node: list node for each inst
+ * @vcodec_ctx: struct mtk_vcodec_ctx
+ * @inst_id: index of the context, the same as ctx->id
+ */
+struct mtk_vcodec_dbgfs_inst {
+ struct list_head node;
+ struct mtk_vcodec_ctx *vcodec_ctx;
+ int inst_id;
+};
+
+/**
+ * struct mtk_vcodec_dbgfs - dbgfs information
+ * @dbgfs_head: list head used to link each instance
+ * @vcodec_root: vcodec dbgfs entry
+ * @dbgfs_lock: dbgfs lock used to protect dbgfs_buf
+ * @dbgfs_buf: dbgfs buf used to store dbgfs cmd
+ * @buf_size: buffer size of dbgfs
+ * @inst_count: total number of instances
+ */
+struct mtk_vcodec_dbgfs {
+ struct list_head dbgfs_head;
+ struct dentry *vcodec_root;
+ struct mutex dbgfs_lock;
+ char dbgfs_buf[1024];
+ int buf_size;
+ int inst_count;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+void mtk_vcodec_dbgfs_create(struct mtk_vcodec_ctx *ctx);
+void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dev *vcodec_dev, int ctx_id);
+void mtk_vcodec_dbgfs_init(struct mtk_vcodec_dev *vcodec_dev, bool is_encode);
+void mtk_vcodec_dbgfs_deinit(struct mtk_vcodec_dev *vcodec_dev);
+#else
+static inline void mtk_vcodec_dbgfs_create(struct mtk_vcodec_ctx *ctx)
+{
+}
+
+static inline void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dev *vcodec_dev, int ctx_id)
+{
+}
+
+static inline void mtk_vcodec_dbgfs_init(struct mtk_vcodec_dev *vcodec_dev, bool is_encode)
+{
+}
+
+static inline void mtk_vcodec_dbgfs_deinit(struct mtk_vcodec_dev *vcodec_dev)
+{
+}
+#endif
+#endif
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
index 9c652beb3f19..d41f2121b94f 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
@@ -215,6 +215,7 @@ static int fops_vcodec_open(struct file *file)
ctx->dev->vdec_pdata->init_vdec_params(ctx);
list_add(&ctx->list, &dev->ctx_list);
+ mtk_vcodec_dbgfs_create(ctx);
mutex_unlock(&dev->dev_mutex);
mtk_v4l2_debug(0, "%s decoder [%d]", dev_name(&dev->plat_dev->dev),
@@ -256,6 +257,7 @@ static int fops_vcodec_release(struct file *file)
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ mtk_vcodec_dbgfs_remove(dev, ctx->id);
list_del_init(&ctx->list);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -310,7 +312,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
}
if (IS_VDEC_LAT_ARCH(dev->vdec_pdata->hw_arch)) {
- vdec_msg_queue_init_ctx(&dev->msg_queue_core_ctx, MTK_VDEC_CORE);
dev->core_workqueue =
alloc_ordered_workqueue("core-decoder",
WQ_MEM_RECLAIM | WQ_FREEZABLE);
@@ -423,6 +424,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
mtk_v4l2_debug(0, "media registered as /dev/media%d", vfd_dec->minor);
}
+ mtk_vcodec_dbgfs_init(dev, false);
mtk_v4l2_debug(0, "decoder registered as /dev/video%d", vfd_dec->minor);
return 0;
@@ -498,6 +500,7 @@ static void mtk_vcodec_dec_remove(struct platform_device *pdev)
if (dev->vfd_dec)
video_unregister_device(dev->vfd_dec);
+ mtk_vcodec_dbgfs_deinit(dev);
v4l2_device_unregister(&dev->v4l2_dev);
if (!dev->vdec_pdata->is_subdev_supported)
pm_runtime_disable(dev->pm.dev);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
index b753bf54ebd9..e1cb2f8dca33 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
@@ -148,20 +148,21 @@ static int mtk_vdec_hw_probe(struct platform_device *pdev)
ret = mtk_vcodec_init_dec_clk(pdev, &subdev_dev->pm);
if (ret)
return ret;
- pm_runtime_enable(&pdev->dev);
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
of_id = of_match_device(mtk_vdec_hw_match, dev);
if (!of_id) {
dev_err(dev, "Can't get vdec subdev id.\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
hw_idx = (enum mtk_vdec_hw_id)(uintptr_t)of_id->data;
if (hw_idx >= MTK_VDEC_HW_MAX) {
dev_err(dev, "Hardware index %d not correct.\n", hw_idx);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
main_dev->subdev_dev[hw_idx] = subdev_dev;
@@ -173,14 +174,14 @@ static int mtk_vdec_hw_probe(struct platform_device *pdev)
if (IS_SUPPORT_VDEC_HW_IRQ(hw_idx)) {
ret = mtk_vdec_hw_init_irq(subdev_dev);
if (ret)
- goto err;
+ return ret;
}
subdev_dev->reg_base[VDEC_HW_MISC] =
devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR((__force void *)subdev_dev->reg_base[VDEC_HW_MISC])) {
ret = PTR_ERR((__force void *)subdev_dev->reg_base[VDEC_HW_MISC]);
- goto err;
+ return ret;
}
if (!main_dev->subdev_prob_done)
@@ -188,21 +189,10 @@ static int mtk_vdec_hw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, subdev_dev);
return 0;
-err:
- pm_runtime_disable(subdev_dev->pm.dev);
- return ret;
-}
-
-static int mtk_vdec_hw_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static struct platform_driver mtk_vdec_driver = {
.probe = mtk_vdec_hw_probe,
- .remove = mtk_vdec_hw_remove,
.driver = {
.name = "mtk-vdec-comp",
.of_match_table = mtk_vdec_hw_match,
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
index 3000db975e5f..db1e14a1bd6c 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
@@ -107,11 +107,103 @@ static const struct mtk_stateless_control mtk_stateless_controls[] = {
},
.codec_type = V4L2_PIX_FMT_VP9_FRAME,
},
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_SPS,
+ },
+ .codec_type = V4L2_PIX_FMT_HEVC_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_PPS,
+ },
+ .codec_type = V4L2_PIX_FMT_HEVC_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
+ },
+ .codec_type = V4L2_PIX_FMT_HEVC_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
+ },
+ .codec_type = V4L2_PIX_FMT_HEVC_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ .def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .menu_skip_mask =
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE),
+ },
+ .codec_type = V4L2_PIX_FMT_HEVC_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
+ .min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ },
+ .codec_type = V4L2_PIX_FMT_HEVC_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_START_CODE,
+ .min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ },
+ .codec_type = V4L2_PIX_FMT_HEVC_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_SEQUENCE,
+
+ },
+ .codec_type = V4L2_PIX_FMT_AV1_FRAME,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_FRAME,
+
+ },
+ .codec_type = V4L2_PIX_FMT_AV1_FRAME,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
+ .dims = { V4L2_AV1_MAX_TILE_COUNT },
+
+ },
+ .codec_type = V4L2_PIX_FMT_AV1_FRAME,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_AV1_PROFILE,
+ .min = V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN,
+ .def = V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN,
+ },
+ .codec_type = V4L2_PIX_FMT_AV1_FRAME,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_AV1_LEVEL,
+ .min = V4L2_MPEG_VIDEO_AV1_LEVEL_2_0,
+ .def = V4L2_MPEG_VIDEO_AV1_LEVEL_4_0,
+ .max = V4L2_MPEG_VIDEO_AV1_LEVEL_5_1,
+ },
+ .codec_type = V4L2_PIX_FMT_AV1_FRAME,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(mtk_stateless_controls)
-static struct mtk_video_fmt mtk_video_formats[5];
+static struct mtk_video_fmt mtk_video_formats[7];
static struct mtk_video_fmt default_out_format;
static struct mtk_video_fmt default_cap_format;
@@ -240,7 +332,7 @@ static void mtk_vdec_worker(struct work_struct *work)
mtk_v4l2_err("vb2 buffer media request is NULL");
ret = vdec_if_decode(ctx, bs_src, NULL, &res_chg);
- if (ret) {
+ if (ret && ret != -EAGAIN) {
mtk_v4l2_err(" <===[%d], src_buf[%d] sz=0x%zx pts=%llu vdec_if_decode() ret=%d res_chg=%d===>",
ctx->id, vb2_src->index, bs_src->size,
vb2_src->timestamp, ret, res_chg);
@@ -356,6 +448,8 @@ static void mtk_vcodec_add_formats(unsigned int fourcc,
case V4L2_PIX_FMT_H264_SLICE:
case V4L2_PIX_FMT_VP8_FRAME:
case V4L2_PIX_FMT_VP9_FRAME:
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ case V4L2_PIX_FMT_AV1_FRAME:
mtk_video_formats[count_formats].fourcc = fourcc;
mtk_video_formats[count_formats].type = MTK_FMT_DEC;
mtk_video_formats[count_formats].num_planes = 1;
@@ -412,6 +506,14 @@ static void mtk_vcodec_get_supported_formats(struct mtk_vcodec_ctx *ctx)
mtk_vcodec_add_formats(V4L2_PIX_FMT_VP9_FRAME, ctx);
out_format_count++;
}
+ if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_HEVC_FRAME) {
+ mtk_vcodec_add_formats(V4L2_PIX_FMT_HEVC_SLICE, ctx);
+ out_format_count++;
+ }
+ if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_AV1_FRAME) {
+ mtk_vcodec_add_formats(V4L2_PIX_FMT_AV1_FRAME, ctx);
+ out_format_count++;
+ }
if (cap_format_count)
default_cap_format = mtk_video_formats[cap_format_count - 1];
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
index 9acab54fd650..f17d67e781c9 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
@@ -16,6 +16,7 @@
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
+#include "mtk_vcodec_dbgfs.h"
#include "mtk_vcodec_util.h"
#include "vdec_msg_queue.h"
@@ -347,6 +348,8 @@ enum mtk_vdec_format_types {
MTK_VDEC_FORMAT_H264_SLICE = 0x100,
MTK_VDEC_FORMAT_VP8_FRAME = 0x200,
MTK_VDEC_FORMAT_VP9_FRAME = 0x400,
+ MTK_VDEC_FORMAT_AV1_FRAME = 0x800,
+ MTK_VDEC_FORMAT_HEVC_FRAME = 0x1000,
MTK_VCODEC_INNER_RACING = 0x20000,
};
@@ -461,7 +464,6 @@ struct mtk_vcodec_enc_pdata {
* @enc_capability: used to identify encode capability
*
* @core_workqueue: queue used for core hardware decode
- * @msg_queue_core_ctx: msg queue context used for core workqueue
*
* @subdev_dev: subdev hardware device
* @subdev_prob_done: check whether all used hw device is prob done
@@ -470,6 +472,7 @@ struct mtk_vcodec_enc_pdata {
* @dec_active_cnt: used to mark whether need to record register value
* @vdec_racing_info: record register value
* @dec_racing_info_mutex: mutex lock used for inner racing mode
+ * @dbgfs: debug log related information
*/
struct mtk_vcodec_dev {
struct v4l2_device v4l2_dev;
@@ -510,7 +513,6 @@ struct mtk_vcodec_dev {
unsigned int enc_capability;
struct workqueue_struct *core_workqueue;
- struct vdec_msg_queue_ctx msg_queue_core_ctx;
void *subdev_dev[MTK_VDEC_HW_MAX];
int (*subdev_prob_done)(struct mtk_vcodec_dev *vdec_dev);
@@ -520,6 +522,8 @@ struct mtk_vcodec_dev {
u32 vdec_racing_info[132];
/* Protects access to vdec_racing_info data */
struct mutex dec_racing_info_mutex;
+
+ struct mtk_vcodec_dbgfs dbgfs;
};
static inline struct mtk_vcodec_ctx *fh_to_ctx(struct v4l2_fh *fh)
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
index db65e77bd373..9ff439a50f53 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
@@ -505,13 +505,13 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
f->fmt.pix.pixelformat = fmt->fourcc;
}
- q_data->visible_width = f->fmt.pix_mp.width;
- q_data->visible_height = f->fmt.pix_mp.height;
- q_data->fmt = fmt;
- ret = vidioc_try_fmt_out(ctx, f, q_data->fmt);
+ ret = vidioc_try_fmt_out(ctx, f, fmt);
if (ret)
return ret;
+ q_data->fmt = fmt;
+ q_data->visible_width = f->fmt.pix_mp.width;
+ q_data->visible_height = f->fmt.pix_mp.height;
q_data->coded_width = f->fmt.pix_mp.width;
q_data->coded_height = f->fmt.pix_mp.height;
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
index 168004a08888..5df0a22ff3b5 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
@@ -358,6 +358,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_enc_reg;
}
+ mtk_vcodec_dbgfs_init(dev, true);
mtk_v4l2_debug(0, "encoder %d registered as /dev/video%d",
dev->venc_pdata->core_id, vfd_enc->num);
@@ -468,6 +469,7 @@ static void mtk_vcodec_enc_remove(struct platform_device *pdev)
if (dev->vfd_enc)
video_unregister_device(dev->vfd_enc);
+ mtk_vcodec_dbgfs_deinit(dev);
v4l2_device_unregister(&dev->v4l2_dev);
pm_runtime_disable(dev->pm.dev);
mtk_vcodec_fw_release(dev->fw_handler);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c
index ace78c4b5b9e..f214e6f67005 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c
@@ -13,6 +13,14 @@
#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_util.h"
+#if defined(CONFIG_DEBUG_FS)
+int mtk_vcodec_dbg;
+EXPORT_SYMBOL(mtk_vcodec_dbg);
+
+int mtk_v4l2_dbg_level;
+EXPORT_SYMBOL(mtk_v4l2_dbg_level);
+#endif
+
void __iomem *mtk_vcodec_get_reg_addr(struct mtk_vcodec_ctx *data,
unsigned int reg_idx)
{
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h
index 71956627a0e2..88d389b65f13 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h
@@ -35,15 +35,35 @@ struct mtk_vcodec_dev;
pr_err("[MTK_VCODEC][ERROR][%d]: " fmt "\n", \
((struct mtk_vcodec_ctx *)(h)->ctx)->id, ##args)
+#if defined(CONFIG_DEBUG_FS)
+extern int mtk_v4l2_dbg_level;
+extern int mtk_vcodec_dbg;
-#define mtk_v4l2_debug(level, fmt, args...) pr_debug(fmt, ##args)
+#define mtk_v4l2_debug(level, fmt, args...) \
+ do { \
+ if (mtk_v4l2_dbg_level >= (level)) \
+ pr_debug("[MTK_V4L2] %s, %d: " fmt "\n", \
+ __func__, __LINE__, ##args); \
+ } while (0)
-#define mtk_v4l2_debug_enter() mtk_v4l2_debug(3, "+")
-#define mtk_v4l2_debug_leave() mtk_v4l2_debug(3, "-")
+#define mtk_vcodec_debug(h, fmt, args...) \
+ do { \
+ if (mtk_vcodec_dbg) \
+ dev_dbg(&(((struct mtk_vcodec_ctx *)(h)->ctx)->dev->plat_dev->dev), \
+ "[MTK_VCODEC][%d]: %s, %d " fmt "\n", \
+ ((struct mtk_vcodec_ctx *)(h)->ctx)->id, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+#else
+#define mtk_v4l2_debug(level, fmt, args...) pr_debug(fmt, ##args)
#define mtk_vcodec_debug(h, fmt, args...) \
pr_debug("[MTK_VCODEC][%d]: " fmt "\n", \
((struct mtk_vcodec_ctx *)(h)->ctx)->id, ##args)
+#endif
+
+#define mtk_v4l2_debug_enter() mtk_v4l2_debug(3, "+")
+#define mtk_v4l2_debug_leave() mtk_v4l2_debug(3, "-")
#define mtk_vcodec_debug_enter(h) mtk_vcodec_debug(h, "+")
#define mtk_vcodec_debug_leave(h) mtk_vcodec_debug(h, "-")
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c
new file mode 100644
index 000000000000..404a1a23fd40
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c
@@ -0,0 +1,2207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Xiaoyong Lu <xiaoyong.lu@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_drv_base.h"
+#include "../vdec_drv_if.h"
+#include "../vdec_vpu_if.h"
+
+#define AV1_MAX_FRAME_BUF_COUNT (V4L2_AV1_TOTAL_REFS_PER_FRAME + 1)
+#define AV1_TILE_BUF_SIZE 64
+#define AV1_SCALE_SUBPEL_BITS 10
+#define AV1_REF_SCALE_SHIFT 14
+#define AV1_REF_NO_SCALE BIT(AV1_REF_SCALE_SHIFT)
+#define AV1_REF_INVALID_SCALE -1
+#define AV1_CDF_TABLE_BUFFER_SIZE 16384
+#define AV1_PRIMARY_REF_NONE 7
+
+#define AV1_INVALID_IDX -1
+
+#define AV1_DIV_ROUND_UP_POW2(value, n) \
+({ \
+ typeof(n) _n = n; \
+ typeof(value) _value = value; \
+ (_value + (BIT(_n) >> 1)) >> _n; \
+})
+
+#define AV1_DIV_ROUND_UP_POW2_SIGNED(value, n) \
+({ \
+ typeof(n) _n_ = n; \
+ typeof(value) _value_ = value; \
+ (((_value_) < 0) ? -AV1_DIV_ROUND_UP_POW2(-(_value_), (_n_)) \
+ : AV1_DIV_ROUND_UP_POW2((_value_), (_n_))); \
+})
+
+#define BIT_FLAG(x, bit) (!!((x)->flags & (bit)))
+#define SEGMENTATION_FLAG(x, name) (!!((x)->flags & V4L2_AV1_SEGMENTATION_FLAG_##name))
+#define QUANT_FLAG(x, name) (!!((x)->flags & V4L2_AV1_QUANTIZATION_FLAG_##name))
+#define SEQUENCE_FLAG(x, name) (!!((x)->flags & V4L2_AV1_SEQUENCE_FLAG_##name))
+#define FH_FLAG(x, name) (!!((x)->flags & V4L2_AV1_FRAME_FLAG_##name))
+
+#define MINQ 0
+#define MAXQ 255
+
+#define DIV_LUT_PREC_BITS 14
+#define DIV_LUT_BITS 8
+#define DIV_LUT_NUM BIT(DIV_LUT_BITS)
+#define WARP_PARAM_REDUCE_BITS 6
+#define WARPEDMODEL_PREC_BITS 16
+
+#define SEG_LVL_ALT_Q 0
+#define SECONDARY_FILTER_STRENGTH_NUM_BITS 2
+
+static const short div_lut[DIV_LUT_NUM + 1] = {
+ 16384, 16320, 16257, 16194, 16132, 16070, 16009, 15948, 15888, 15828, 15768,
+ 15709, 15650, 15592, 15534, 15477, 15420, 15364, 15308, 15252, 15197, 15142,
+ 15087, 15033, 14980, 14926, 14873, 14821, 14769, 14717, 14665, 14614, 14564,
+ 14513, 14463, 14413, 14364, 14315, 14266, 14218, 14170, 14122, 14075, 14028,
+ 13981, 13935, 13888, 13843, 13797, 13752, 13707, 13662, 13618, 13574, 13530,
+ 13487, 13443, 13400, 13358, 13315, 13273, 13231, 13190, 13148, 13107, 13066,
+ 13026, 12985, 12945, 12906, 12866, 12827, 12788, 12749, 12710, 12672, 12633,
+ 12596, 12558, 12520, 12483, 12446, 12409, 12373, 12336, 12300, 12264, 12228,
+ 12193, 12157, 12122, 12087, 12053, 12018, 11984, 11950, 11916, 11882, 11848,
+ 11815, 11782, 11749, 11716, 11683, 11651, 11619, 11586, 11555, 11523, 11491,
+ 11460, 11429, 11398, 11367, 11336, 11305, 11275, 11245, 11215, 11185, 11155,
+ 11125, 11096, 11067, 11038, 11009, 10980, 10951, 10923, 10894, 10866, 10838,
+ 10810, 10782, 10755, 10727, 10700, 10673, 10645, 10618, 10592, 10565, 10538,
+ 10512, 10486, 10460, 10434, 10408, 10382, 10356, 10331, 10305, 10280, 10255,
+ 10230, 10205, 10180, 10156, 10131, 10107, 10082, 10058, 10034, 10010, 9986,
+ 9963, 9939, 9916, 9892, 9869, 9846, 9823, 9800, 9777, 9754, 9732,
+ 9709, 9687, 9664, 9642, 9620, 9598, 9576, 9554, 9533, 9511, 9489,
+ 9468, 9447, 9425, 9404, 9383, 9362, 9341, 9321, 9300, 9279, 9259,
+ 9239, 9218, 9198, 9178, 9158, 9138, 9118, 9098, 9079, 9059, 9039,
+ 9020, 9001, 8981, 8962, 8943, 8924, 8905, 8886, 8867, 8849, 8830,
+ 8812, 8793, 8775, 8756, 8738, 8720, 8702, 8684, 8666, 8648, 8630,
+ 8613, 8595, 8577, 8560, 8542, 8525, 8508, 8490, 8473, 8456, 8439,
+ 8422, 8405, 8389, 8372, 8355, 8339, 8322, 8306, 8289, 8273, 8257,
+ 8240, 8224, 8208, 8192,
+};
+
+/**
+ * struct vdec_av1_slice_init_vsi - VSI used to initialize instance
+ * @architecture: architecture type
+ * @reserved: reserved
+ * @core_vsi: for core vsi
+ * @cdf_table_addr: cdf table addr
+ * @cdf_table_size: cdf table size
+ * @iq_table_addr: iq table addr
+ * @iq_table_size: iq table size
+ * @vsi_size: share vsi structure size
+ */
+struct vdec_av1_slice_init_vsi {
+ u32 architecture;
+ u32 reserved;
+ u64 core_vsi;
+ u64 cdf_table_addr;
+ u32 cdf_table_size;
+ u64 iq_table_addr;
+ u32 iq_table_size;
+ u32 vsi_size;
+};
+
+/**
+ * struct vdec_av1_slice_mem - memory address and size
+ * @buf: dma_addr padding
+ * @dma_addr: buffer address
+ * @size: buffer size
+ * @dma_addr_end: buffer end address
+ * @padding: for padding
+ */
+struct vdec_av1_slice_mem {
+ union {
+ u64 buf;
+ dma_addr_t dma_addr;
+ };
+ union {
+ size_t size;
+ dma_addr_t dma_addr_end;
+ u64 padding;
+ };
+};
+
+/**
+ * struct vdec_av1_slice_state - decoding state
+ * @err : err type for decode
+ * @full : transcoded buffer is full or not
+ * @timeout : decode timeout or not
+ * @perf : performance enable
+ * @crc : hw checksum
+ * @out_size : hw output size
+ */
+struct vdec_av1_slice_state {
+ int err;
+ u32 full;
+ u32 timeout;
+ u32 perf;
+ u32 crc[16];
+ u32 out_size;
+};
+
+/*
+ * enum vdec_av1_slice_resolution_level - resolution level
+ */
+enum vdec_av1_slice_resolution_level {
+ AV1_RES_NONE,
+ AV1_RES_FHD,
+ AV1_RES_4K,
+ AV1_RES_8K,
+};
+
+/*
+ * enum vdec_av1_slice_frame_type - av1 frame type
+ */
+enum vdec_av1_slice_frame_type {
+ AV1_KEY_FRAME = 0,
+ AV1_INTER_FRAME,
+ AV1_INTRA_ONLY_FRAME,
+ AV1_SWITCH_FRAME,
+ AV1_FRAME_TYPES,
+};
+
+/*
+ * enum vdec_av1_slice_reference_mode - reference mode type
+ */
+enum vdec_av1_slice_reference_mode {
+ AV1_SINGLE_REFERENCE = 0,
+ AV1_COMPOUND_REFERENCE,
+ AV1_REFERENCE_MODE_SELECT,
+ AV1_REFERENCE_MODES,
+};
+
+/**
+ * struct vdec_av1_slice_tile_group - info for each tile
+ * @num_tiles: tile number
+ * @tile_size: input size for each tile
+ * @tile_start_offset: tile offset to input buffer
+ */
+struct vdec_av1_slice_tile_group {
+ u32 num_tiles;
+ u32 tile_size[V4L2_AV1_MAX_TILE_COUNT];
+ u32 tile_start_offset[V4L2_AV1_MAX_TILE_COUNT];
+};
+
+/**
+ * struct vdec_av1_slice_scale_factors - scale info for each ref frame
+ * @is_scaled: frame is scaled or not
+ * @x_scale: frame width scale coefficient
+ * @y_scale: frame height scale coefficient
+ * @x_step: width step for x_scale
+ * @y_step: height step for y_scale
+ */
+struct vdec_av1_slice_scale_factors {
+ u8 is_scaled;
+ int x_scale;
+ int y_scale;
+ int x_step;
+ int y_step;
+};
+
+/**
+ * struct vdec_av1_slice_frame_refs - ref frame info
+ * @ref_fb_idx: ref slot index
+ * @ref_map_idx: ref frame index
+ * @scale_factors: scale factors for each ref frame
+ */
+struct vdec_av1_slice_frame_refs {
+ int ref_fb_idx;
+ int ref_map_idx;
+ struct vdec_av1_slice_scale_factors scale_factors;
+};
+
+/**
+ * struct vdec_av1_slice_gm - AV1 Global Motion parameters
+ * @wmtype: The type of global motion transform used
+ * @wmmat: gm_params
+ * @alpha: alpha info
+ * @beta: beta info
+ * @gamma: gamma info
+ * @delta: delta info
+ * @invalid: is invalid or not
+ */
+struct vdec_av1_slice_gm {
+ int wmtype;
+ int wmmat[8];
+ short alpha;
+ short beta;
+ short gamma;
+ short delta;
+ char invalid;
+};
+
+/**
+ * struct vdec_av1_slice_sm - AV1 Skip Mode parameters
+ * @skip_mode_allowed: Skip Mode is allowed or not
+ * @skip_mode_present: specifies whether skip_mode will be present
+ * @skip_mode_frame: specifies the frames to use for compound prediction
+ */
+struct vdec_av1_slice_sm {
+ u8 skip_mode_allowed;
+ u8 skip_mode_present;
+ int skip_mode_frame[2];
+};
+
+/**
+ * struct vdec_av1_slice_seg - AV1 Segmentation params
+ * @segmentation_enabled: this frame makes use of the segmentation tool or not
+ * @segmentation_update_map: segmentation map is updated during frame decoding
+ * @segmentation_temporal_update: segmentation map is coded relative to the existing segmentation map
+ * @segmentation_update_data: new parameters are about to be specified for each segment
+ * @feature_data: specifies the feature data for a segment feature
+ * @feature_enabled_mask: the corresponding feature value is coded or not.
+ * @segid_preskip: segment id will be read before the skip syntax element.
+ * @last_active_segid: the highest numbered segment id that has some enabled feature
+ */
+struct vdec_av1_slice_seg {
+ u8 segmentation_enabled;
+ u8 segmentation_update_map;
+ u8 segmentation_temporal_update;
+ u8 segmentation_update_data;
+ int feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX];
+ u16 feature_enabled_mask[V4L2_AV1_MAX_SEGMENTS];
+ int segid_preskip;
+ int last_active_segid;
+};
+
+/**
+ * struct vdec_av1_slice_delta_q_lf - AV1 Loop Filter delta parameters
+ * @delta_q_present: specifies whether quantizer index delta values are present
+ * @delta_q_res: specifies the left shift which should be applied to decoded quantizer index
+ * @delta_lf_present: specifies whether loop filter delta values are present
+ * @delta_lf_res: specifies the left shift which should be applied to decoded
+ * loop filter delta values
+ * @delta_lf_multi: specifies that separate loop filter deltas are sent for horizontal
+ * luma edges, vertical luma edges, the u edges, and the v edges.
+ */
+struct vdec_av1_slice_delta_q_lf {
+ u8 delta_q_present;
+ u8 delta_q_res;
+ u8 delta_lf_present;
+ u8 delta_lf_res;
+ u8 delta_lf_multi;
+};
+
+/**
+ * struct vdec_av1_slice_quantization - AV1 Quantization params
+ * @base_q_idx: indicates the base frame qindex. This is used for Y AC
+ * coefficients and as the base value for the other quantizers.
+ * @qindex: qindex
+ * @delta_qydc: indicates the Y DC quantizer relative to base_q_idx
+ * @delta_qudc: indicates the U DC quantizer relative to base_q_idx.
+ * @delta_quac: indicates the U AC quantizer relative to base_q_idx
+ * @delta_qvdc: indicates the V DC quantizer relative to base_q_idx
+ * @delta_qvac: indicates the V AC quantizer relative to base_q_idx
+ * @using_qmatrix: specifies that the quantizer matrix will be used to
+ * compute quantizers
+ * @qm_y: specifies the level in the quantizer matrix that should
+ * be used for luma plane decoding
+ * @qm_u: specifies the level in the quantizer matrix that should
+ * be used for chroma U plane decoding.
+ * @qm_v: specifies the level in the quantizer matrix that should be
+ * used for chroma V plane decoding
+ */
+struct vdec_av1_slice_quantization {
+ int base_q_idx;
+ int qindex[V4L2_AV1_MAX_SEGMENTS];
+ int delta_qydc;
+ int delta_qudc;
+ int delta_quac;
+ int delta_qvdc;
+ int delta_qvac;
+ u8 using_qmatrix;
+ u8 qm_y;
+ u8 qm_u;
+ u8 qm_v;
+};
+
+/**
+ * struct vdec_av1_slice_lr - AV1 Loop Restoration parameters
+ * @use_lr: whether to use loop restoration
+ * @use_chroma_lr: whether to use chroma loop restoration
+ * @frame_restoration_type: specifies the type of restoration used for each plane
+ * @loop_restoration_size: specifies the size of loop restoration units in units
+ * of samples in the current plane
+ */
+struct vdec_av1_slice_lr {
+ u8 use_lr;
+ u8 use_chroma_lr;
+ u8 frame_restoration_type[V4L2_AV1_NUM_PLANES_MAX];
+ u32 loop_restoration_size[V4L2_AV1_NUM_PLANES_MAX];
+};
+
+/**
+ * struct vdec_av1_slice_loop_filter - AV1 Loop filter parameters
+ * @loop_filter_level: an array containing loop filter strength values.
+ * @loop_filter_ref_deltas: contains the adjustment needed for the filter
+ * level based on the chosen reference frame
+ * @loop_filter_mode_deltas: contains the adjustment needed for the filter
+ * level based on the chosen mode
+ * @loop_filter_sharpness: indicates the sharpness level. The loop_filter_level
+ * and loop_filter_sharpness together determine when
+ * a block edge is filtered, and by how much the
+ * filtering can change the sample values
+ * @loop_filter_delta_enabled: filter level depends on the mode and reference
+ * frame used to predict a block
+ */
+struct vdec_av1_slice_loop_filter {
+ u8 loop_filter_level[4];
+ int loop_filter_ref_deltas[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ int loop_filter_mode_deltas[2];
+ u8 loop_filter_sharpness;
+ u8 loop_filter_delta_enabled;
+};
+
+/**
+ * struct vdec_av1_slice_cdef - AV1 CDEF parameters
+ * @cdef_damping: controls the amount of damping in the deringing filter
+ * @cdef_y_strength: specifies the strength of the primary filter and secondary filter
+ * @cdef_uv_strength: specifies the strength of the primary filter and secondary filter
+ * @cdef_bits: specifies the number of bits needed to specify which
+ * CDEF filter to apply
+ */
+struct vdec_av1_slice_cdef {
+ u8 cdef_damping;
+ u8 cdef_y_strength[8];
+ u8 cdef_uv_strength[8];
+ u8 cdef_bits;
+};
+
+/**
+ * struct vdec_av1_slice_mfmv - AV1 motion field motion vector (mfmv) parameters
+ * @mfmv_valid_ref: mfmv_valid_ref
+ * @mfmv_dir: mfmv_dir
+ * @mfmv_ref_to_cur: mfmv_ref_to_cur
+ * @mfmv_ref_frame_idx: mfmv_ref_frame_idx
+ * @mfmv_count: mfmv_count
+ */
+struct vdec_av1_slice_mfmv {
+ u32 mfmv_valid_ref[3];
+ u32 mfmv_dir[3];
+ int mfmv_ref_to_cur[3];
+ int mfmv_ref_frame_idx[3];
+ int mfmv_count;
+};
+
+/**
+ * struct vdec_av1_slice_tile - AV1 Tile info
+ * @tile_cols: specifies the number of tiles across the frame
+ * @tile_rows: specifies the number of tiles down the frame
+ * @mi_col_starts: an array specifying the start column
+ * @mi_row_starts: an array specifying the start row
+ * @context_update_tile_id: specifies which tile to use for the CDF update
+ * @uniform_tile_spacing_flag: tiles are uniformly spaced across the frame
+ * or the tile sizes are coded
+ */
+struct vdec_av1_slice_tile {
+ u8 tile_cols;
+ u8 tile_rows;
+ int mi_col_starts[V4L2_AV1_MAX_TILE_COLS + 1];
+ int mi_row_starts[V4L2_AV1_MAX_TILE_ROWS + 1];
+ u8 context_update_tile_id;
+ u8 uniform_tile_spacing_flag;
+};
+
+/**
+ * struct vdec_av1_slice_uncompressed_header - Represents an AV1 Frame Header OBU
+ * @use_ref_frame_mvs: use_ref_frame_mvs flag
+ * @order_hint: specifies the OrderHintBits least significant bits of the expected output order of this frame
+ * @gm: global motion param
+ * @upscaled_width: the upscaled width
+ * @frame_width: frame's width
+ * @frame_height: frame's height
+ * @reduced_tx_set: frame is restricted to a reduced subset of the full
+ * set of transform types
+ * @tx_mode: specifies how the transform size is determined
+ * @uniform_tile_spacing_flag: tiles are uniformly spaced across the frame
+ * or the tile sizes are coded
+ * @interpolation_filter: specifies the filter selection used for performing inter prediction
+ * @allow_warped_motion: specifies whether the syntax element motion_mode may be present
+ * @is_motion_mode_switchable: equal to 0 specifies that only the SIMPLE motion mode will be used
+ * @reference_mode: the frame reference mode selected
+ * @allow_high_precision_mv: specifies that motion vectors are specified to
+ * quarter pel precision or to eighth pel precision
+ * @allow_intra_bc: indicates that intra block copy may be used in this frame
+ * @force_integer_mv: specifies whether motion vectors will always be integers or
+ * can contain fractional bits
+ * @allow_screen_content_tools: intra blocks may use palette encoding
+ * @error_resilient_mode: indicates whether error resilient mode is enabled
+ * @frame_type: specifies the AV1 frame type
+ * @primary_ref_frame: specifies which reference frame contains the CDF values
+ * and other state that should be loaded at the start of the frame;
+ * the reference slots will be updated with the current frame after it is decoded
+ * @disable_frame_end_update_cdf: indicates whether the end-of-frame CDF update is disabled
+ * @disable_cdf_update: specifies whether the CDF update in the symbol
+ * decoding process should be disabled
+ * @skip_mode: av1 skip mode parameters
+ * @seg: av1 segmentation parameters
+ * @delta_q_lf: av1 quantizer index and loop filter delta parameters
+ * @quant: av1 Quantization params
+ * @lr: av1 Loop Restoration parameters
+ * @superres_denom: the denominator for the upscaling ratio
+ * @loop_filter: av1 Loop filter parameters
+ * @cdef: av1 CDEF parameters
+ * @mfmv: av1 mfmv parameters
+ * @tile: av1 Tile info
+ * @frame_is_intra: whether the current frame is an intra-only or key frame
+ * @loss_less_array: per-segment lossless flags
+ * @coded_loss_less: set when every segment is coded lossless
+ * @mi_rows: size of mi unit in rows
+ * @mi_cols: size of mi unit in cols
+ */
+struct vdec_av1_slice_uncompressed_header {
+ u8 use_ref_frame_mvs;
+ int order_hint;
+ struct vdec_av1_slice_gm gm[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ u32 upscaled_width;
+ u32 frame_width;
+ u32 frame_height;
+ u8 reduced_tx_set;
+ u8 tx_mode;
+ u8 uniform_tile_spacing_flag;
+ u8 interpolation_filter;
+ u8 allow_warped_motion;
+ u8 is_motion_mode_switchable;
+ u8 reference_mode;
+ u8 allow_high_precision_mv;
+ u8 allow_intra_bc;
+ u8 force_integer_mv;
+ u8 allow_screen_content_tools;
+ u8 error_resilient_mode;
+ u8 frame_type;
+ u8 primary_ref_frame;
+ u8 disable_frame_end_update_cdf;
+ u32 disable_cdf_update;
+ struct vdec_av1_slice_sm skip_mode;
+ struct vdec_av1_slice_seg seg;
+ struct vdec_av1_slice_delta_q_lf delta_q_lf;
+ struct vdec_av1_slice_quantization quant;
+ struct vdec_av1_slice_lr lr;
+ u32 superres_denom;
+ struct vdec_av1_slice_loop_filter loop_filter;
+ struct vdec_av1_slice_cdef cdef;
+ struct vdec_av1_slice_mfmv mfmv;
+ struct vdec_av1_slice_tile tile;
+ u8 frame_is_intra;
+ u8 loss_less_array[V4L2_AV1_MAX_SEGMENTS];
+ u8 coded_loss_less;
+ u32 mi_rows;
+ u32 mi_cols;
+};
+
+/**
+ * struct vdec_av1_slice_seq_header - Represents an AV1 Sequence OBU
+ * @bitdepth: the bitdepth to use for the sequence
+ * @enable_superres: specifies whether the use_superres syntax element may be present
+ * @enable_filter_intra: specifies whether the use_filter_intra syntax element may be present
+ * @enable_intra_edge_filter: whether the intra edge filtering process should be enabled
+ * @enable_interintra_compound: specifies that the mode info for inter blocks may
+ * contain the syntax element interintra
+ * @enable_masked_compound: specifies that the mode info for inter blocks may
+ * contain the syntax element compound_type
+ * @enable_dual_filter: the inter prediction filter type may be specified independently
+ * @enable_jnt_comp: distance weights process may be used for inter prediction
+ * @mono_chrome: indicates the video does not contain U and V color planes
+ * @enable_order_hint: tools based on the values of order hints may be used
+ * @order_hint_bits: the number of bits used for the order_hint field at each frame
+ * @use_128x128_superblock: indicates superblocks contain 128*128 luma samples
+ * @subsampling_x: the chroma subsampling x format
+ * @subsampling_y: the chroma subsampling y format
+ * @max_frame_width: the maximum frame width for the frames represented by sequence
+ * @max_frame_height: the maximum frame height for the frames represented by sequence
+ */
+struct vdec_av1_slice_seq_header {
+ u8 bitdepth;
+ u8 enable_superres;
+ u8 enable_filter_intra;
+ u8 enable_intra_edge_filter;
+ u8 enable_interintra_compound;
+ u8 enable_masked_compound;
+ u8 enable_dual_filter;
+ u8 enable_jnt_comp;
+ u8 mono_chrome;
+ u8 enable_order_hint;
+ u8 order_hint_bits;
+ u8 use_128x128_superblock;
+ u8 subsampling_x;
+ u8 subsampling_y;
+ u32 max_frame_width;
+ u32 max_frame_height;
+};
+
+/**
+ * struct vdec_av1_slice_frame - Represents current Frame info
+ * @uh: uncompressed header info
+ * @seq: sequence header info
+ * @large_scale_tile: whether large scale tile mode is used
+ * @cur_ts: current frame timestamp
+ * @prev_fb_idx: previous frame slot id
+ * @ref_frame_sign_bias: arrays for ref_frame sign bias
+ * @order_hints: arrays for ref_frame order hint
+ * @ref_frame_valid: arrays for valid ref_frame
+ * @ref_frame_map: map to slot frame info
+ * @frame_refs: ref_frame info
+ */
+struct vdec_av1_slice_frame {
+ struct vdec_av1_slice_uncompressed_header uh;
+ struct vdec_av1_slice_seq_header seq;
+ u8 large_scale_tile;
+ u64 cur_ts;
+ int prev_fb_idx;
+ u8 ref_frame_sign_bias[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ u32 order_hints[V4L2_AV1_REFS_PER_FRAME];
+ u32 ref_frame_valid[V4L2_AV1_REFS_PER_FRAME];
+ int ref_frame_map[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ struct vdec_av1_slice_frame_refs frame_refs[V4L2_AV1_REFS_PER_FRAME];
+};
+
+/**
+ * struct vdec_av1_slice_work_buffer - work buffer for lat
+ * @mv_addr: mv buffer memory info
+ * @cdf_addr: cdf buffer memory info
+ * @segid_addr: segid buffer memory info
+ */
+struct vdec_av1_slice_work_buffer {
+ struct vdec_av1_slice_mem mv_addr;
+ struct vdec_av1_slice_mem cdf_addr;
+ struct vdec_av1_slice_mem segid_addr;
+};
+
+/**
+ * struct vdec_av1_slice_frame_info - frame info for each slot
+ * @frame_type: frame type
+ * @frame_is_intra: is intra frame
+ * @order_hint: order hint
+ * @order_hints: reference frame order hints
+ * @upscaled_width: upscaled width
+ * @pic_pitch: buffer pitch
+ * @frame_width: frame width
+ * @frame_height: frame height
+ * @mi_rows: rows in mode info
+ * @mi_cols: cols in mode info
+ * @ref_count: reference count for this slot
+ */
+struct vdec_av1_slice_frame_info {
+ u8 frame_type;
+ u8 frame_is_intra;
+ int order_hint;
+ u32 order_hints[V4L2_AV1_REFS_PER_FRAME];
+ u32 upscaled_width;
+ u32 pic_pitch;
+ u32 frame_width;
+ u32 frame_height;
+ u32 mi_rows;
+ u32 mi_cols;
+ int ref_count;
+};
+
+/**
+ * struct vdec_av1_slice_slot - slot info that needs to be saved in the global instance
+ * @frame_info: frame info for each slot
+ * @timestamp: time stamp info
+ */
+struct vdec_av1_slice_slot {
+ struct vdec_av1_slice_frame_info frame_info[AV1_MAX_FRAME_BUF_COUNT];
+ u64 timestamp[AV1_MAX_FRAME_BUF_COUNT];
+};
+
+/**
+ * struct vdec_av1_slice_fb - frame buffer for decoding
+ * @y: current y buffer address info
+ * @c: current c buffer address info
+ */
+struct vdec_av1_slice_fb {
+ struct vdec_av1_slice_mem y;
+ struct vdec_av1_slice_mem c;
+};
+
+/**
+ * struct vdec_av1_slice_vsi - exchange frame information between Main CPU and MicroP
+ * @bs: input buffer info
+ * @work_buffer: working buffer for hw
+ * @cdf_table: cdf_table buffer
+ * @cdf_tmp: cdf temp buffer
+ * @rd_mv: mv buffer for lat output, core input
+ * @ube: ube buffer
+ * @trans: transcoded buffer
+ * @err_map: err map buffer
+ * @row_info: row info buffer
+ * @fb: current y/c buffer
+ * @ref: ref y/c buffer
+ * @iq_table: iq table buffer
+ * @tile: tile buffer
+ * @slots: slots info for each frame
+ * @slot_id: current frame slot id
+ * @frame: current frame info
+ * @state: status after decode done
+ * @cur_lst_tile_id: tile id for large scale
+ */
+struct vdec_av1_slice_vsi {
+ /* lat */
+ struct vdec_av1_slice_mem bs;
+ struct vdec_av1_slice_work_buffer work_buffer[AV1_MAX_FRAME_BUF_COUNT];
+ struct vdec_av1_slice_mem cdf_table;
+ struct vdec_av1_slice_mem cdf_tmp;
+ /* LAT stage's output, Core stage's input */
+ struct vdec_av1_slice_mem rd_mv;
+ struct vdec_av1_slice_mem ube;
+ struct vdec_av1_slice_mem trans;
+ struct vdec_av1_slice_mem err_map;
+ struct vdec_av1_slice_mem row_info;
+ /* core */
+ struct vdec_av1_slice_fb fb;
+ struct vdec_av1_slice_fb ref[V4L2_AV1_REFS_PER_FRAME];
+ struct vdec_av1_slice_mem iq_table;
+ /* lat and core share*/
+ struct vdec_av1_slice_mem tile;
+ struct vdec_av1_slice_slot slots;
+ s8 slot_id;
+ struct vdec_av1_slice_frame frame;
+ struct vdec_av1_slice_state state;
+ u32 cur_lst_tile_id;
+};
+
+/**
+ * struct vdec_av1_slice_pfc - per-frame context that contains a local vsi.
+ * passed from the LAT stage to the core stage
+ * @vsi: local vsi. copy to/from remote vsi before/after decoding
+ * @ref_idx: reference buffer timestamp
+ * @seq: picture sequence
+ */
+struct vdec_av1_slice_pfc {
+ struct vdec_av1_slice_vsi vsi;
+ u64 ref_idx[V4L2_AV1_REFS_PER_FRAME];
+ int seq;
+};
+
+/**
+ * struct vdec_av1_slice_instance - represent one av1 instance
+ * @ctx: pointer to codec's context
+ * @vpu: VPU instance
+ * @iq_table: iq table buffer
+ * @cdf_table: cdf table buffer
+ * @mv: mv working buffer
+ * @cdf: cdf working buffer
+ * @seg: segmentation working buffer
+ * @cdf_temp: cdf temp buffer
+ * @tile: tile buffer
+ * @slots: slots info
+ * @tile_group: tile_group entry
+ * @level: level of current resolution
+ * @width: width of last picture
+ * @height: height of last picture
+ * @frame_type: frame_type of last picture
+ * @irq_enabled: irq to Main CPU or MicroP
+ * @inneracing_mode: whether inner racing mode is enabled
+ * @init_vsi: vsi used for initializing the AV1 instance
+ * @vsi: vsi used for decoding/flush ...
+ * @core_vsi: vsi used for Core stage
+ * @seq: global picture sequence
+ */
+struct vdec_av1_slice_instance {
+ struct mtk_vcodec_ctx *ctx;
+ struct vdec_vpu_inst vpu;
+
+ struct mtk_vcodec_mem iq_table;
+ struct mtk_vcodec_mem cdf_table;
+
+ struct mtk_vcodec_mem mv[AV1_MAX_FRAME_BUF_COUNT];
+ struct mtk_vcodec_mem cdf[AV1_MAX_FRAME_BUF_COUNT];
+ struct mtk_vcodec_mem seg[AV1_MAX_FRAME_BUF_COUNT];
+ struct mtk_vcodec_mem cdf_temp;
+ struct mtk_vcodec_mem tile;
+ struct vdec_av1_slice_slot slots;
+ struct vdec_av1_slice_tile_group tile_group;
+
+ /* for resolution change and get_pic_info */
+ enum vdec_av1_slice_resolution_level level;
+ u32 width;
+ u32 height;
+
+ u32 frame_type;
+ u32 irq_enabled;
+ u32 inneracing_mode;
+
+ /* MicroP vsi */
+ union {
+ struct vdec_av1_slice_init_vsi *init_vsi;
+ struct vdec_av1_slice_vsi *vsi;
+ };
+ struct vdec_av1_slice_vsi *core_vsi;
+ int seq;
+};
+
+static int vdec_av1_slice_core_decode(struct vdec_lat_buf *lat_buf);
+
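+/* return the bit position of the most significant set bit of n (0 when n is 0) */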
+static inline int vdec_av1_slice_get_msb(u32 n)
+{
+ if (n == 0)
+ return 0;
+ return 31 ^ __builtin_clz(n);
+}
+
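+/*
+ * A reference frame may only be scaled when it is at most twice as large and
+ * at least 1/16 the size of the current frame in each dimension.
+ */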
+static inline bool vdec_av1_slice_need_scale(u32 ref_width, u32 ref_height,
+ u32 this_width, u32 this_height)
+{
+ return ((this_width << 1) >= ref_width) &&
+ ((this_height << 1) >= ref_height) &&
+ (this_width <= (ref_width << 4)) &&
+ (this_height <= (ref_height << 4));
+}
+
+static void *vdec_av1_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
+
+ if (!ctrl)
+ return ERR_PTR(-EINVAL);
+
+ return ctrl->p_cur.p;
+}
+
+static int vdec_av1_slice_init_cdf_table(struct vdec_av1_slice_instance *instance)
+{
+ u8 *remote_cdf_table;
+ struct mtk_vcodec_ctx *ctx;
+ struct vdec_av1_slice_init_vsi *vsi;
+ int ret;
+
+ ctx = instance->ctx;
+ vsi = instance->vpu.vsi;
+ remote_cdf_table = mtk_vcodec_fw_map_dm_addr(ctx->dev->fw_handler,
+ (u32)vsi->cdf_table_addr);
+ if (IS_ERR(remote_cdf_table)) {
+ mtk_vcodec_err(instance, "failed to map cdf table\n");
+ return PTR_ERR(remote_cdf_table);
+ }
+
+ mtk_vcodec_debug(instance, "map cdf table to 0x%p\n",
+ remote_cdf_table);
+
+ if (instance->cdf_table.va)
+ mtk_vcodec_mem_free(ctx, &instance->cdf_table);
+ instance->cdf_table.size = vsi->cdf_table_size;
+
+ ret = mtk_vcodec_mem_alloc(ctx, &instance->cdf_table);
+ if (ret)
+ return ret;
+
+ memcpy(instance->cdf_table.va, remote_cdf_table, vsi->cdf_table_size);
+
+ return 0;
+}
+
+static int vdec_av1_slice_init_iq_table(struct vdec_av1_slice_instance *instance)
+{
+ u8 *remote_iq_table;
+ struct mtk_vcodec_ctx *ctx;
+ struct vdec_av1_slice_init_vsi *vsi;
+ int ret;
+
+ ctx = instance->ctx;
+ vsi = instance->vpu.vsi;
+ remote_iq_table = mtk_vcodec_fw_map_dm_addr(ctx->dev->fw_handler,
+ (u32)vsi->iq_table_addr);
+ if (IS_ERR(remote_iq_table)) {
+ mtk_vcodec_err(instance, "failed to map iq table\n");
+ return PTR_ERR(remote_iq_table);
+ }
+
+ mtk_vcodec_debug(instance, "map iq table to 0x%p\n", remote_iq_table);
+
+ if (instance->iq_table.va)
+ mtk_vcodec_mem_free(ctx, &instance->iq_table);
+ instance->iq_table.size = vsi->iq_table_size;
+
+ ret = mtk_vcodec_mem_alloc(ctx, &instance->iq_table);
+ if (ret)
+ return ret;
+
+ memcpy(instance->iq_table.va, remote_iq_table, vsi->iq_table_size);
+
+ return 0;
+}
+
+static int vdec_av1_slice_get_new_slot(struct vdec_av1_slice_vsi *vsi)
+{
+ struct vdec_av1_slice_slot *slots = &vsi->slots;
+ int new_slot_idx = AV1_INVALID_IDX;
+ int i;
+
+ for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++) {
+ if (slots->frame_info[i].ref_count == 0) {
+ new_slot_idx = i;
+ break;
+ }
+ }
+
+ if (new_slot_idx != AV1_INVALID_IDX) {
+ slots->frame_info[new_slot_idx].ref_count++;
+ slots->timestamp[new_slot_idx] = vsi->frame.cur_ts;
+ }
+
+ return new_slot_idx;
+}
+
+static inline void vdec_av1_slice_clear_fb(struct vdec_av1_slice_frame_info *frame_info)
+{
+ memset((void *)frame_info, 0, sizeof(struct vdec_av1_slice_frame_info));
+}
+
+static void vdec_av1_slice_decrease_ref_count(struct vdec_av1_slice_slot *slots, int fb_idx)
+{
+ struct vdec_av1_slice_frame_info *frame_info = slots->frame_info;
+
+ frame_info[fb_idx].ref_count--;
+ if (frame_info[fb_idx].ref_count < 0) {
+ frame_info[fb_idx].ref_count = 0;
+ mtk_v4l2_err("av1_error: %s() fb_idx %d decrease ref_count error\n",
+ __func__, fb_idx);
+ }
+
+ vdec_av1_slice_clear_fb(&frame_info[fb_idx]);
+}
+
+static void vdec_av1_slice_cleanup_slots(struct vdec_av1_slice_slot *slots,
+ struct vdec_av1_slice_frame *frame,
+ struct v4l2_ctrl_av1_frame *ctrl_fh)
+{
+ int slot_id, ref_id;
+
+ for (ref_id = 0; ref_id < V4L2_AV1_TOTAL_REFS_PER_FRAME; ref_id++)
+ frame->ref_frame_map[ref_id] = AV1_INVALID_IDX;
+
+ for (slot_id = 0; slot_id < AV1_MAX_FRAME_BUF_COUNT; slot_id++) {
+ u64 timestamp = slots->timestamp[slot_id];
+ bool ref_used = false;
+
+ /* ignore unused slots */
+ if (slots->frame_info[slot_id].ref_count == 0)
+ continue;
+
+ for (ref_id = 0; ref_id < V4L2_AV1_TOTAL_REFS_PER_FRAME; ref_id++) {
+ if (ctrl_fh->reference_frame_ts[ref_id] == timestamp) {
+ frame->ref_frame_map[ref_id] = slot_id;
+ ref_used = true;
+ }
+ }
+
+ if (!ref_used)
+ vdec_av1_slice_decrease_ref_count(slots, slot_id);
+ }
+}
+
+static void vdec_av1_slice_setup_slot(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_vsi *vsi,
+ struct v4l2_ctrl_av1_frame *ctrl_fh)
+{
+ struct vdec_av1_slice_frame_info *cur_frame_info;
+ struct vdec_av1_slice_uncompressed_header *uh = &vsi->frame.uh;
+ int ref_id;
+
+ memcpy(&vsi->slots, &instance->slots, sizeof(instance->slots));
+ vdec_av1_slice_cleanup_slots(&vsi->slots, &vsi->frame, ctrl_fh);
+ vsi->slot_id = vdec_av1_slice_get_new_slot(vsi);
+
+ if (vsi->slot_id == AV1_INVALID_IDX) {
+ mtk_v4l2_err("warning:av1 get invalid index slot\n");
+ vsi->slot_id = 0;
+ }
+ cur_frame_info = &vsi->slots.frame_info[vsi->slot_id];
+ cur_frame_info->frame_type = uh->frame_type;
+ cur_frame_info->frame_is_intra = ((uh->frame_type == AV1_INTRA_ONLY_FRAME) ||
+ (uh->frame_type == AV1_KEY_FRAME));
+ cur_frame_info->order_hint = uh->order_hint;
+ cur_frame_info->upscaled_width = uh->upscaled_width;
+ cur_frame_info->pic_pitch = 0;
+ cur_frame_info->frame_width = uh->frame_width;
+ cur_frame_info->frame_height = uh->frame_height;
+ cur_frame_info->mi_cols = ((uh->frame_width + 7) >> 3) << 1;
+ cur_frame_info->mi_rows = ((uh->frame_height + 7) >> 3) << 1;
+
+ /* ensure current frame is properly mapped if referenced */
+ for (ref_id = 0; ref_id < V4L2_AV1_TOTAL_REFS_PER_FRAME; ref_id++) {
+ u64 timestamp = vsi->slots.timestamp[vsi->slot_id];
+
+ if (ctrl_fh->reference_frame_ts[ref_id] == timestamp)
+ vsi->frame.ref_frame_map[ref_id] = vsi->slot_id;
+ }
+}
+
+static int vdec_av1_slice_alloc_working_buffer(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_vsi *vsi)
+{
+ struct mtk_vcodec_ctx *ctx = instance->ctx;
+ enum vdec_av1_slice_resolution_level level;
+ u32 max_sb_w, max_sb_h, max_w, max_h, w, h;
+ int i, ret;
+
+ w = vsi->frame.uh.frame_width;
+ h = vsi->frame.uh.frame_height;
+
+ if (w > VCODEC_DEC_4K_CODED_WIDTH || h > VCODEC_DEC_4K_CODED_HEIGHT)
+ /* 8K */
+ return -EINVAL;
+
+ if (w > MTK_VDEC_MAX_W || h > MTK_VDEC_MAX_H) {
+ /* 4K */
+ level = AV1_RES_4K;
+ max_w = VCODEC_DEC_4K_CODED_WIDTH;
+ max_h = VCODEC_DEC_4K_CODED_HEIGHT;
+ } else {
+ /* FHD */
+ level = AV1_RES_FHD;
+ max_w = MTK_VDEC_MAX_W;
+ max_h = MTK_VDEC_MAX_H;
+ }
+
+ if (level == instance->level)
+ return 0;
+
+ mtk_vcodec_debug(instance, "resolution level changed from %u to %u, %ux%u",
+ instance->level, level, w, h);
+
+ max_sb_w = DIV_ROUND_UP(max_w, 128);
+ max_sb_h = DIV_ROUND_UP(max_h, 128);
+
+ for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++) {
+ if (instance->mv[i].va)
+ mtk_vcodec_mem_free(ctx, &instance->mv[i]);
+ instance->mv[i].size = max_sb_w * max_sb_h * SZ_1K;
+ ret = mtk_vcodec_mem_alloc(ctx, &instance->mv[i]);
+ if (ret)
+ goto err;
+
+ if (instance->seg[i].va)
+ mtk_vcodec_mem_free(ctx, &instance->seg[i]);
+ instance->seg[i].size = max_sb_w * max_sb_h * 512;
+ ret = mtk_vcodec_mem_alloc(ctx, &instance->seg[i]);
+ if (ret)
+ goto err;
+
+ if (instance->cdf[i].va)
+ mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
+ instance->cdf[i].size = AV1_CDF_TABLE_BUFFER_SIZE;
+ ret = mtk_vcodec_mem_alloc(ctx, &instance->cdf[i]);
+ if (ret)
+ goto err;
+ }
+
+ if (!instance->cdf_temp.va) {
+ instance->cdf_temp.size = (SZ_1K * 16 * 100);
+ ret = mtk_vcodec_mem_alloc(ctx, &instance->cdf_temp);
+ if (ret)
+ goto err;
+ vsi->cdf_tmp.buf = instance->cdf_temp.dma_addr;
+ vsi->cdf_tmp.size = instance->cdf_temp.size;
+ }
+
+ if (instance->tile.va)
+ mtk_vcodec_mem_free(ctx, &instance->tile);
+
+ instance->tile.size = AV1_TILE_BUF_SIZE * V4L2_AV1_MAX_TILE_COUNT;
+ ret = mtk_vcodec_mem_alloc(ctx, &instance->tile);
+ if (ret)
+ goto err;
+
+ instance->level = level;
+ return 0;
+
+err:
+ instance->level = AV1_RES_NONE;
+ return ret;
+}
+
+static void vdec_av1_slice_free_working_buffer(struct vdec_av1_slice_instance *instance)
+{
+ struct mtk_vcodec_ctx *ctx = instance->ctx;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(instance->mv); i++)
+ mtk_vcodec_mem_free(ctx, &instance->mv[i]);
+
+ for (i = 0; i < ARRAY_SIZE(instance->seg); i++)
+ mtk_vcodec_mem_free(ctx, &instance->seg[i]);
+
+ for (i = 0; i < ARRAY_SIZE(instance->cdf); i++)
+ mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
+
+ mtk_vcodec_mem_free(ctx, &instance->tile);
+ mtk_vcodec_mem_free(ctx, &instance->cdf_temp);
+ mtk_vcodec_mem_free(ctx, &instance->cdf_table);
+ mtk_vcodec_mem_free(ctx, &instance->iq_table);
+
+ instance->level = AV1_RES_NONE;
+}
+
+static inline void vdec_av1_slice_vsi_from_remote(struct vdec_av1_slice_vsi *vsi,
+ struct vdec_av1_slice_vsi *remote_vsi)
+{
+ memcpy(&vsi->trans, &remote_vsi->trans, sizeof(vsi->trans));
+ memcpy(&vsi->state, &remote_vsi->state, sizeof(vsi->state));
+}
+
+static inline void vdec_av1_slice_vsi_to_remote(struct vdec_av1_slice_vsi *vsi,
+ struct vdec_av1_slice_vsi *remote_vsi)
+{
+ memcpy(remote_vsi, vsi, sizeof(*vsi));
+}
+
+static int vdec_av1_slice_setup_lat_from_src_buf(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_vsi *vsi,
+ struct vdec_lat_buf *lat_buf)
+{
+ struct vb2_v4l2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+
+ src = v4l2_m2m_next_src_buf(instance->ctx->m2m_ctx);
+ if (!src)
+ return -EINVAL;
+
+ lat_buf->src_buf_req = src->vb2_buf.req_obj.req;
+ dst = &lat_buf->ts_info;
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+ vsi->frame.cur_ts = dst->vb2_buf.timestamp;
+
+ return 0;
+}
+
+static short vdec_av1_slice_resolve_divisor_32(u32 D, short *shift)
+{
+ int f;
+ int e;
+
+ *shift = vdec_av1_slice_get_msb(D);
+ /* e is obtained from D after resetting the most significant 1 bit. */
+ e = D - ((u32)1 << *shift);
+ /* Get the most significant DIV_LUT_BITS (8) bits of e into f */
+ if (*shift > DIV_LUT_BITS)
+ f = AV1_DIV_ROUND_UP_POW2(e, *shift - DIV_LUT_BITS);
+ else
+ f = e << (DIV_LUT_BITS - *shift);
+ if (f > DIV_LUT_NUM)
+ return -1;
+ *shift += DIV_LUT_PREC_BITS;
+ /* Use f as lookup into the precomputed table of multipliers */
+ return div_lut[f];
+}
+
+static void vdec_av1_slice_get_shear_params(struct vdec_av1_slice_gm *gm_params)
+{
+ const int *mat = gm_params->wmmat;
+ short shift;
+ short y;
+ long long gv, dv;
+
+ if (gm_params->wmmat[2] <= 0)
+ return;
+
+ gm_params->alpha = clamp_val(mat[2] - (1 << WARPEDMODEL_PREC_BITS), S16_MIN, S16_MAX);
+ gm_params->beta = clamp_val(mat[3], S16_MIN, S16_MAX);
+
+ y = vdec_av1_slice_resolve_divisor_32(abs(mat[2]), &shift) * (mat[2] < 0 ? -1 : 1);
+
+ gv = ((long long)mat[4] * (1 << WARPEDMODEL_PREC_BITS)) * y;
+ gm_params->gamma = clamp_val((int)AV1_DIV_ROUND_UP_POW2_SIGNED(gv, shift),
+ S16_MIN, S16_MAX);
+
+ dv = ((long long)mat[3] * mat[4]) * y;
+ gm_params->delta = clamp_val(mat[5] - (int)AV1_DIV_ROUND_UP_POW2_SIGNED(dv, shift) -
+ (1 << WARPEDMODEL_PREC_BITS), S16_MIN, S16_MAX);
+
+ gm_params->alpha = AV1_DIV_ROUND_UP_POW2_SIGNED(gm_params->alpha, WARP_PARAM_REDUCE_BITS) *
+ (1 << WARP_PARAM_REDUCE_BITS);
+ gm_params->beta = AV1_DIV_ROUND_UP_POW2_SIGNED(gm_params->beta, WARP_PARAM_REDUCE_BITS) *
+ (1 << WARP_PARAM_REDUCE_BITS);
+ gm_params->gamma = AV1_DIV_ROUND_UP_POW2_SIGNED(gm_params->gamma, WARP_PARAM_REDUCE_BITS) *
+ (1 << WARP_PARAM_REDUCE_BITS);
+ gm_params->delta = AV1_DIV_ROUND_UP_POW2_SIGNED(gm_params->delta, WARP_PARAM_REDUCE_BITS) *
+ (1 << WARP_PARAM_REDUCE_BITS);
+}
+
+static void vdec_av1_slice_setup_gm(struct vdec_av1_slice_gm *gm,
+ struct v4l2_av1_global_motion *ctrl_gm)
+{
+ u32 i, j;
+
+ for (i = 0; i < V4L2_AV1_TOTAL_REFS_PER_FRAME; i++) {
+ gm[i].wmtype = ctrl_gm->type[i];
+ for (j = 0; j < 6; j++)
+ gm[i].wmmat[j] = ctrl_gm->params[i][j];
+
+ gm[i].invalid = !!(ctrl_gm->invalid & BIT(i));
+ gm[i].alpha = 0;
+ gm[i].beta = 0;
+ gm[i].gamma = 0;
+ gm[i].delta = 0;
+ if (gm[i].wmtype <= V4L2_AV1_WARP_MODEL_AFFINE)
+ vdec_av1_slice_get_shear_params(&gm[i]);
+ }
+}
+
+static void vdec_av1_slice_setup_seg(struct vdec_av1_slice_seg *seg,
+ struct v4l2_av1_segmentation *ctrl_seg)
+{
+ u32 i, j;
+
+ seg->segmentation_enabled = SEGMENTATION_FLAG(ctrl_seg, ENABLED);
+ seg->segmentation_update_map = SEGMENTATION_FLAG(ctrl_seg, UPDATE_MAP);
+ seg->segmentation_temporal_update = SEGMENTATION_FLAG(ctrl_seg, TEMPORAL_UPDATE);
+ seg->segmentation_update_data = SEGMENTATION_FLAG(ctrl_seg, UPDATE_DATA);
+ seg->segid_preskip = SEGMENTATION_FLAG(ctrl_seg, SEG_ID_PRE_SKIP);
+ seg->last_active_segid = ctrl_seg->last_active_seg_id;
+
+ for (i = 0; i < V4L2_AV1_MAX_SEGMENTS; i++) {
+ seg->feature_enabled_mask[i] = ctrl_seg->feature_enabled[i];
+ for (j = 0; j < V4L2_AV1_SEG_LVL_MAX; j++)
+ seg->feature_data[i][j] = ctrl_seg->feature_data[i][j];
+ }
+}
+
+static void vdec_av1_slice_setup_quant(struct vdec_av1_slice_quantization *quant,
+ struct v4l2_av1_quantization *ctrl_quant)
+{
+ quant->base_q_idx = ctrl_quant->base_q_idx;
+ quant->delta_qydc = ctrl_quant->delta_q_y_dc;
+ quant->delta_qudc = ctrl_quant->delta_q_u_dc;
+ quant->delta_quac = ctrl_quant->delta_q_u_ac;
+ quant->delta_qvdc = ctrl_quant->delta_q_v_dc;
+ quant->delta_qvac = ctrl_quant->delta_q_v_ac;
+ quant->qm_y = ctrl_quant->qm_y;
+ quant->qm_u = ctrl_quant->qm_u;
+ quant->qm_v = ctrl_quant->qm_v;
+ quant->using_qmatrix = QUANT_FLAG(ctrl_quant, USING_QMATRIX);
+}
+
+static int vdec_av1_slice_get_qindex(struct vdec_av1_slice_uncompressed_header *uh,
+ int segmentation_id)
+{
+ struct vdec_av1_slice_seg *seg = &uh->seg;
+ struct vdec_av1_slice_quantization *quant = &uh->quant;
+ int data = 0, qindex = 0;
+
+ if (seg->segmentation_enabled &&
+ (seg->feature_enabled_mask[segmentation_id] & BIT(SEG_LVL_ALT_Q))) {
+ data = seg->feature_data[segmentation_id][SEG_LVL_ALT_Q];
+ qindex = quant->base_q_idx + data;
+ return clamp_val(qindex, 0, MAXQ);
+ }
+
+ return quant->base_q_idx;
+}
+
+static void vdec_av1_slice_setup_lr(struct vdec_av1_slice_lr *lr,
+ struct v4l2_av1_loop_restoration *ctrl_lr)
+{
+ int i;
+
+ lr->use_lr = 0;
+ lr->use_chroma_lr = 0;
+ for (i = 0; i < V4L2_AV1_NUM_PLANES_MAX; i++) {
+ lr->frame_restoration_type[i] = ctrl_lr->frame_restoration_type[i];
+ lr->loop_restoration_size[i] = ctrl_lr->loop_restoration_size[i];
+ if (lr->frame_restoration_type[i]) {
+ lr->use_lr = 1;
+ if (i > 0)
+ lr->use_chroma_lr = 1;
+ }
+ }
+}
+
+static void vdec_av1_slice_setup_lf(struct vdec_av1_slice_loop_filter *lf,
+ struct v4l2_av1_loop_filter *ctrl_lf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lf->loop_filter_level); i++)
+ lf->loop_filter_level[i] = ctrl_lf->level[i];
+
+ for (i = 0; i < V4L2_AV1_TOTAL_REFS_PER_FRAME; i++)
+ lf->loop_filter_ref_deltas[i] = ctrl_lf->ref_deltas[i];
+
+ for (i = 0; i < ARRAY_SIZE(lf->loop_filter_mode_deltas); i++)
+ lf->loop_filter_mode_deltas[i] = ctrl_lf->mode_deltas[i];
+
+ lf->loop_filter_sharpness = ctrl_lf->sharpness;
+ lf->loop_filter_delta_enabled =
+ BIT_FLAG(ctrl_lf, V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED);
+}
+
+static void vdec_av1_slice_setup_cdef(struct vdec_av1_slice_cdef *cdef,
+ struct v4l2_av1_cdef *ctrl_cdef)
+{
+ int i;
+
+ cdef->cdef_damping = ctrl_cdef->damping_minus_3 + 3;
+ cdef->cdef_bits = ctrl_cdef->bits;
+
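+ /*
+ * AV1 secondary filter strengths take the values 0, 1, 2 or 4 (3 is never
+ * used); map 4 back to the coded value 3 before packing the strengths.
+ */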
+ for (i = 0; i < V4L2_AV1_CDEF_MAX; i++) {
+ if (ctrl_cdef->y_sec_strength[i] == 4)
+ ctrl_cdef->y_sec_strength[i] -= 1;
+
+ if (ctrl_cdef->uv_sec_strength[i] == 4)
+ ctrl_cdef->uv_sec_strength[i] -= 1;
+
+ cdef->cdef_y_strength[i] =
+ ctrl_cdef->y_pri_strength[i] << SECONDARY_FILTER_STRENGTH_NUM_BITS |
+ ctrl_cdef->y_sec_strength[i];
+ cdef->cdef_uv_strength[i] =
+ ctrl_cdef->uv_pri_strength[i] << SECONDARY_FILTER_STRENGTH_NUM_BITS |
+ ctrl_cdef->uv_sec_strength[i];
+ }
+}
+
+static void vdec_av1_slice_setup_seq(struct vdec_av1_slice_seq_header *seq,
+ struct v4l2_ctrl_av1_sequence *ctrl_seq)
+{
+ seq->bitdepth = ctrl_seq->bit_depth;
+ seq->max_frame_width = ctrl_seq->max_frame_width_minus_1 + 1;
+ seq->max_frame_height = ctrl_seq->max_frame_height_minus_1 + 1;
+ seq->enable_superres = SEQUENCE_FLAG(ctrl_seq, ENABLE_SUPERRES);
+ seq->enable_filter_intra = SEQUENCE_FLAG(ctrl_seq, ENABLE_FILTER_INTRA);
+ seq->enable_intra_edge_filter = SEQUENCE_FLAG(ctrl_seq, ENABLE_INTRA_EDGE_FILTER);
+ seq->enable_interintra_compound = SEQUENCE_FLAG(ctrl_seq, ENABLE_INTERINTRA_COMPOUND);
+ seq->enable_masked_compound = SEQUENCE_FLAG(ctrl_seq, ENABLE_MASKED_COMPOUND);
+ seq->enable_dual_filter = SEQUENCE_FLAG(ctrl_seq, ENABLE_DUAL_FILTER);
+ seq->enable_jnt_comp = SEQUENCE_FLAG(ctrl_seq, ENABLE_JNT_COMP);
+ seq->mono_chrome = SEQUENCE_FLAG(ctrl_seq, MONO_CHROME);
+ seq->enable_order_hint = SEQUENCE_FLAG(ctrl_seq, ENABLE_ORDER_HINT);
+ seq->order_hint_bits = ctrl_seq->order_hint_bits;
+ seq->use_128x128_superblock = SEQUENCE_FLAG(ctrl_seq, USE_128X128_SUPERBLOCK);
+ seq->subsampling_x = SEQUENCE_FLAG(ctrl_seq, SUBSAMPLING_X);
+ seq->subsampling_y = SEQUENCE_FLAG(ctrl_seq, SUBSAMPLING_Y);
+}
+
+static void vdec_av1_slice_setup_tile(struct vdec_av1_slice_frame *frame,
+ struct v4l2_av1_tile_info *ctrl_tile)
+{
+ struct vdec_av1_slice_seq_header *seq = &frame->seq;
+ struct vdec_av1_slice_tile *tile = &frame->uh.tile;
+ u32 mib_size_log2 = seq->use_128x128_superblock ? 5 : 4;
+ int i;
+
+ tile->tile_cols = ctrl_tile->tile_cols;
+ tile->tile_rows = ctrl_tile->tile_rows;
+ tile->context_update_tile_id = ctrl_tile->context_update_tile_id;
+ tile->uniform_tile_spacing_flag =
+ BIT_FLAG(ctrl_tile, V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING);
+
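+ /*
+ * convert the tile start positions from mode-info units to superblock
+ * units (a superblock is 32 or 16 mode-info units wide).
+ */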
+ for (i = 0; i < tile->tile_cols + 1; i++)
+ tile->mi_col_starts[i] =
+ ALIGN(ctrl_tile->mi_col_starts[i], BIT(mib_size_log2)) >> mib_size_log2;
+
+ for (i = 0; i < tile->tile_rows + 1; i++)
+ tile->mi_row_starts[i] =
+ ALIGN(ctrl_tile->mi_row_starts[i], BIT(mib_size_log2)) >> mib_size_log2;
+}
+
+static void vdec_av1_slice_setup_uh(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_frame *frame,
+ struct v4l2_ctrl_av1_frame *ctrl_fh)
+{
+ struct vdec_av1_slice_uncompressed_header *uh = &frame->uh;
+ int i;
+
+ uh->use_ref_frame_mvs = FH_FLAG(ctrl_fh, USE_REF_FRAME_MVS);
+ uh->order_hint = ctrl_fh->order_hint;
+ vdec_av1_slice_setup_gm(uh->gm, &ctrl_fh->global_motion);
+ uh->upscaled_width = ctrl_fh->upscaled_width;
+ uh->frame_width = ctrl_fh->frame_width_minus_1 + 1;
+ uh->frame_height = ctrl_fh->frame_height_minus_1 + 1;
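+ /* frame size in 4x4 mode-info units, with dimensions rounded up to 8 samples */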
+ uh->mi_cols = ((uh->frame_width + 7) >> 3) << 1;
+ uh->mi_rows = ((uh->frame_height + 7) >> 3) << 1;
+ uh->reduced_tx_set = FH_FLAG(ctrl_fh, REDUCED_TX_SET);
+ uh->tx_mode = ctrl_fh->tx_mode;
+ uh->uniform_tile_spacing_flag =
+ BIT_FLAG(&ctrl_fh->tile_info, V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING);
+ uh->interpolation_filter = ctrl_fh->interpolation_filter;
+ uh->allow_warped_motion = FH_FLAG(ctrl_fh, ALLOW_WARPED_MOTION);
+ uh->is_motion_mode_switchable = FH_FLAG(ctrl_fh, IS_MOTION_MODE_SWITCHABLE);
+ uh->frame_type = ctrl_fh->frame_type;
+ uh->frame_is_intra = (uh->frame_type == V4L2_AV1_INTRA_ONLY_FRAME ||
+ uh->frame_type == V4L2_AV1_KEY_FRAME);
+
+ if (!uh->frame_is_intra && FH_FLAG(ctrl_fh, REFERENCE_SELECT))
+ uh->reference_mode = AV1_REFERENCE_MODE_SELECT;
+ else
+ uh->reference_mode = AV1_SINGLE_REFERENCE;
+
+ uh->allow_high_precision_mv = FH_FLAG(ctrl_fh, ALLOW_HIGH_PRECISION_MV);
+ uh->allow_intra_bc = FH_FLAG(ctrl_fh, ALLOW_INTRABC);
+ uh->force_integer_mv = FH_FLAG(ctrl_fh, FORCE_INTEGER_MV);
+ uh->allow_screen_content_tools = FH_FLAG(ctrl_fh, ALLOW_SCREEN_CONTENT_TOOLS);
+ uh->error_resilient_mode = FH_FLAG(ctrl_fh, ERROR_RESILIENT_MODE);
+ uh->primary_ref_frame = ctrl_fh->primary_ref_frame;
+ uh->disable_frame_end_update_cdf =
+ FH_FLAG(ctrl_fh, DISABLE_FRAME_END_UPDATE_CDF);
+ uh->disable_cdf_update = FH_FLAG(ctrl_fh, DISABLE_CDF_UPDATE);
+ uh->skip_mode.skip_mode_present = FH_FLAG(ctrl_fh, SKIP_MODE_PRESENT);
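+ /* convert the absolute reference frame type to a 0-based index among the 7 references */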
+ uh->skip_mode.skip_mode_frame[0] =
+ ctrl_fh->skip_mode_frame[0] - V4L2_AV1_REF_LAST_FRAME;
+ uh->skip_mode.skip_mode_frame[1] =
+ ctrl_fh->skip_mode_frame[1] - V4L2_AV1_REF_LAST_FRAME;
+ uh->skip_mode.skip_mode_allowed = ctrl_fh->skip_mode_frame[0] ? 1 : 0;
+
+ vdec_av1_slice_setup_seg(&uh->seg, &ctrl_fh->segmentation);
+ uh->delta_q_lf.delta_q_present = QUANT_FLAG(&ctrl_fh->quantization, DELTA_Q_PRESENT);
+ uh->delta_q_lf.delta_q_res = 1 << ctrl_fh->quantization.delta_q_res;
+ uh->delta_q_lf.delta_lf_present =
+ BIT_FLAG(&ctrl_fh->loop_filter, V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT);
+ uh->delta_q_lf.delta_lf_res = ctrl_fh->loop_filter.delta_lf_res;
+ uh->delta_q_lf.delta_lf_multi =
+ BIT_FLAG(&ctrl_fh->loop_filter, V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI);
+ vdec_av1_slice_setup_quant(&uh->quant, &ctrl_fh->quantization);
+
+ uh->coded_loss_less = 1;
+ for (i = 0; i < V4L2_AV1_MAX_SEGMENTS; i++) {
+ uh->quant.qindex[i] = vdec_av1_slice_get_qindex(uh, i);
+ uh->loss_less_array[i] =
+ (uh->quant.qindex[i] == 0 && uh->quant.delta_qydc == 0 &&
+ uh->quant.delta_quac == 0 && uh->quant.delta_qudc == 0 &&
+ uh->quant.delta_qvac == 0 && uh->quant.delta_qvdc == 0);
+
+ if (!uh->loss_less_array[i])
+ uh->coded_loss_less = 0;
+ }
+
+ vdec_av1_slice_setup_lr(&uh->lr, &ctrl_fh->loop_restoration);
+ uh->superres_denom = ctrl_fh->superres_denom;
+ vdec_av1_slice_setup_lf(&uh->loop_filter, &ctrl_fh->loop_filter);
+ vdec_av1_slice_setup_cdef(&uh->cdef, &ctrl_fh->cdef);
+ vdec_av1_slice_setup_tile(frame, &ctrl_fh->tile_info);
+}
+
+static int vdec_av1_slice_setup_tile_group(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_vsi *vsi)
+{
+ struct v4l2_ctrl_av1_tile_group_entry *ctrl_tge;
+ struct vdec_av1_slice_tile_group *tile_group = &instance->tile_group;
+ struct vdec_av1_slice_uncompressed_header *uh = &vsi->frame.uh;
+ struct vdec_av1_slice_tile *tile = &uh->tile;
+ struct v4l2_ctrl *ctrl;
+ u32 tge_size;
+ int i;
+
+ ctrl = v4l2_ctrl_find(&instance->ctx->ctrl_hdl, V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY);
+ if (!ctrl)
+ return -EINVAL;
+
+ tge_size = ctrl->elems;
+ ctrl_tge = (struct v4l2_ctrl_av1_tile_group_entry *)ctrl->p_cur.p;
+
+ tile_group->num_tiles = tile->tile_cols * tile->tile_rows;
+
+ if (tile_group->num_tiles != tge_size ||
+ tile_group->num_tiles > V4L2_AV1_MAX_TILE_COUNT) {
+ mtk_vcodec_err(instance, "invalid tge_size %d, tile_num:%d\n",
+ tge_size, tile_group->num_tiles);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < tge_size; i++) {
+ if (i != ctrl_tge[i].tile_row * vsi->frame.uh.tile.tile_cols +
+ ctrl_tge[i].tile_col) {
+ mtk_vcodec_err(instance, "invalid tge info %d, %d %d %d\n",
+ i, ctrl_tge[i].tile_row, ctrl_tge[i].tile_col,
+ vsi->frame.uh.tile.tile_rows);
+ return -EINVAL;
+ }
+ tile_group->tile_size[i] = ctrl_tge[i].tile_size;
+ tile_group->tile_start_offset[i] = ctrl_tge[i].tile_offset;
+ }
+
+ return 0;
+}
+
+static inline void vdec_av1_slice_setup_state(struct vdec_av1_slice_vsi *vsi)
+{
+ memset(&vsi->state, 0, sizeof(vsi->state));
+}
+
+static void vdec_av1_slice_setup_scale_factors(struct vdec_av1_slice_frame_refs *frame_ref,
+ struct vdec_av1_slice_frame_info *ref_frame_info,
+ struct vdec_av1_slice_uncompressed_header *uh)
+{
+ struct vdec_av1_slice_scale_factors *scale_factors = &frame_ref->scale_factors;
+ u32 ref_upscaled_width = ref_frame_info->upscaled_width;
+ u32 ref_frame_height = ref_frame_info->frame_height;
+ u32 frame_width = uh->frame_width;
+ u32 frame_height = uh->frame_height;
+
+ if (!vdec_av1_slice_need_scale(ref_upscaled_width, ref_frame_height,
+ frame_width, frame_height)) {
+ scale_factors->x_scale = -1;
+ scale_factors->y_scale = -1;
+ scale_factors->is_scaled = 0;
+ return;
+ }
+
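+ /* ref/current size ratio in fixed point with AV1_REF_SCALE_SHIFT fractional bits, rounded */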
+ scale_factors->x_scale =
+ ((ref_upscaled_width << AV1_REF_SCALE_SHIFT) + (frame_width >> 1)) / frame_width;
+ scale_factors->y_scale =
+ ((ref_frame_height << AV1_REF_SCALE_SHIFT) + (frame_height >> 1)) / frame_height;
+ scale_factors->is_scaled =
+ (scale_factors->x_scale != AV1_REF_INVALID_SCALE) &&
+ (scale_factors->y_scale != AV1_REF_INVALID_SCALE) &&
+ (scale_factors->x_scale != AV1_REF_NO_SCALE ||
+ scale_factors->y_scale != AV1_REF_NO_SCALE);
+ scale_factors->x_step =
+ AV1_DIV_ROUND_UP_POW2(scale_factors->x_scale,
+ AV1_REF_SCALE_SHIFT - AV1_SCALE_SUBPEL_BITS);
+ scale_factors->y_step =
+ AV1_DIV_ROUND_UP_POW2(scale_factors->y_scale,
+ AV1_REF_SCALE_SHIFT - AV1_SCALE_SUBPEL_BITS);
+}
+
+static unsigned char vdec_av1_slice_get_sign_bias(int a,
+ int b,
+ u8 enable_order_hint,
+ u8 order_hint_bits)
+{
+ int diff = 0;
+ int m = 0;
+ unsigned char result = 0;
+
+ if (!enable_order_hint)
+ return 0;
+
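+ /* sign-extend the order hint difference to order_hint_bits so wrap-around is handled */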
+ diff = a - b;
+ m = 1 << (order_hint_bits - 1);
+ diff = (diff & (m - 1)) - (diff & m);
+
+ if (diff > 0)
+ result = 1;
+
+ return result;
+}
+
+static void vdec_av1_slice_setup_ref(struct vdec_av1_slice_pfc *pfc,
+ struct v4l2_ctrl_av1_frame *ctrl_fh)
+{
+ struct vdec_av1_slice_vsi *vsi = &pfc->vsi;
+ struct vdec_av1_slice_frame *frame = &vsi->frame;
+ struct vdec_av1_slice_slot *slots = &vsi->slots;
+ struct vdec_av1_slice_uncompressed_header *uh = &frame->uh;
+ struct vdec_av1_slice_seq_header *seq = &frame->seq;
+ struct vdec_av1_slice_frame_info *cur_frame_info =
+ &slots->frame_info[vsi->slot_id];
+ struct vdec_av1_slice_frame_info *frame_info;
+ int i, slot_id;
+
+ if (uh->frame_is_intra)
+ return;
+
+ for (i = 0; i < V4L2_AV1_REFS_PER_FRAME; i++) {
+ int ref_idx = ctrl_fh->ref_frame_idx[i];
+
+ pfc->ref_idx[i] = ctrl_fh->reference_frame_ts[ref_idx];
+ slot_id = frame->ref_frame_map[ref_idx];
+ frame_info = &slots->frame_info[slot_id];
+ if (slot_id == AV1_INVALID_IDX) {
+ mtk_v4l2_err("cannot match reference[%d] 0x%llx\n", i,
+ ctrl_fh->reference_frame_ts[ref_idx]);
+ frame->order_hints[i] = 0;
+ frame->ref_frame_valid[i] = 0;
+ continue;
+ }
+
+ frame->frame_refs[i].ref_fb_idx = slot_id;
+ vdec_av1_slice_setup_scale_factors(&frame->frame_refs[i],
+ frame_info, uh);
+ if (!seq->enable_order_hint)
+ frame->ref_frame_sign_bias[i + 1] = 0;
+ else
+ frame->ref_frame_sign_bias[i + 1] =
+ vdec_av1_slice_get_sign_bias(frame_info->order_hint,
+ uh->order_hint,
+ seq->enable_order_hint,
+ seq->order_hint_bits);
+
+ frame->order_hints[i] = ctrl_fh->order_hints[i + 1];
+ cur_frame_info->order_hints[i] = frame->order_hints[i];
+ frame->ref_frame_valid[i] = 1;
+ }
+}
+
+static void vdec_av1_slice_get_previous(struct vdec_av1_slice_vsi *vsi)
+{
+ struct vdec_av1_slice_frame *frame = &vsi->frame;
+
+ if (frame->uh.primary_ref_frame == AV1_PRIMARY_REF_NONE)
+ frame->prev_fb_idx = AV1_INVALID_IDX;
+ else
+ frame->prev_fb_idx = frame->frame_refs[frame->uh.primary_ref_frame].ref_fb_idx;
+}
+
+static inline void vdec_av1_slice_setup_operating_mode(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_frame *frame)
+{
+ frame->large_scale_tile = 0;
+}
+
+static int vdec_av1_slice_setup_pfc(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_pfc *pfc)
+{
+ struct v4l2_ctrl_av1_frame *ctrl_fh;
+ struct v4l2_ctrl_av1_sequence *ctrl_seq;
+ struct vdec_av1_slice_vsi *vsi = &pfc->vsi;
+ int ret = 0;
+
+ /* frame header */
+ ctrl_fh = (struct v4l2_ctrl_av1_frame *)
+ vdec_av1_get_ctrl_ptr(instance->ctx,
+ V4L2_CID_STATELESS_AV1_FRAME);
+ if (IS_ERR(ctrl_fh))
+ return PTR_ERR(ctrl_fh);
+
+ ctrl_seq = (struct v4l2_ctrl_av1_sequence *)
+ vdec_av1_get_ctrl_ptr(instance->ctx,
+ V4L2_CID_STATELESS_AV1_SEQUENCE);
+ if (IS_ERR(ctrl_seq))
+ return PTR_ERR(ctrl_seq);
+
+ /* setup vsi information */
+ vdec_av1_slice_setup_seq(&vsi->frame.seq, ctrl_seq);
+ vdec_av1_slice_setup_uh(instance, &vsi->frame, ctrl_fh);
+ vdec_av1_slice_setup_operating_mode(instance, &vsi->frame);
+
+ vdec_av1_slice_setup_state(vsi);
+ vdec_av1_slice_setup_slot(instance, vsi, ctrl_fh);
+ vdec_av1_slice_setup_ref(pfc, ctrl_fh);
+ vdec_av1_slice_get_previous(vsi);
+
+ pfc->seq = instance->seq;
+ instance->seq++;
+
+ return ret;
+}
+
+static void vdec_av1_slice_setup_lat_buffer(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_vsi *vsi,
+ struct mtk_vcodec_mem *bs,
+ struct vdec_lat_buf *lat_buf)
+{
+ struct vdec_av1_slice_work_buffer *work_buffer;
+ int i;
+
+ vsi->bs.dma_addr = bs->dma_addr;
+ vsi->bs.size = bs->size;
+
+ vsi->ube.dma_addr = lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
+ vsi->ube.size = lat_buf->ctx->msg_queue.wdma_addr.size;
+ vsi->trans.dma_addr = lat_buf->ctx->msg_queue.wdma_wptr_addr;
+ /* used to store trans end */
+ vsi->trans.dma_addr_end = lat_buf->ctx->msg_queue.wdma_rptr_addr;
+ vsi->err_map.dma_addr = lat_buf->wdma_err_addr.dma_addr;
+ vsi->err_map.size = lat_buf->wdma_err_addr.size;
+ vsi->rd_mv.dma_addr = lat_buf->rd_mv_addr.dma_addr;
+ vsi->rd_mv.size = lat_buf->rd_mv_addr.size;
+
+ vsi->row_info.buf = 0;
+ vsi->row_info.size = 0;
+
+ work_buffer = vsi->work_buffer;
+
+ for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++) {
+ work_buffer[i].mv_addr.buf = instance->mv[i].dma_addr;
+ work_buffer[i].mv_addr.size = instance->mv[i].size;
+ work_buffer[i].segid_addr.buf = instance->seg[i].dma_addr;
+ work_buffer[i].segid_addr.size = instance->seg[i].size;
+ work_buffer[i].cdf_addr.buf = instance->cdf[i].dma_addr;
+ work_buffer[i].cdf_addr.size = instance->cdf[i].size;
+ }
+
+ vsi->cdf_tmp.buf = instance->cdf_temp.dma_addr;
+ vsi->cdf_tmp.size = instance->cdf_temp.size;
+
+ vsi->tile.buf = instance->tile.dma_addr;
+ vsi->tile.size = instance->tile.size;
+ memcpy(lat_buf->tile_addr.va, instance->tile.va, 64 * instance->tile_group.num_tiles);
+
+ vsi->cdf_table.buf = instance->cdf_table.dma_addr;
+ vsi->cdf_table.size = instance->cdf_table.size;
+ vsi->iq_table.buf = instance->iq_table.dma_addr;
+ vsi->iq_table.size = instance->iq_table.size;
+}
+
+static void vdec_av1_slice_setup_seg_buffer(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_vsi *vsi)
+{
+ struct vdec_av1_slice_uncompressed_header *uh = &vsi->frame.uh;
+ struct mtk_vcodec_mem *buf;
+
+ /* reset segment buffer */
+ if (uh->primary_ref_frame == AV1_PRIMARY_REF_NONE || !uh->seg.segmentation_enabled) {
+ mtk_vcodec_debug(instance, "reset seg %d\n", vsi->slot_id);
+ if (vsi->slot_id != AV1_INVALID_IDX) {
+ buf = &instance->seg[vsi->slot_id];
+ memset(buf->va, 0, buf->size);
+ }
+ }
+}
+
+static void vdec_av1_slice_setup_tile_buffer(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_vsi *vsi,
+ struct mtk_vcodec_mem *bs)
+{
+ struct vdec_av1_slice_tile_group *tile_group = &instance->tile_group;
+ struct vdec_av1_slice_uncompressed_header *uh = &vsi->frame.uh;
+ struct vdec_av1_slice_tile *tile = &uh->tile;
+ u32 tile_num, tile_row, tile_col;
+ u32 allow_update_cdf = 0;
+ u32 sb_boundary_x_m1 = 0, sb_boundary_y_m1 = 0;
+ int tile_info_base;
+ u32 tile_buf_pa;
+ u32 *tile_info_buf = instance->tile.va;
+ u32 pa = (u32)bs->dma_addr;
+
+ if (uh->disable_cdf_update == 0)
+ allow_update_cdf = 1;
+
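+ /*
+ * store each tile's size in bits, its 16-byte-aligned bitstream address
+ * and the remaining offset in bits.
+ */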
+ for (tile_num = 0; tile_num < tile_group->num_tiles; tile_num++) {
+ /* convert the byte offset into a u32 index (each u32 occupies 4 bytes) */
+ tile_info_base = (AV1_TILE_BUF_SIZE * tile_num) >> 2;
+ tile_row = tile_num / tile->tile_cols;
+ tile_col = tile_num % tile->tile_cols;
+ tile_info_buf[tile_info_base + 0] = (tile_group->tile_size[tile_num] << 3);
+ tile_buf_pa = pa + tile_group->tile_start_offset[tile_num];
+
+ tile_info_buf[tile_info_base + 1] = (tile_buf_pa >> 4) << 4;
+ tile_info_buf[tile_info_base + 2] = (tile_buf_pa % 16) << 3;
+
+ sb_boundary_x_m1 =
+ (tile->mi_col_starts[tile_col + 1] - tile->mi_col_starts[tile_col] - 1) &
+ 0x3f;
+ sb_boundary_y_m1 =
+ (tile->mi_row_starts[tile_row + 1] - tile->mi_row_starts[tile_row] - 1) &
+ 0x1ff;
+
+ tile_info_buf[tile_info_base + 3] = (sb_boundary_y_m1 << 7) | sb_boundary_x_m1;
+ tile_info_buf[tile_info_base + 4] = ((allow_update_cdf << 18) | (1 << 16));
+
+ if (tile_num == tile->context_update_tile_id &&
+ uh->disable_frame_end_update_cdf == 0)
+ tile_info_buf[tile_info_base + 4] |= (1 << 17);
+
+ mtk_vcodec_debug(instance, "// tile buf %d pos(%dx%d) offset 0x%x\n",
+ tile_num, tile_row, tile_col, tile_info_base);
+ mtk_vcodec_debug(instance, "// %08x %08x %08x %08x\n",
+ tile_info_buf[tile_info_base + 0],
+ tile_info_buf[tile_info_base + 1],
+ tile_info_buf[tile_info_base + 2],
+ tile_info_buf[tile_info_base + 3]);
+ mtk_vcodec_debug(instance, "// %08x %08x %08x %08x\n",
+ tile_info_buf[tile_info_base + 4],
+ tile_info_buf[tile_info_base + 5],
+ tile_info_buf[tile_info_base + 6],
+ tile_info_buf[tile_info_base + 7]);
+ }
+}
+
+static int vdec_av1_slice_setup_lat(struct vdec_av1_slice_instance *instance,
+ struct mtk_vcodec_mem *bs,
+ struct vdec_lat_buf *lat_buf,
+ struct vdec_av1_slice_pfc *pfc)
+{
+ struct vdec_av1_slice_vsi *vsi = &pfc->vsi;
+ int ret;
+
+ ret = vdec_av1_slice_setup_lat_from_src_buf(instance, vsi, lat_buf);
+ if (ret)
+ return ret;
+
+ ret = vdec_av1_slice_setup_pfc(instance, pfc);
+ if (ret)
+ return ret;
+
+ ret = vdec_av1_slice_setup_tile_group(instance, vsi);
+ if (ret)
+ return ret;
+
+ ret = vdec_av1_slice_alloc_working_buffer(instance, vsi);
+ if (ret)
+ return ret;
+
+ vdec_av1_slice_setup_seg_buffer(instance, vsi);
+ vdec_av1_slice_setup_tile_buffer(instance, vsi, bs);
+ vdec_av1_slice_setup_lat_buffer(instance, vsi, bs, lat_buf);
+
+ return 0;
+}
+
+static int vdec_av1_slice_update_lat(struct vdec_av1_slice_instance *instance,
+ struct vdec_lat_buf *lat_buf,
+ struct vdec_av1_slice_pfc *pfc)
+{
+ struct vdec_av1_slice_vsi *vsi;
+
+ vsi = &pfc->vsi;
+ mtk_vcodec_debug(instance, "frame %u LAT CRC 0x%08x, output size is %d\n",
+ pfc->seq, vsi->state.crc[0], vsi->state.out_size);
+
+ /* buffer full, need to re-decode */
+ if (vsi->state.full) {
+ /* buffer not enough */
+ if (vsi->trans.dma_addr_end - vsi->trans.dma_addr == vsi->ube.size)
+ return -ENOMEM;
+ return -EAGAIN;
+ }
+
+ instance->width = vsi->frame.uh.upscaled_width;
+ instance->height = vsi->frame.uh.frame_height;
+ instance->frame_type = vsi->frame.uh.frame_type;
+
+ return 0;
+}
+
+static int vdec_av1_slice_setup_core_to_dst_buf(struct vdec_av1_slice_instance *instance,
+ struct vdec_lat_buf *lat_buf)
+{
+ struct vb2_v4l2_buffer *dst;
+
+ dst = v4l2_m2m_next_dst_buf(instance->ctx->m2m_ctx);
+ if (!dst)
+ return -EINVAL;
+
+ v4l2_m2m_buf_copy_metadata(&lat_buf->ts_info, dst, true);
+
+ return 0;
+}
+
+static int vdec_av1_slice_setup_core_buffer(struct vdec_av1_slice_instance *instance,
+ struct vdec_av1_slice_pfc *pfc,
+ struct vdec_av1_slice_vsi *vsi,
+ struct vdec_fb *fb,
+ struct vdec_lat_buf *lat_buf)
+{
+ struct vb2_buffer *vb;
+ struct vb2_queue *vq;
+ int w, h, plane, size;
+ int i;
+
+ plane = instance->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes;
+ w = vsi->frame.uh.upscaled_width;
+ h = vsi->frame.uh.frame_height;
+ size = ALIGN(w, VCODEC_DEC_ALIGNED_64) * ALIGN(h, VCODEC_DEC_ALIGNED_64);
+
+ /* frame buffer */
+ vsi->fb.y.dma_addr = fb->base_y.dma_addr;
+ if (plane == 1)
+ vsi->fb.c.dma_addr = fb->base_y.dma_addr + size;
+ else
+ vsi->fb.c.dma_addr = fb->base_c.dma_addr;
+
+ /* reference buffers */
+ vq = v4l2_m2m_get_vq(instance->ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!vq)
+ return -EINVAL;
+
+ /* get current output buffer */
+ vb = &v4l2_m2m_next_dst_buf(instance->ctx->m2m_ctx)->vb2_buf;
+ if (!vb)
+ return -EINVAL;
+
+ /* get buffer address from vb2buf */
+ for (i = 0; i < V4L2_AV1_REFS_PER_FRAME; i++) {
+ struct vdec_av1_slice_fb *vref = &vsi->ref[i];
+
+ vb = vb2_find_buffer(vq, pfc->ref_idx[i]);
+ if (!vb) {
+ memset(vref, 0, sizeof(*vref));
+ continue;
+ }
+
+ vref->y.dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (plane == 1)
+ vref->c.dma_addr = vref->y.dma_addr + size;
+ else
+ vref->c.dma_addr = vb2_dma_contig_plane_dma_addr(vb, 1);
+ }
+ vsi->tile.dma_addr = lat_buf->tile_addr.dma_addr;
+ vsi->tile.size = lat_buf->tile_addr.size;
+
+ return 0;
+}
+
+static int vdec_av1_slice_setup_core(struct vdec_av1_slice_instance *instance,
+ struct vdec_fb *fb,
+ struct vdec_lat_buf *lat_buf,
+ struct vdec_av1_slice_pfc *pfc)
+{
+ struct vdec_av1_slice_vsi *vsi = &pfc->vsi;
+ int ret;
+
+ ret = vdec_av1_slice_setup_core_to_dst_buf(instance, lat_buf);
+ if (ret)
+ return ret;
+
+ ret = vdec_av1_slice_setup_core_buffer(instance, pfc, vsi, fb, lat_buf);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int vdec_av1_slice_update_core(struct vdec_av1_slice_instance *instance,
+ struct vdec_lat_buf *lat_buf,
+ struct vdec_av1_slice_pfc *pfc)
+{
+ struct vdec_av1_slice_vsi *vsi = instance->core_vsi;
+
+ mtk_vcodec_debug(instance, "frame %u Y_CRC %08x %08x %08x %08x\n",
+ pfc->seq, vsi->state.crc[0], vsi->state.crc[1],
+ vsi->state.crc[2], vsi->state.crc[3]);
+ mtk_vcodec_debug(instance, "frame %u C_CRC %08x %08x %08x %08x\n",
+ pfc->seq, vsi->state.crc[8], vsi->state.crc[9],
+ vsi->state.crc[10], vsi->state.crc[11]);
+
+ return 0;
+}
+
+static int vdec_av1_slice_init(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_av1_slice_instance *instance;
+ struct vdec_av1_slice_init_vsi *vsi;
+ int ret;
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance)
+ return -ENOMEM;
+
+ instance->ctx = ctx;
+ instance->vpu.id = SCP_IPI_VDEC_LAT;
+ instance->vpu.core_id = SCP_IPI_VDEC_CORE;
+ instance->vpu.ctx = ctx;
+ instance->vpu.codec_type = ctx->current_codec;
+
+ ret = vpu_dec_init(&instance->vpu);
+ if (ret) {
+ mtk_vcodec_err(instance, "failed to init vpu dec, ret %d\n", ret);
+ goto error_vpu_init;
+ }
+
+ /* init vsi and global flags */
+ vsi = instance->vpu.vsi;
+ if (!vsi) {
+ mtk_vcodec_err(instance, "failed to get AV1 vsi\n");
+ ret = -EINVAL;
+ goto error_vsi;
+ }
+ instance->init_vsi = vsi;
+ instance->core_vsi = mtk_vcodec_fw_map_dm_addr(ctx->dev->fw_handler, (u32)vsi->core_vsi);
+
+ if (!instance->core_vsi) {
+ mtk_vcodec_err(instance, "failed to get AV1 core vsi\n");
+ ret = -EINVAL;
+ goto error_vsi;
+ }
+
+ if (vsi->vsi_size != sizeof(struct vdec_av1_slice_vsi))
+ mtk_vcodec_err(instance, "remote vsi size 0x%x mismatch! expected: 0x%zx\n",
+ vsi->vsi_size, sizeof(struct vdec_av1_slice_vsi));
+
+ instance->irq_enabled = 1;
+ instance->inneracing_mode = IS_VDEC_INNER_RACING(instance->ctx->dev->dec_capability);
+
+ mtk_vcodec_debug(instance, "vsi 0x%p core_vsi 0x%llx 0x%p, inneracing_mode %d\n",
+ vsi, vsi->core_vsi, instance->core_vsi, instance->inneracing_mode);
+
+ ret = vdec_av1_slice_init_cdf_table(instance);
+ if (ret)
+ goto error_vsi;
+
+ ret = vdec_av1_slice_init_iq_table(instance);
+ if (ret)
+ goto error_vsi;
+
+ ctx->drv_handle = instance;
+
+ return 0;
+error_vsi:
+ vpu_dec_deinit(&instance->vpu);
+error_vpu_init:
+ kfree(instance);
+
+ return ret;
+}
+
+static void vdec_av1_slice_deinit(void *h_vdec)
+{
+ struct vdec_av1_slice_instance *instance = h_vdec;
+
+ if (!instance)
+ return;
+ mtk_vcodec_debug(instance, "h_vdec 0x%p\n", h_vdec);
+ vpu_dec_deinit(&instance->vpu);
+ vdec_av1_slice_free_working_buffer(instance);
+ vdec_msg_queue_deinit(&instance->ctx->msg_queue, instance->ctx);
+ kfree(instance);
+}
+
+static int vdec_av1_slice_flush(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_av1_slice_instance *instance = h_vdec;
+ int i;
+
+ mtk_vcodec_debug(instance, "flush ...\n");
+
+ vdec_msg_queue_wait_lat_buf_full(&instance->ctx->msg_queue);
+
+ for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++)
+ vdec_av1_slice_clear_fb(&instance->slots.frame_info[i]);
+
+ return vpu_dec_reset(&instance->vpu);
+}
+
+static void vdec_av1_slice_get_pic_info(struct vdec_av1_slice_instance *instance)
+{
+ struct mtk_vcodec_ctx *ctx = instance->ctx;
+ u32 data[3];
+
+ mtk_vcodec_debug(instance, "w %u h %u\n", ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+
+ data[0] = ctx->picinfo.pic_w;
+ data[1] = ctx->picinfo.pic_h;
+ data[2] = ctx->capture_fourcc;
+ vpu_dec_get_param(&instance->vpu, data, 3, GET_PARAM_PIC_INFO);
+
+ ctx->picinfo.buf_w = ALIGN(ctx->picinfo.pic_w, VCODEC_DEC_ALIGNED_64);
+ ctx->picinfo.buf_h = ALIGN(ctx->picinfo.pic_h, VCODEC_DEC_ALIGNED_64);
+ ctx->picinfo.fb_sz[0] = instance->vpu.fb_sz[0];
+ ctx->picinfo.fb_sz[1] = instance->vpu.fb_sz[1];
+}
+
+static inline void vdec_av1_slice_get_dpb_size(struct vdec_av1_slice_instance *instance,
+ u32 *dpb_sz)
+{
+ /* refer to the AV1 specification */
+ *dpb_sz = V4L2_AV1_TOTAL_REFS_PER_FRAME + 1;
+}
+
+static void vdec_av1_slice_get_crop_info(struct vdec_av1_slice_instance *instance,
+ struct v4l2_rect *cr)
+{
+ struct mtk_vcodec_ctx *ctx = instance->ctx;
+
+ cr->left = 0;
+ cr->top = 0;
+ cr->width = ctx->picinfo.pic_w;
+ cr->height = ctx->picinfo.pic_h;
+
+ mtk_vcodec_debug(instance, "l=%d, t=%d, w=%d, h=%d\n",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_av1_slice_get_param(void *h_vdec, enum vdec_get_param_type type, void *out)
+{
+ struct vdec_av1_slice_instance *instance = h_vdec;
+
+ switch (type) {
+ case GET_PARAM_PIC_INFO:
+ vdec_av1_slice_get_pic_info(instance);
+ break;
+ case GET_PARAM_DPB_SIZE:
+ vdec_av1_slice_get_dpb_size(instance, out);
+ break;
+ case GET_PARAM_CROP_INFO:
+ vdec_av1_slice_get_crop_info(instance, out);
+ break;
+ default:
+ mtk_vcodec_err(instance, "invalid get parameter type=%d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vdec_av1_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_av1_slice_instance *instance = h_vdec;
+ struct vdec_lat_buf *lat_buf;
+ struct vdec_av1_slice_pfc *pfc;
+ struct vdec_av1_slice_vsi *vsi;
+ struct mtk_vcodec_ctx *ctx;
+ int ret;
+
+ if (!instance || !instance->ctx)
+ return -EINVAL;
+
+ ctx = instance->ctx;
+ /* init msgQ for the first time */
+ if (vdec_msg_queue_init(&ctx->msg_queue, ctx,
+ vdec_av1_slice_core_decode, sizeof(*pfc))) {
+ mtk_vcodec_err(instance, "failed to init AV1 msg queue\n");
+ return -ENOMEM;
+ }
+
+ /* bs NULL means flush decoder */
+ if (!bs)
+ return vdec_av1_slice_flush(h_vdec, bs, fb, res_chg);
+
+ lat_buf = vdec_msg_queue_dqbuf(&ctx->msg_queue.lat_ctx);
+ if (!lat_buf) {
+ mtk_vcodec_err(instance, "failed to get AV1 lat buf\n");
+ return -EAGAIN;
+ }
+ pfc = (struct vdec_av1_slice_pfc *)lat_buf->private_data;
+ if (!pfc) {
+ ret = -EINVAL;
+ goto err_free_fb_out;
+ }
+ vsi = &pfc->vsi;
+
+ ret = vdec_av1_slice_setup_lat(instance, bs, lat_buf, pfc);
+ if (ret) {
+ mtk_vcodec_err(instance, "failed to setup AV1 lat ret %d\n", ret);
+ goto err_free_fb_out;
+ }
+
+ vdec_av1_slice_vsi_to_remote(vsi, instance->vsi);
+ ret = vpu_dec_start(&instance->vpu, NULL, 0);
+ if (ret) {
+ mtk_vcodec_err(instance, "failed to dec AV1 ret %d\n", ret);
+ goto err_free_fb_out;
+ }
+ if (instance->inneracing_mode)
+ vdec_msg_queue_qbuf(&ctx->msg_queue.core_ctx, lat_buf);
+
+ if (instance->irq_enabled) {
+ ret = mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS,
+ MTK_VDEC_LAT0);
+ /* update remote vsi if decode timeout */
+ if (ret) {
+ mtk_vcodec_err(instance, "AV1 Frame %d decode timeout %d\n", pfc->seq, ret);
+ WRITE_ONCE(instance->vsi->state.timeout, 1);
+ }
+ vpu_dec_end(&instance->vpu);
+ }
+
+ vdec_av1_slice_vsi_from_remote(vsi, instance->vsi);
+ ret = vdec_av1_slice_update_lat(instance, lat_buf, pfc);
+
+ /* LAT trans full, re-decode */
+ if (ret == -EAGAIN) {
+ mtk_vcodec_err(instance, "AV1 Frame %d trans full\n", pfc->seq);
+ if (!instance->inneracing_mode)
+ vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+ return 0;
+ }
+
+ /* LAT trans full, no more UBE or decode timeout */
+ if (ret == -ENOMEM || vsi->state.timeout) {
+ mtk_vcodec_err(instance, "AV1 Frame %d insufficient buffer or timeout\n", pfc->seq);
+ if (!instance->inneracing_mode)
+ vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+ return -EBUSY;
+ }
+ vsi->trans.dma_addr_end += ctx->msg_queue.wdma_addr.dma_addr;
+ mtk_vcodec_debug(instance, "lat dma 1 0x%pad 0x%pad\n",
+ &pfc->vsi.trans.dma_addr, &pfc->vsi.trans.dma_addr_end);
+
+ vdec_msg_queue_update_ube_wptr(&ctx->msg_queue, vsi->trans.dma_addr_end);
+
+ if (!instance->inneracing_mode)
+ vdec_msg_queue_qbuf(&ctx->msg_queue.core_ctx, lat_buf);
+ memcpy(&instance->slots, &vsi->slots, sizeof(instance->slots));
+
+ return 0;
+
+err_free_fb_out:
+ vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+
+ if (pfc)
+ mtk_vcodec_err(instance, "slice dec number: %d err: %d", pfc->seq, ret);
+
+ return ret;
+}
+
+static int vdec_av1_slice_core_decode(struct vdec_lat_buf *lat_buf)
+{
+ struct vdec_av1_slice_instance *instance;
+ struct vdec_av1_slice_pfc *pfc;
+ struct mtk_vcodec_ctx *ctx = NULL;
+ struct vdec_fb *fb = NULL;
+ int ret = -EINVAL;
+
+ if (!lat_buf)
+ return -EINVAL;
+
+ pfc = lat_buf->private_data;
+ ctx = lat_buf->ctx;
+ if (!pfc || !ctx)
+ return -EINVAL;
+
+ instance = ctx->drv_handle;
+ if (!instance)
+ goto err;
+
+ fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
+ if (!fb) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = vdec_av1_slice_setup_core(instance, fb, lat_buf, pfc);
+ if (ret) {
+ mtk_vcodec_err(instance, "vdec_av1_slice_setup_core\n");
+ goto err;
+ }
+ vdec_av1_slice_vsi_to_remote(&pfc->vsi, instance->core_vsi);
+ ret = vpu_dec_core(&instance->vpu);
+ if (ret) {
+ mtk_vcodec_err(instance, "vpu_dec_core\n");
+ goto err;
+ }
+
+ if (instance->irq_enabled) {
+ ret = mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS,
+ MTK_VDEC_CORE);
+ /* update remote vsi if decode timeout */
+ if (ret) {
+ mtk_vcodec_err(instance, "AV1 frame %d core timeout\n", pfc->seq);
+ WRITE_ONCE(instance->vsi->state.timeout, 1);
+ }
+ vpu_dec_core_end(&instance->vpu);
+ }
+
+ ret = vdec_av1_slice_update_core(instance, lat_buf, pfc);
+ if (ret) {
+ mtk_vcodec_err(instance, "vdec_av1_slice_update_core\n");
+ goto err;
+ }
+
+ mtk_vcodec_debug(instance, "core dma_addr_end 0x%pad\n",
+ &instance->core_vsi->trans.dma_addr_end);
+ vdec_msg_queue_update_ube_rptr(&ctx->msg_queue, instance->core_vsi->trans.dma_addr_end);
+
+ ctx->dev->vdec_pdata->cap_to_disp(ctx, 0, lat_buf->src_buf_req);
+
+ return 0;
+
+err:
+ /* always update read pointer */
+ vdec_msg_queue_update_ube_rptr(&ctx->msg_queue, pfc->vsi.trans.dma_addr_end);
+
+ if (fb)
+ ctx->dev->vdec_pdata->cap_to_disp(ctx, 1, lat_buf->src_buf_req);
+
+ return ret;
+}
+
+const struct vdec_common_if vdec_av1_slice_lat_if = {
+ .init = vdec_av1_slice_init,
+ .decode = vdec_av1_slice_lat_decode,
+ .get_param = vdec_av1_slice_get_param,
+ .deinit = vdec_av1_slice_deinit,
+};
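The ops table above hooks AV1 into the driver's two-stage LAT + core pipeline: vdec_av1_slice_lat_decode() pulls a buffer from msg_queue.lat_ctx, runs the LAT hardware, and queues the result to msg_queue.core_ctx, where the work item later invokes the vdec_av1_slice_core_decode() callback registered through vdec_msg_queue_init(). A condensed, illustrative sketch of that flow follows; it is not part of the patch, the wrapper name is invented, and error handling, VSI synchronisation and the inner-racing path are left out.

	/* Illustrative only: condensed from vdec_av1_slice_lat_decode() above. */
	static int av1_lat_step_sketch(struct vdec_av1_slice_instance *instance,
				       struct mtk_vcodec_mem *bs)
	{
		struct mtk_vcodec_ctx *ctx = instance->ctx;
		struct vdec_lat_buf *lat_buf;

		/* first call registers the core callback and private data size */
		if (vdec_msg_queue_init(&ctx->msg_queue, ctx,
					vdec_av1_slice_core_decode,
					sizeof(struct vdec_av1_slice_pfc)))
			return -ENOMEM;

		lat_buf = vdec_msg_queue_dqbuf(&ctx->msg_queue.lat_ctx);
		if (!lat_buf)
			return -EAGAIN;	/* no free lat buffer yet, caller retries */

		/* ... fill pfc/vsi from the bitstream and run the LAT hardware ... */

		/* hand the LAT output over to the core stage */
		vdec_msg_queue_qbuf(&ctx->msg_queue.core_ctx, lat_buf);
		return 0;
	}

The core work queue then dequeues the buffer and runs its core_decode callback against the shared pfc/VSI data before returning it to the lat list.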
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
index 999ce7ee5fdc..a7e8e3257b7f 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
@@ -596,7 +596,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
if (!lat_buf) {
- mtk_vcodec_err(inst, "failed to get lat buffer");
+ mtk_vcodec_debug(inst, "failed to get lat buffer");
return -EAGAIN;
}
share_info = lat_buf->private_data;
@@ -672,7 +672,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
memcpy(&share_info->h264_slice_params, &inst->vsi->h264_slice_params,
sizeof(share_info->h264_slice_params));
- vdec_msg_queue_qbuf(&inst->ctx->dev->msg_queue_core_ctx, lat_buf);
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
}
/* wait decoder done interrupt */
@@ -698,7 +698,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
memcpy(&share_info->h264_slice_params, &inst->vsi->h264_slice_params,
sizeof(share_info->h264_slice_params));
- vdec_msg_queue_qbuf(&inst->ctx->dev->msg_queue_core_ctx, lat_buf);
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
}
mtk_vcodec_debug(inst, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
inst->vsi->dec.crc[0], inst->vsi->dec.crc[1], inst->vsi->dec.crc[2]);
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_hevc_req_multi_if.c
new file mode 100644
index 000000000000..1e6ab138b0bb
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_hevc_req_multi_if.c
@@ -0,0 +1,1097 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_drv_base.h"
+#include "../vdec_drv_if.h"
+#include "../vdec_vpu_if.h"
+
+/* the size used to store hevc wrap information */
+#define VDEC_HEVC_WRAP_SZ (532 * SZ_1K)
+
+#define HEVC_MAX_MV_NUM 32
+
+/* get used parameters for sps/pps */
+#define GET_HEVC_VDEC_FLAG(cond, flag) \
+ { dst_param->cond = ((src_param->flags & (flag)) ? (1) : (0)); }
+#define GET_HEVC_VDEC_PARAM(param) \
+ { dst_param->param = src_param->param; }
+
+/**
+ * enum vdec_hevc_core_dec_err_type - core decode error type
+ *
+ * @TRANS_BUFFER_FULL: trans buffer is full
+ * @SLICE_HEADER_FULL: slice header buffer is full
+ */
+enum vdec_hevc_core_dec_err_type {
+ TRANS_BUFFER_FULL = 1,
+ SLICE_HEADER_FULL,
+};
+
+/**
+ * struct mtk_hevc_dpb_info - hevc dpb information
+ *
+ * @y_dma_addr: Y plane physical address
+ * @c_dma_addr: CbCr plane physical address
+ * @reference_flag: reference picture flag (short/long term reference picture)
+ * @field: field picture flag
+ */
+struct mtk_hevc_dpb_info {
+ dma_addr_t y_dma_addr;
+ dma_addr_t c_dma_addr;
+ int reference_flag;
+ int field;
+};
+
+/*
+ * struct mtk_hevc_sps_param - parameters for sps
+ */
+struct mtk_hevc_sps_param {
+ unsigned char video_parameter_set_id;
+ unsigned char seq_parameter_set_id;
+ unsigned short pic_width_in_luma_samples;
+ unsigned short pic_height_in_luma_samples;
+ unsigned char bit_depth_luma_minus8;
+ unsigned char bit_depth_chroma_minus8;
+ unsigned char log2_max_pic_order_cnt_lsb_minus4;
+ unsigned char sps_max_dec_pic_buffering_minus1;
+ unsigned char sps_max_num_reorder_pics;
+ unsigned char sps_max_latency_increase_plus1;
+ unsigned char log2_min_luma_coding_block_size_minus3;
+ unsigned char log2_diff_max_min_luma_coding_block_size;
+ unsigned char log2_min_luma_transform_block_size_minus2;
+ unsigned char log2_diff_max_min_luma_transform_block_size;
+ unsigned char max_transform_hierarchy_depth_inter;
+ unsigned char max_transform_hierarchy_depth_intra;
+ unsigned char pcm_sample_bit_depth_luma_minus1;
+ unsigned char pcm_sample_bit_depth_chroma_minus1;
+ unsigned char log2_min_pcm_luma_coding_block_size_minus3;
+ unsigned char log2_diff_max_min_pcm_luma_coding_block_size;
+ unsigned char num_short_term_ref_pic_sets;
+ unsigned char num_long_term_ref_pics_sps;
+ unsigned char chroma_format_idc;
+ unsigned char sps_max_sub_layers_minus1;
+ unsigned char separate_colour_plane;
+ unsigned char scaling_list_enabled;
+ unsigned char amp_enabled;
+ unsigned char sample_adaptive_offset;
+ unsigned char pcm_enabled;
+ unsigned char pcm_loop_filter_disabled;
+ unsigned char long_term_ref_pics_enabled;
+ unsigned char sps_temporal_mvp_enabled;
+ unsigned char strong_intra_smoothing_enabled;
+ unsigned char reserved[5];
+};
+
+/*
+ * struct mtk_hevc_pps_param - parameters for pps
+ */
+struct mtk_hevc_pps_param {
+ unsigned char pic_parameter_set_id;
+ unsigned char num_extra_slice_header_bits;
+ unsigned char num_ref_idx_l0_default_active_minus1;
+ unsigned char num_ref_idx_l1_default_active_minus1;
+ char init_qp_minus26;
+ unsigned char diff_cu_qp_delta_depth;
+ char pps_cb_qp_offset;
+ char pps_cr_qp_offset;
+ unsigned char num_tile_columns_minus1;
+ unsigned char num_tile_rows_minus1;
+ unsigned char column_width_minus1[20];
+ unsigned char row_height_minus1[22];
+ char pps_beta_offset_div2;
+ char pps_tc_offset_div2;
+ unsigned char log2_parallel_merge_level_minus2;
+ char dependent_slice_segment_enabled;
+ char output_flag_present;
+ char sign_data_hiding_enabled;
+ char cabac_init_present;
+ char constrained_intra_pred;
+ char transform_skip_enabled;
+ char cu_qp_delta_enabled;
+ char pps_slice_chroma_qp_offsets_present;
+ char weighted_pred;
+ char weighted_bipred;
+ char transquant_bypass_enabled;
+ char pps_flag_tiles_enabled;
+ char entropy_coding_sync_enabled;
+ char loop_filter_across_tiles_enabled;
+ char pps_loop_filter_across_slices_enabled;
+ char deblocking_filter_override_enabled;
+ char pps_disable_deblocking_filter;
+ char lists_modification_present;
+ char slice_segment_header_extension_present;
+ char deblocking_filter_control_present;
+ char uniform_spacing;
+ char reserved[6];
+};
+
+/*
+ * struct mtk_hevc_slice_header_param - parameters for slice header
+ */
+struct mtk_hevc_slice_header_param {
+ unsigned int slice_type;
+ unsigned int num_active_ref_layer_pics;
+ int slice_qp;
+ int slice_qp_delta_cb;
+ int slice_qp_delta_cr;
+ int num_ref_idx[3];
+ unsigned int col_ref_idx;
+ unsigned int five_minus_max_num_merge_cand;
+ int slice_deblocking_filter_beta_offset_div2;
+ int slice_deblocking_filter_tc_offset_div2;
+ unsigned char sao_enable_flag;
+ unsigned char sao_enable_flag_chroma;
+ unsigned char cabac_init_flag;
+ unsigned char slice_tmvp_flags_present;
+ unsigned char col_from_l0_flag;
+ unsigned char mvd_l1_zero_flag;
+ unsigned char slice_loop_filter_across_slices_enabled_flag;
+ unsigned char deblocking_filter_disable_flag;
+ unsigned int slice_reg0;
+ unsigned int slice_reg1;
+ unsigned int slice_reg2;
+ unsigned int num_rps_curr_temp_list;
+ unsigned int ref_list_mode;
+ int str_num_delta_pocs;
+ int str_num_negative_pos_pics;
+ int num_long_term;
+ int num_long_term_sps;
+ unsigned int max_cu_width;
+ unsigned int max_cu_height;
+ unsigned int num_entry_point_offsets;
+ unsigned int last_lcu_x_in_tile[17];
+ unsigned int last_lcu_y_in_tile[17];
+ unsigned char nal_unit_type;
+};
+
+/*
+ * struct slice_api_hevc_scaling_matrix - parameters for scaling list
+ */
+struct slice_api_hevc_scaling_matrix {
+ unsigned char scaling_list_4x4[6][16];
+ unsigned char scaling_list_8x8[6][64];
+ unsigned char scaling_list_16x16[6][64];
+ unsigned char scaling_list_32x32[2][64];
+ unsigned char scaling_list_dc_coef_16x16[6];
+ unsigned char scaling_list_dc_coef_32x32[2];
+};
+
+/*
+ * struct slice_hevc_dpb_entry - each dpb information
+ */
+struct slice_hevc_dpb_entry {
+ u64 timestamp;
+ unsigned char flags;
+ unsigned char field_pic;
+ int pic_order_cnt_val;
+};
+
+/*
+ * struct slice_api_hevc_decode_param - parameters for decode.
+ */
+struct slice_api_hevc_decode_param {
+ struct slice_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ int pic_order_cnt_val;
+ unsigned short short_term_ref_pic_set_size;
+ unsigned short long_term_ref_pic_set_size;
+ unsigned char num_active_dpb_entries;
+ unsigned char num_poc_st_curr_before;
+ unsigned char num_poc_st_curr_after;
+ unsigned char num_poc_lt_curr;
+ unsigned char poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ unsigned char poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ unsigned char poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ unsigned char num_delta_pocs_of_ref_rps_idx;
+ int flags;
+};
+
+/**
+ * struct hevc_fb - hevc decode frame buffer information
+ *
+ * @vdec_fb_va: virtual address of struct vdec_fb
+ * @y_fb_dma: dma address of Y frame buffer (luma)
+ * @c_fb_dma: dma address of C frame buffer (chroma)
+ * @poc: picture order count of frame buffer
+ * @reserved: for 8-byte alignment
+ */
+struct hevc_fb {
+ u64 vdec_fb_va;
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ s32 poc;
+ u32 reserved;
+};
+
+/**
+ * struct vdec_hevc_slice_lat_dec_param - parameters for decode current frame
+ *
+ * @sps: hevc sps syntax parameters
+ * @pps: hevc pps syntax parameters
+ * @slice_header: hevc slice header syntax parameters
+ * @scaling_matrix: hevc scaling list parameters
+ * @decode_params: decoder parameters of each frame used for hardware decode
+ * @hevc_dpb_info: dpb reference list
+ */
+struct vdec_hevc_slice_lat_dec_param {
+ struct mtk_hevc_sps_param sps;
+ struct mtk_hevc_pps_param pps;
+ struct mtk_hevc_slice_header_param slice_header;
+ struct slice_api_hevc_scaling_matrix scaling_matrix;
+ struct slice_api_hevc_decode_param decode_params;
+ struct mtk_hevc_dpb_info hevc_dpb_info[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+};
+
+/**
+ * struct vdec_hevc_slice_info - decode information
+ *
+ * @wdma_end_addr_offset: wdma end address offset
+ * @timeout: Decode timeout: 1 timeout, 0 no timeout
+ * @vdec_fb_va: VDEC frame buffer struct virtual address
+ * @crc: Used to check whether hardware's status is right
+ */
+struct vdec_hevc_slice_info {
+ u64 wdma_end_addr_offset;
+ u64 timeout;
+ u64 vdec_fb_va;
+ u32 crc[8];
+};
+
+/*
+ * struct vdec_hevc_slice_mem - memory address and size
+ */
+struct vdec_hevc_slice_mem {
+ union {
+ u64 buf;
+ dma_addr_t dma_addr;
+ };
+ union {
+ size_t size;
+ dma_addr_t dma_addr_end;
+ u64 padding;
+ };
+};
+
+/**
+ * struct vdec_hevc_slice_fb - frame buffer for decoding
+ * @y: current y buffer address info
+ * @c: current c buffer address info
+ */
+struct vdec_hevc_slice_fb {
+ struct vdec_hevc_slice_mem y;
+ struct vdec_hevc_slice_mem c;
+};
+
+/**
+ * struct vdec_hevc_slice_vsi - shared memory for decode information exchange
+ * between SCP and Host.
+ *
+ * @bs: input buffer info
+ *
+ * @ube: ube buffer
+ * @trans: transcoded buffer
+ * @err_map: err map buffer
+ * @slice_bc: slice bc buffer
+ * @wrap: temp buffer
+ *
+ * @fb: current y/c buffer
+ * @mv_buf_dma: HW working motion vector buffer
+ * @dec: decode information (AP-R, VPU-W)
+ * @hevc_slice_params: decode parameters for hw used
+ */
+struct vdec_hevc_slice_vsi {
+ /* used in LAT stage */
+ struct vdec_hevc_slice_mem bs;
+
+ struct vdec_hevc_slice_mem ube;
+ struct vdec_hevc_slice_mem trans;
+ struct vdec_hevc_slice_mem err_map;
+ struct vdec_hevc_slice_mem slice_bc;
+ struct vdec_hevc_slice_mem wrap;
+
+ struct vdec_hevc_slice_fb fb;
+ struct vdec_hevc_slice_mem mv_buf_dma[HEVC_MAX_MV_NUM];
+ struct vdec_hevc_slice_info dec;
+ struct vdec_hevc_slice_lat_dec_param hevc_slice_params;
+};
+
+/**
+ * struct vdec_hevc_slice_share_info - shared information used to exchange
+ * messages between LAT and core
+ *
+ * @sps: sequence header information from user space
+ * @dec_params: decoder params from user space
+ * @hevc_slice_params: decoder params used for hardware
+ * @trans: trans buffer dma address
+ */
+struct vdec_hevc_slice_share_info {
+ struct v4l2_ctrl_hevc_sps sps;
+ struct v4l2_ctrl_hevc_decode_params dec_params;
+ struct vdec_hevc_slice_lat_dec_param hevc_slice_params;
+ struct vdec_hevc_slice_mem trans;
+};
+
+/**
+ * struct vdec_hevc_slice_inst - hevc decoder instance
+ *
+ * @slice_dec_num: number of pictures decoded so far
+ * @ctx: pointer to mtk_vcodec_ctx
+ * @mv_buf: HW working motion vector buffer
+ * @vpu: VPU instance
+ * @vsi: vsi used for lat
+ * @vsi_core: vsi used for core
+ * @wrap_addr: wrap address used for hevc
+ *
+ * @hevc_slice_param: the parameters that the hardware uses to decode
+ *
+ * @resolution_changed: resolution changed
+ * @realloc_mv_buf: reallocate mv buffer
+ * @cap_num_planes: number of capture queue planes
+ */
+struct vdec_hevc_slice_inst {
+ unsigned int slice_dec_num;
+ struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_mem mv_buf[HEVC_MAX_MV_NUM];
+ struct vdec_vpu_inst vpu;
+ struct vdec_hevc_slice_vsi *vsi;
+ struct vdec_hevc_slice_vsi *vsi_core;
+ struct mtk_vcodec_mem wrap_addr;
+
+ struct vdec_hevc_slice_lat_dec_param hevc_slice_param;
+
+ unsigned int resolution_changed;
+ unsigned int realloc_mv_buf;
+ unsigned int cap_num_planes;
+};
+
+static unsigned int vdec_hevc_get_mv_buf_size(unsigned int width, unsigned int height)
+{
+ const unsigned int unit_size = (width / 16) * (height / 16) + 8;
+
+ return 64 * unit_size;
+}
+
+static void *vdec_hevc_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
+
+ if (!ctrl)
+ return ERR_PTR(-EINVAL);
+
+ return ctrl->p_cur.p;
+}
+
+static void vdec_hevc_fill_dpb_info(struct mtk_vcodec_ctx *ctx,
+ struct slice_api_hevc_decode_param *decode_params,
+ struct mtk_hevc_dpb_info *hevc_dpb_info)
+{
+ const struct slice_hevc_dpb_entry *dpb;
+ struct vb2_queue *vq;
+ struct vb2_buffer *vb;
+ int index;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ for (index = 0; index < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; index++) {
+ dpb = &decode_params->dpb[index];
+ if (index >= decode_params->num_active_dpb_entries)
+ continue;
+
+ vb = vb2_find_buffer(vq, dpb->timestamp);
+ if (!vb) {
+ dev_err(&ctx->dev->plat_dev->dev,
+ "Reference invalid: dpb_index(%d) timestamp(%lld)",
+ index, dpb->timestamp);
+ continue;
+ }
+
+ hevc_dpb_info[index].field = dpb->field_pic;
+
+ hevc_dpb_info[index].y_dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ hevc_dpb_info[index].c_dma_addr = vb2_dma_contig_plane_dma_addr(vb, 1);
+ else
+ hevc_dpb_info[index].c_dma_addr =
+ hevc_dpb_info[index].y_dma_addr + ctx->picinfo.fb_sz[0];
+ }
+}
+
+static void vdec_hevc_copy_sps_params(struct mtk_hevc_sps_param *dst_param,
+ const struct v4l2_ctrl_hevc_sps *src_param)
+{
+ GET_HEVC_VDEC_PARAM(video_parameter_set_id);
+ GET_HEVC_VDEC_PARAM(seq_parameter_set_id);
+ GET_HEVC_VDEC_PARAM(pic_width_in_luma_samples);
+ GET_HEVC_VDEC_PARAM(pic_height_in_luma_samples);
+ GET_HEVC_VDEC_PARAM(bit_depth_luma_minus8);
+ GET_HEVC_VDEC_PARAM(bit_depth_chroma_minus8);
+ GET_HEVC_VDEC_PARAM(log2_max_pic_order_cnt_lsb_minus4);
+ GET_HEVC_VDEC_PARAM(sps_max_dec_pic_buffering_minus1);
+ GET_HEVC_VDEC_PARAM(sps_max_num_reorder_pics);
+ GET_HEVC_VDEC_PARAM(sps_max_latency_increase_plus1);
+ GET_HEVC_VDEC_PARAM(log2_min_luma_coding_block_size_minus3);
+ GET_HEVC_VDEC_PARAM(log2_diff_max_min_luma_coding_block_size);
+ GET_HEVC_VDEC_PARAM(log2_min_luma_transform_block_size_minus2);
+ GET_HEVC_VDEC_PARAM(log2_diff_max_min_luma_transform_block_size);
+ GET_HEVC_VDEC_PARAM(max_transform_hierarchy_depth_inter);
+ GET_HEVC_VDEC_PARAM(max_transform_hierarchy_depth_intra);
+ GET_HEVC_VDEC_PARAM(pcm_sample_bit_depth_luma_minus1);
+ GET_HEVC_VDEC_PARAM(pcm_sample_bit_depth_chroma_minus1);
+ GET_HEVC_VDEC_PARAM(log2_min_pcm_luma_coding_block_size_minus3);
+ GET_HEVC_VDEC_PARAM(log2_diff_max_min_pcm_luma_coding_block_size);
+ GET_HEVC_VDEC_PARAM(num_short_term_ref_pic_sets);
+ GET_HEVC_VDEC_PARAM(num_long_term_ref_pics_sps);
+ GET_HEVC_VDEC_PARAM(chroma_format_idc);
+ GET_HEVC_VDEC_PARAM(sps_max_sub_layers_minus1);
+
+ GET_HEVC_VDEC_FLAG(separate_colour_plane,
+ V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE);
+ GET_HEVC_VDEC_FLAG(scaling_list_enabled,
+ V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED);
+ GET_HEVC_VDEC_FLAG(amp_enabled,
+ V4L2_HEVC_SPS_FLAG_AMP_ENABLED);
+ GET_HEVC_VDEC_FLAG(sample_adaptive_offset,
+ V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET);
+ GET_HEVC_VDEC_FLAG(pcm_enabled,
+ V4L2_HEVC_SPS_FLAG_PCM_ENABLED);
+ GET_HEVC_VDEC_FLAG(pcm_loop_filter_disabled,
+ V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED);
+ GET_HEVC_VDEC_FLAG(long_term_ref_pics_enabled,
+ V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT);
+ GET_HEVC_VDEC_FLAG(sps_temporal_mvp_enabled,
+ V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED);
+ GET_HEVC_VDEC_FLAG(strong_intra_smoothing_enabled,
+ V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED);
+}
+
+static void vdec_hevc_copy_pps_params(struct mtk_hevc_pps_param *dst_param,
+ const struct v4l2_ctrl_hevc_pps *src_param)
+{
+ int i;
+
+ GET_HEVC_VDEC_PARAM(pic_parameter_set_id);
+ GET_HEVC_VDEC_PARAM(num_extra_slice_header_bits);
+ GET_HEVC_VDEC_PARAM(num_ref_idx_l0_default_active_minus1);
+ GET_HEVC_VDEC_PARAM(num_ref_idx_l1_default_active_minus1);
+ GET_HEVC_VDEC_PARAM(init_qp_minus26);
+ GET_HEVC_VDEC_PARAM(diff_cu_qp_delta_depth);
+ GET_HEVC_VDEC_PARAM(pps_cb_qp_offset);
+ GET_HEVC_VDEC_PARAM(pps_cr_qp_offset);
+ GET_HEVC_VDEC_PARAM(num_tile_columns_minus1);
+ GET_HEVC_VDEC_PARAM(num_tile_rows_minus1);
+ GET_HEVC_VDEC_PARAM(pps_beta_offset_div2);
+ GET_HEVC_VDEC_PARAM(pps_tc_offset_div2);
+ GET_HEVC_VDEC_PARAM(log2_parallel_merge_level_minus2);
+
+ for (i = 0; i < ARRAY_SIZE(src_param->column_width_minus1); i++)
+ GET_HEVC_VDEC_PARAM(column_width_minus1[i]);
+ for (i = 0; i < ARRAY_SIZE(src_param->row_height_minus1); i++)
+ GET_HEVC_VDEC_PARAM(row_height_minus1[i]);
+
+ GET_HEVC_VDEC_FLAG(dependent_slice_segment_enabled,
+ V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED);
+ GET_HEVC_VDEC_FLAG(output_flag_present,
+ V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT);
+ GET_HEVC_VDEC_FLAG(sign_data_hiding_enabled,
+ V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED);
+ GET_HEVC_VDEC_FLAG(cabac_init_present,
+ V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT);
+ GET_HEVC_VDEC_FLAG(constrained_intra_pred,
+ V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED);
+ GET_HEVC_VDEC_FLAG(transform_skip_enabled,
+ V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED);
+ GET_HEVC_VDEC_FLAG(cu_qp_delta_enabled,
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED);
+ GET_HEVC_VDEC_FLAG(pps_slice_chroma_qp_offsets_present,
+ V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT);
+ GET_HEVC_VDEC_FLAG(weighted_pred,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED);
+ GET_HEVC_VDEC_FLAG(weighted_bipred,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED);
+ GET_HEVC_VDEC_FLAG(transquant_bypass_enabled,
+ V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED);
+ GET_HEVC_VDEC_FLAG(pps_flag_tiles_enabled,
+ V4L2_HEVC_PPS_FLAG_TILES_ENABLED);
+ GET_HEVC_VDEC_FLAG(entropy_coding_sync_enabled,
+ V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED);
+ GET_HEVC_VDEC_FLAG(loop_filter_across_tiles_enabled,
+ V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED);
+ GET_HEVC_VDEC_FLAG(pps_loop_filter_across_slices_enabled,
+ V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED);
+ GET_HEVC_VDEC_FLAG(deblocking_filter_override_enabled,
+ V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED);
+ GET_HEVC_VDEC_FLAG(pps_disable_deblocking_filter,
+ V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER);
+ GET_HEVC_VDEC_FLAG(lists_modification_present,
+ V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT);
+ GET_HEVC_VDEC_FLAG(slice_segment_header_extension_present,
+ V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT);
+ GET_HEVC_VDEC_FLAG(deblocking_filter_control_present,
+ V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT);
+ GET_HEVC_VDEC_FLAG(uniform_spacing,
+ V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING);
+}
+
+static void vdec_hevc_copy_scaling_matrix(struct slice_api_hevc_scaling_matrix *dst_matrix,
+ const struct v4l2_ctrl_hevc_scaling_matrix *src_matrix)
+{
+ memcpy(dst_matrix, src_matrix, sizeof(*src_matrix));
+}
+
+static void
+vdec_hevc_copy_decode_params(struct slice_api_hevc_decode_param *dst_param,
+ const struct v4l2_ctrl_hevc_decode_params *src_param,
+ const struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX])
+{
+ struct slice_hevc_dpb_entry *dst_entry;
+ const struct v4l2_hevc_dpb_entry *src_entry;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dst_param->dpb); i++) {
+ dst_entry = &dst_param->dpb[i];
+ src_entry = &dpb[i];
+
+ dst_entry->timestamp = src_entry->timestamp;
+ dst_entry->flags = src_entry->flags;
+ dst_entry->field_pic = src_entry->field_pic;
+ dst_entry->pic_order_cnt_val = src_entry->pic_order_cnt_val;
+
+ GET_HEVC_VDEC_PARAM(poc_st_curr_before[i]);
+ GET_HEVC_VDEC_PARAM(poc_st_curr_after[i]);
+ GET_HEVC_VDEC_PARAM(poc_lt_curr[i]);
+ }
+
+ GET_HEVC_VDEC_PARAM(pic_order_cnt_val);
+ GET_HEVC_VDEC_PARAM(short_term_ref_pic_set_size);
+ GET_HEVC_VDEC_PARAM(long_term_ref_pic_set_size);
+ GET_HEVC_VDEC_PARAM(num_active_dpb_entries);
+ GET_HEVC_VDEC_PARAM(num_poc_st_curr_before);
+ GET_HEVC_VDEC_PARAM(num_poc_st_curr_after);
+ GET_HEVC_VDEC_PARAM(num_delta_pocs_of_ref_rps_idx);
+ GET_HEVC_VDEC_PARAM(num_poc_lt_curr);
+ GET_HEVC_VDEC_PARAM(flags);
+}
+
+static int vdec_hevc_slice_fill_decode_parameters(struct vdec_hevc_slice_inst *inst,
+ struct vdec_hevc_slice_share_info *share_info)
+{
+ struct vdec_hevc_slice_lat_dec_param *slice_param = &inst->vsi->hevc_slice_params;
+ const struct v4l2_ctrl_hevc_decode_params *dec_params;
+ const struct v4l2_ctrl_hevc_scaling_matrix *src_matrix;
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+
+ dec_params =
+ vdec_hevc_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_HEVC_DECODE_PARAMS);
+ if (IS_ERR(dec_params))
+ return PTR_ERR(dec_params);
+
+ src_matrix =
+ vdec_hevc_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_HEVC_SCALING_MATRIX);
+ if (IS_ERR(src_matrix))
+ return PTR_ERR(src_matrix);
+
+ sps = vdec_hevc_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_HEVC_SPS);
+ if (IS_ERR(sps))
+ return PTR_ERR(sps);
+
+ pps = vdec_hevc_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_HEVC_PPS);
+ if (IS_ERR(pps))
+ return PTR_ERR(pps);
+
+ vdec_hevc_copy_sps_params(&slice_param->sps, sps);
+ vdec_hevc_copy_pps_params(&slice_param->pps, pps);
+ vdec_hevc_copy_scaling_matrix(&slice_param->scaling_matrix, src_matrix);
+
+ memcpy(&share_info->sps, sps, sizeof(*sps));
+ memcpy(&share_info->dec_params, dec_params, sizeof(*dec_params));
+
+ slice_param->decode_params.num_poc_st_curr_before = dec_params->num_poc_st_curr_before;
+ slice_param->decode_params.num_poc_st_curr_after = dec_params->num_poc_st_curr_after;
+ slice_param->decode_params.num_poc_lt_curr = dec_params->num_poc_lt_curr;
+ slice_param->decode_params.num_delta_pocs_of_ref_rps_idx =
+ dec_params->num_delta_pocs_of_ref_rps_idx;
+
+ return 0;
+}
+
+static void vdec_hevc_slice_fill_decode_reflist(struct vdec_hevc_slice_inst *inst,
+ struct vdec_hevc_slice_lat_dec_param *slice_param,
+ struct vdec_hevc_slice_share_info *share_info)
+{
+ struct v4l2_ctrl_hevc_decode_params *dec_params = &share_info->dec_params;
+
+ vdec_hevc_copy_decode_params(&slice_param->decode_params, dec_params,
+ share_info->dec_params.dpb);
+
+ vdec_hevc_fill_dpb_info(inst->ctx, &slice_param->decode_params,
+ slice_param->hevc_dpb_info);
+}
+
+static int vdec_hevc_slice_alloc_mv_buf(struct vdec_hevc_slice_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ unsigned int buf_sz = vdec_hevc_get_mv_buf_size(pic->buf_w, pic->buf_h);
+ struct mtk_vcodec_mem *mem;
+ int i, err;
+
+ mtk_v4l2_debug(3, "allocate mv buffer size = 0x%x", buf_sz);
+ for (i = 0; i < HEVC_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ mem->size = buf_sz;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate mv buf");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void vdec_hevc_slice_free_mv_buf(struct vdec_hevc_slice_inst *inst)
+{
+ int i;
+ struct mtk_vcodec_mem *mem;
+
+ for (i = 0; i < HEVC_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ }
+}
+
+static void vdec_hevc_slice_get_pic_info(struct vdec_hevc_slice_inst *inst)
+{
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+ u32 data[3];
+
+ data[0] = ctx->picinfo.pic_w;
+ data[1] = ctx->picinfo.pic_h;
+ data[2] = ctx->capture_fourcc;
+ vpu_dec_get_param(&inst->vpu, data, 3, GET_PARAM_PIC_INFO);
+
+ ctx->picinfo.buf_w = ALIGN(ctx->picinfo.pic_w, VCODEC_DEC_ALIGNED_64);
+ ctx->picinfo.buf_h = ALIGN(ctx->picinfo.pic_h, VCODEC_DEC_ALIGNED_64);
+ ctx->picinfo.fb_sz[0] = inst->vpu.fb_sz[0];
+ ctx->picinfo.fb_sz[1] = inst->vpu.fb_sz[1];
+ inst->cap_num_planes =
+ ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes;
+
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h);
+ mtk_vcodec_debug(inst, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
+ ctx->picinfo.fb_sz[1]);
+
+ if (ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w ||
+ ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h) {
+ inst->resolution_changed = true;
+ if (ctx->last_decoded_picinfo.buf_w != ctx->picinfo.buf_w ||
+ ctx->last_decoded_picinfo.buf_h != ctx->picinfo.buf_h)
+ inst->realloc_mv_buf = true;
+
+ mtk_v4l2_debug(1, "resChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
+ inst->resolution_changed,
+ inst->realloc_mv_buf,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+ }
+}
+
+static void vdec_hevc_slice_get_crop_info(struct vdec_hevc_slice_inst *inst,
+ struct v4l2_rect *cr)
+{
+ cr->left = 0;
+ cr->top = 0;
+ cr->width = inst->ctx->picinfo.pic_w;
+ cr->height = inst->ctx->picinfo.pic_h;
+
+ mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_hevc_slice_setup_lat_buffer(struct vdec_hevc_slice_inst *inst,
+ struct mtk_vcodec_mem *bs,
+ struct vdec_lat_buf *lat_buf,
+ bool *res_chg)
+{
+ struct mtk_vcodec_mem *mem;
+ struct mtk_video_dec_buf *src_buf_info;
+ struct vdec_hevc_slice_share_info *share_info;
+ int i, err;
+
+ inst->vsi->bs.dma_addr = (u64)bs->dma_addr;
+ inst->vsi->bs.size = bs->size;
+
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+ lat_buf->src_buf_req = src_buf_info->m2m_buf.vb.vb2_buf.req_obj.req;
+ v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb, &lat_buf->ts_info, true);
+
+ *res_chg = inst->resolution_changed;
+ if (inst->resolution_changed) {
+ mtk_vcodec_debug(inst, "- resolution changed -");
+ if (inst->realloc_mv_buf) {
+ err = vdec_hevc_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
+ inst->realloc_mv_buf = false;
+ if (err)
+ return err;
+ }
+ inst->resolution_changed = false;
+ }
+
+ for (i = 0; i < HEVC_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ inst->vsi->mv_buf_dma[i].dma_addr = mem->dma_addr;
+ inst->vsi->mv_buf_dma[i].size = mem->size;
+ }
+
+ inst->vsi->ube.dma_addr = lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
+ inst->vsi->ube.size = lat_buf->ctx->msg_queue.wdma_addr.size;
+
+ inst->vsi->err_map.dma_addr = lat_buf->wdma_err_addr.dma_addr;
+ inst->vsi->err_map.size = lat_buf->wdma_err_addr.size;
+
+ inst->vsi->slice_bc.dma_addr = lat_buf->slice_bc_addr.dma_addr;
+ inst->vsi->slice_bc.size = lat_buf->slice_bc_addr.size;
+
+ inst->vsi->trans.dma_addr_end = inst->ctx->msg_queue.wdma_rptr_addr;
+ inst->vsi->trans.dma_addr = inst->ctx->msg_queue.wdma_wptr_addr;
+
+ share_info = lat_buf->private_data;
+ share_info->trans.dma_addr = inst->vsi->trans.dma_addr;
+ share_info->trans.dma_addr_end = inst->vsi->trans.dma_addr_end;
+
+ mtk_vcodec_debug(inst, "lat: ube addr/size(0x%llx 0x%llx) err:0x%llx",
+ inst->vsi->ube.buf,
+ inst->vsi->ube.padding,
+ inst->vsi->err_map.buf);
+
+ mtk_vcodec_debug(inst, "slice addr/size(0x%llx 0x%llx) trans start/end((0x%llx 0x%llx))",
+ inst->vsi->slice_bc.buf,
+ inst->vsi->slice_bc.padding,
+ inst->vsi->trans.buf,
+ inst->vsi->trans.padding);
+
+ return 0;
+}
+
+static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst,
+ struct vdec_hevc_slice_share_info *share_info,
+ struct vdec_lat_buf *lat_buf)
+{
+ struct mtk_vcodec_mem *mem;
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ struct vdec_fb *fb;
+ u64 y_fb_dma, c_fb_dma;
+ int i;
+
+ fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
+ if (!fb) {
+ mtk_vcodec_err(inst, "fb buffer is NULL");
+ return -EBUSY;
+ }
+
+ y_fb_dma = (u64)fb->base_y.dma_addr;
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
+ c_fb_dma =
+ y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
+ else
+ c_fb_dma = (u64)fb->base_c.dma_addr;
+
+ mtk_vcodec_debug(inst, "[hevc-core] y/c addr = 0x%llx 0x%llx", y_fb_dma,
+ c_fb_dma);
+
+ inst->vsi_core->fb.y.dma_addr = y_fb_dma;
+ inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[0];
+ inst->vsi_core->fb.c.dma_addr = c_fb_dma;
+ inst->vsi_core->fb.c.size = ctx->picinfo.fb_sz[1];
+
+ inst->vsi_core->dec.vdec_fb_va = (unsigned long)fb;
+
+ inst->vsi_core->ube.dma_addr = lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
+ inst->vsi_core->ube.size = lat_buf->ctx->msg_queue.wdma_addr.size;
+
+ inst->vsi_core->err_map.dma_addr = lat_buf->wdma_err_addr.dma_addr;
+ inst->vsi_core->err_map.size = lat_buf->wdma_err_addr.size;
+
+ inst->vsi_core->slice_bc.dma_addr = lat_buf->slice_bc_addr.dma_addr;
+ inst->vsi_core->slice_bc.size = lat_buf->slice_bc_addr.size;
+
+ inst->vsi_core->trans.dma_addr = share_info->trans.dma_addr;
+ inst->vsi_core->trans.dma_addr_end = share_info->trans.dma_addr_end;
+
+ inst->vsi_core->wrap.dma_addr = inst->wrap_addr.dma_addr;
+ inst->vsi_core->wrap.size = inst->wrap_addr.size;
+
+ for (i = 0; i < HEVC_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ inst->vsi_core->mv_buf_dma[i].dma_addr = mem->dma_addr;
+ inst->vsi_core->mv_buf_dma[i].size = mem->size;
+ }
+
+ vb2_v4l2 = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ v4l2_m2m_buf_copy_metadata(&lat_buf->ts_info, vb2_v4l2, true);
+
+ return 0;
+}
+
+static int vdec_hevc_slice_init(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_hevc_slice_inst *inst;
+ int err, vsi_size;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = SCP_IPI_VDEC_LAT;
+ inst->vpu.core_id = SCP_IPI_VDEC_CORE;
+ inst->vpu.ctx = ctx;
+ inst->vpu.codec_type = ctx->current_codec;
+ inst->vpu.capture_type = ctx->capture_fourcc;
+
+ ctx->drv_handle = inst;
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_hevc init err=%d", err);
+ goto error_free_inst;
+ }
+
+ vsi_size = round_up(sizeof(struct vdec_hevc_slice_vsi), VCODEC_DEC_ALIGNED_64);
+ inst->vsi = inst->vpu.vsi;
+ inst->vsi_core =
+ (struct vdec_hevc_slice_vsi *)(((char *)inst->vpu.vsi) + vsi_size);
+
+ inst->resolution_changed = true;
+ inst->realloc_mv_buf = true;
+
+ inst->wrap_addr.size = VDEC_HEVC_WRAP_SZ;
+ err = mtk_vcodec_mem_alloc(ctx, &inst->wrap_addr);
+ if (err)
+ goto error_free_inst;
+
+ mtk_vcodec_debug(inst, "lat struct size = %d,%d,%d,%d vsi: %d\n",
+ (int)sizeof(struct mtk_hevc_sps_param),
+ (int)sizeof(struct mtk_hevc_pps_param),
+ (int)sizeof(struct vdec_hevc_slice_lat_dec_param),
+ (int)sizeof(struct mtk_hevc_dpb_info),
+ vsi_size);
+ mtk_vcodec_debug(inst, "lat hevc instance >> %p, codec_type = 0x%x",
+ inst, inst->vpu.codec_type);
+
+ return 0;
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static void vdec_hevc_slice_deinit(void *h_vdec)
+{
+ struct vdec_hevc_slice_inst *inst = h_vdec;
+ struct mtk_vcodec_mem *mem;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ vdec_hevc_slice_free_mv_buf(inst);
+
+ mem = &inst->wrap_addr;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ vdec_msg_queue_deinit(&inst->ctx->msg_queue, inst->ctx);
+ kfree(inst);
+}
+
+static int vdec_hevc_slice_core_decode(struct vdec_lat_buf *lat_buf)
+{
+ int err, timeout;
+ struct mtk_vcodec_ctx *ctx = lat_buf->ctx;
+ struct vdec_hevc_slice_inst *inst = ctx->drv_handle;
+ struct vdec_hevc_slice_share_info *share_info = lat_buf->private_data;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+
+ mtk_vcodec_debug(inst, "[hevc-core] vdec_hevc core decode");
+ memcpy(&inst->vsi_core->hevc_slice_params, &share_info->hevc_slice_params,
+ sizeof(share_info->hevc_slice_params));
+
+ err = vdec_hevc_slice_setup_core_buffer(inst, share_info, lat_buf);
+ if (err)
+ goto vdec_dec_end;
+
+ vdec_hevc_slice_fill_decode_reflist(inst, &inst->vsi_core->hevc_slice_params,
+ share_info);
+ err = vpu_dec_core(vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "core decode err=%d", err);
+ goto vdec_dec_end;
+ }
+
+ /* wait decoder done interrupt */
+ timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
+ if (timeout)
+ mtk_vcodec_err(inst, "core decode timeout: pic_%d",
+ ctx->decoded_frame_cnt);
+ inst->vsi_core->dec.timeout = !!timeout;
+
+ vpu_dec_core_end(vpu);
+ mtk_vcodec_debug(inst, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+ ctx->decoded_frame_cnt,
+ inst->vsi_core->dec.crc[0], inst->vsi_core->dec.crc[1],
+ inst->vsi_core->dec.crc[2], inst->vsi_core->dec.crc[3],
+ inst->vsi_core->dec.crc[4], inst->vsi_core->dec.crc[5],
+ inst->vsi_core->dec.crc[6], inst->vsi_core->dec.crc[7]);
+
+vdec_dec_end:
+ vdec_msg_queue_update_ube_rptr(&lat_buf->ctx->msg_queue, share_info->trans.dma_addr_end);
+ ctx->dev->vdec_pdata->cap_to_disp(ctx, !!err, lat_buf->src_buf_req);
+ mtk_vcodec_debug(inst, "core decode done err=%d", err);
+ ctx->decoded_frame_cnt++;
+ return 0;
+}
+
+static int vdec_hevc_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_hevc_slice_inst *inst = h_vdec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ int err, timeout = 0;
+ unsigned int data[2];
+ struct vdec_lat_buf *lat_buf;
+ struct vdec_hevc_slice_share_info *share_info;
+
+ if (vdec_msg_queue_init(&inst->ctx->msg_queue, inst->ctx,
+ vdec_hevc_slice_core_decode,
+ sizeof(*share_info)))
+ return -ENOMEM;
+
+ /* bs NULL means flush decoder */
+ if (!bs) {
+ vdec_msg_queue_wait_lat_buf_full(&inst->ctx->msg_queue);
+ return vpu_dec_reset(vpu);
+ }
+
+ lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
+ if (!lat_buf) {
+ mtk_vcodec_debug(inst, "failed to get lat buffer");
+ return -EAGAIN;
+ }
+
+ share_info = lat_buf->private_data;
+ err = vdec_hevc_slice_fill_decode_parameters(inst, share_info);
+ if (err)
+ goto err_free_fb_out;
+
+ err = vdec_hevc_slice_setup_lat_buffer(inst, bs, lat_buf, res_chg);
+ if (err)
+ goto err_free_fb_out;
+
+ err = vpu_dec_start(vpu, data, 2);
+ if (err) {
+ mtk_vcodec_debug(inst, "lat decode err: %d", err);
+ goto err_free_fb_out;
+ }
+
+ if (IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
+ memcpy(&share_info->hevc_slice_params, &inst->vsi->hevc_slice_params,
+ sizeof(share_info->hevc_slice_params));
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
+ }
+
+ /* wait decoder done interrupt */
+ timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
+ if (timeout)
+ mtk_vcodec_err(inst, "lat decode timeout: pic_%d", inst->slice_dec_num);
+ inst->vsi->dec.timeout = !!timeout;
+
+ err = vpu_dec_end(vpu);
+ if (err == SLICE_HEADER_FULL || err == TRANS_BUFFER_FULL) {
+ if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ inst->slice_dec_num++;
+ mtk_vcodec_err(inst, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
+ return -EINVAL;
+ }
+
+ share_info->trans.dma_addr_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+ inst->vsi->dec.wdma_end_addr_offset;
+ vdec_msg_queue_update_ube_wptr(&lat_buf->ctx->msg_queue, share_info->trans.dma_addr_end);
+
+ if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
+ memcpy(&share_info->hevc_slice_params, &inst->vsi->hevc_slice_params,
+ sizeof(share_info->hevc_slice_params));
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
+ }
+ mtk_vcodec_debug(inst, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
+ inst->vsi->dec.crc[0], inst->vsi->dec.crc[1], inst->vsi->dec.crc[2]);
+
+ inst->slice_dec_num++;
+ return 0;
+err_free_fb_out:
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ mtk_vcodec_err(inst, "slice dec number: %d err: %d", inst->slice_dec_num, err);
+ return err;
+}
+
+static int vdec_hevc_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *unused, bool *res_chg)
+{
+ struct vdec_hevc_slice_inst *inst = h_vdec;
+
+ if (!h_vdec || inst->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_PURE_SINGLE_CORE)
+ return -EINVAL;
+
+ return vdec_hevc_slice_lat_decode(h_vdec, bs, unused, res_chg);
+}
+
+static int vdec_hevc_slice_get_param(void *h_vdec, enum vdec_get_param_type type,
+ void *out)
+{
+ struct vdec_hevc_slice_inst *inst = h_vdec;
+
+ switch (type) {
+ case GET_PARAM_PIC_INFO:
+ vdec_hevc_slice_get_pic_info(inst);
+ break;
+ case GET_PARAM_DPB_SIZE:
+ *(unsigned int *)out = 6;
+ break;
+ case GET_PARAM_CROP_INFO:
+ vdec_hevc_slice_get_crop_info(inst, out);
+ break;
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+const struct vdec_common_if vdec_hevc_slice_multi_if = {
+ .init = vdec_hevc_slice_init,
+ .decode = vdec_hevc_slice_decode,
+ .get_param = vdec_hevc_slice_get_param,
+ .deinit = vdec_hevc_slice_deinit,
+};
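As a reading aid for the new HEVC file above: GET_HEVC_VDEC_PARAM() and GET_HEVC_VDEC_FLAG() rely on dst_param and src_param being in scope at the call site, so a single invocation in vdec_hevc_copy_sps_params() expands roughly as shown below (illustration only, not patch content).

	/* GET_HEVC_VDEC_PARAM(chroma_format_idc) becomes: */
	{ dst_param->chroma_format_idc = src_param->chroma_format_idc; }

	/* GET_HEVC_VDEC_FLAG(amp_enabled, V4L2_HEVC_SPS_FLAG_AMP_ENABLED) becomes: */
	{ dst_param->amp_enabled =
		((src_param->flags & (V4L2_HEVC_SPS_FLAG_AMP_ENABLED)) ? (1) : (0)); }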
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
index cf16cf2807f0..c2f90848f498 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
@@ -2069,7 +2069,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf = vdec_msg_queue_dqbuf(&instance->ctx->msg_queue.lat_ctx);
if (!lat_buf) {
- mtk_vcodec_err(instance, "Failed to get VP9 lat buf\n");
+ mtk_vcodec_debug(instance, "Failed to get VP9 lat buf\n");
return -EAGAIN;
}
pfc = (struct vdec_vp9_slice_pfc *)lat_buf->private_data;
@@ -2119,7 +2119,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
vdec_msg_queue_update_ube_wptr(&ctx->msg_queue,
vsi->trans.dma_addr_end +
ctx->msg_queue.wdma_addr.dma_addr);
- vdec_msg_queue_qbuf(&ctx->dev->msg_queue_core_ctx, lat_buf);
+ vdec_msg_queue_qbuf(&ctx->msg_queue.core_ctx, lat_buf);
return 0;
err_free_fb_out:
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c
index f3807f03d880..06d393174cc2 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c
@@ -49,6 +49,14 @@ int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
ctx->dec_if = &vdec_vp9_slice_lat_if;
ctx->hw_id = IS_VDEC_LAT_ARCH(hw_arch) ? MTK_VDEC_LAT0 : MTK_VDEC_CORE;
break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ ctx->dec_if = &vdec_hevc_slice_multi_if;
+ ctx->hw_id = MTK_VDEC_LAT0;
+ break;
+ case V4L2_PIX_FMT_AV1_FRAME:
+ ctx->dec_if = &vdec_av1_slice_lat_if;
+ ctx->hw_id = MTK_VDEC_LAT0;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.h b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.h
index 076306ff2dd4..a8da6a59a6a5 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.h
+++ b/drivers/media/platform/mediatek/vcodec/vdec_drv_if.h
@@ -61,6 +61,8 @@ extern const struct vdec_common_if vdec_vp8_if;
extern const struct vdec_common_if vdec_vp8_slice_if;
extern const struct vdec_common_if vdec_vp9_if;
extern const struct vdec_common_if vdec_vp9_slice_lat_if;
+extern const struct vdec_common_if vdec_hevc_slice_multi_if;
+extern const struct vdec_common_if vdec_av1_slice_lat_if;
/**
* vdec_if_init() - initialize decode driver
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
index f3073d1e7f42..f555341ae708 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
@@ -20,6 +20,9 @@
/* the size used to store avc error information */
#define VDEC_ERR_MAP_SZ_AVC (17 * SZ_1K)
+#define VDEC_RD_MV_BUFFER_SZ (((SZ_4K * 2304 >> 4) + SZ_1K) << 1)
+#define VDEC_LAT_TILE_SZ (64 * V4L2_AV1_MAX_TILE_COUNT)
+
/* core will read the trans buffer which decoded by lat to decode again.
* The trans buffer size of FHD and 4K bitstreams are different.
*/
@@ -71,7 +74,6 @@ static void vdec_msg_queue_dec(struct vdec_msg_queue *msg_queue, int hardware_in
int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf *buf)
{
struct list_head *head;
- int status;
head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
if (!head) {
@@ -87,12 +89,9 @@ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf
if (msg_ctx->hardware_index != MTK_VDEC_CORE) {
wake_up_all(&msg_ctx->ready_to_use);
} else {
- if (buf->ctx->msg_queue.core_work_cnt <
- atomic_read(&buf->ctx->msg_queue.core_list_cnt)) {
- status = queue_work(buf->ctx->dev->core_workqueue,
- &buf->ctx->msg_queue.core_work);
- if (status)
- buf->ctx->msg_queue.core_work_cnt++;
+ if (!(buf->ctx->msg_queue.status & CONTEXT_LIST_QUEUED)) {
+ queue_work(buf->ctx->dev->core_workqueue, &buf->ctx->msg_queue.core_work);
+ buf->ctx->msg_queue.status |= CONTEXT_LIST_QUEUED;
}
}
@@ -181,49 +180,23 @@ void vdec_msg_queue_update_ube_wptr(struct vdec_msg_queue *msg_queue, uint64_t u
bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
{
- struct vdec_lat_buf *buf, *tmp;
- struct list_head *list_core[3];
- struct vdec_msg_queue_ctx *core_ctx;
- int ret, i, in_core_count = 0, count = 0;
- long timeout_jiff;
-
- core_ctx = &msg_queue->ctx->dev->msg_queue_core_ctx;
- spin_lock(&core_ctx->ready_lock);
- list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
- if (buf && buf->ctx == msg_queue->ctx) {
- list_core[in_core_count++] = &buf->core_list;
- list_del(&buf->core_list);
- }
- }
-
- for (i = 0; i < in_core_count; i++) {
- list_add(list_core[in_core_count - (1 + i)], &core_ctx->ready_queue);
- queue_work(msg_queue->ctx->dev->core_workqueue, &msg_queue->core_work);
- }
- spin_unlock(&core_ctx->ready_lock);
-
- timeout_jiff = msecs_to_jiffies(1000 * (NUM_BUFFER_COUNT + 2));
- ret = wait_event_timeout(msg_queue->ctx->msg_queue.core_dec_done,
- msg_queue->lat_ctx.ready_num == NUM_BUFFER_COUNT,
- timeout_jiff);
- if (ret) {
- mtk_v4l2_debug(3, "success to get lat buf: %d",
- msg_queue->lat_ctx.ready_num);
+ if (atomic_read(&msg_queue->lat_list_cnt) == NUM_BUFFER_COUNT) {
+ mtk_v4l2_debug(3, "wait buf full: list(%d %d) ready_num:%d status:%d",
+ atomic_read(&msg_queue->lat_list_cnt),
+ atomic_read(&msg_queue->core_list_cnt),
+ msg_queue->lat_ctx.ready_num,
+ msg_queue->status);
return true;
}
- spin_lock(&core_ctx->ready_lock);
- list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
- if (buf && buf->ctx == msg_queue->ctx) {
- count++;
- list_del(&buf->core_list);
- }
- }
- spin_unlock(&core_ctx->ready_lock);
+ msg_queue->flush_done = false;
+ vdec_msg_queue_qbuf(&msg_queue->core_ctx, &msg_queue->empty_lat_buf);
+ wait_event(msg_queue->core_dec_done, msg_queue->flush_done);
- mtk_v4l2_err("failed with lat buf isn't full: list(%d %d) count:%d",
- atomic_read(&msg_queue->lat_list_cnt),
- atomic_read(&msg_queue->core_list_cnt), count);
+ mtk_v4l2_debug(3, "flush done => ready_num:%d status:%d list(%d %d)",
+ msg_queue->lat_ctx.ready_num, msg_queue->status,
+ atomic_read(&msg_queue->lat_list_cnt),
+ atomic_read(&msg_queue->core_list_cnt));
return false;
}
@@ -249,8 +222,18 @@ void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
if (mem->va)
mtk_vcodec_mem_free(ctx, mem);
+ mem = &lat_buf->rd_mv_addr;
+ if (mem->va)
+ mtk_vcodec_mem_free(ctx, mem);
+
+ mem = &lat_buf->tile_addr;
+ if (mem->va)
+ mtk_vcodec_mem_free(ctx, mem);
+
kfree(lat_buf->private_data);
}
+
+ cancel_work_sync(&msg_queue->core_work);
}
static void vdec_msg_queue_core_work(struct work_struct *work)
@@ -261,12 +244,23 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
container_of(msg_queue, struct mtk_vcodec_ctx, msg_queue);
struct mtk_vcodec_dev *dev = ctx->dev;
struct vdec_lat_buf *lat_buf;
- int status;
- lat_buf = vdec_msg_queue_dqbuf(&dev->msg_queue_core_ctx);
+ spin_lock(&msg_queue->core_ctx.ready_lock);
+ ctx->msg_queue.status &= ~CONTEXT_LIST_QUEUED;
+ spin_unlock(&msg_queue->core_ctx.ready_lock);
+
+ lat_buf = vdec_msg_queue_dqbuf(&msg_queue->core_ctx);
if (!lat_buf)
return;
+ if (lat_buf->is_last_frame) {
+ ctx->msg_queue.status = CONTEXT_LIST_DEC_DONE;
+ msg_queue->flush_done = true;
+ wake_up(&ctx->msg_queue.core_dec_done);
+
+ return;
+ }
+
ctx = lat_buf->ctx;
mtk_vcodec_dec_enable_hardware(ctx, MTK_VDEC_CORE);
mtk_vcodec_set_curr_ctx(dev, ctx, MTK_VDEC_CORE);
@@ -277,18 +271,13 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
mtk_vcodec_dec_disable_hardware(ctx, MTK_VDEC_CORE);
vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
- wake_up_all(&ctx->msg_queue.core_dec_done);
- spin_lock(&dev->msg_queue_core_ctx.ready_lock);
- lat_buf->ctx->msg_queue.core_work_cnt--;
-
- if (lat_buf->ctx->msg_queue.core_work_cnt <
- atomic_read(&lat_buf->ctx->msg_queue.core_list_cnt)) {
- status = queue_work(lat_buf->ctx->dev->core_workqueue,
- &lat_buf->ctx->msg_queue.core_work);
- if (status)
- lat_buf->ctx->msg_queue.core_work_cnt++;
+ if (!(ctx->msg_queue.status & CONTEXT_LIST_QUEUED) &&
+ atomic_read(&msg_queue->core_list_cnt)) {
+ spin_lock(&msg_queue->core_ctx.ready_lock);
+ ctx->msg_queue.status |= CONTEXT_LIST_QUEUED;
+ spin_unlock(&msg_queue->core_ctx.ready_lock);
+ queue_work(ctx->dev->core_workqueue, &msg_queue->core_work);
}
- spin_unlock(&dev->msg_queue_core_ctx.ready_lock);
}
int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
@@ -302,14 +291,14 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
if (msg_queue->wdma_addr.size)
return 0;
- msg_queue->ctx = ctx;
- msg_queue->core_work_cnt = 0;
vdec_msg_queue_init_ctx(&msg_queue->lat_ctx, MTK_VDEC_LAT0);
+ vdec_msg_queue_init_ctx(&msg_queue->core_ctx, MTK_VDEC_CORE);
INIT_WORK(&msg_queue->core_work, vdec_msg_queue_core_work);
atomic_set(&msg_queue->lat_list_cnt, 0);
atomic_set(&msg_queue->core_list_cnt, 0);
init_waitqueue_head(&msg_queue->core_dec_done);
+ msg_queue->status = CONTEXT_LIST_EMPTY;
msg_queue->wdma_addr.size =
vde_msg_queue_get_trans_size(ctx->picinfo.buf_w,
@@ -322,6 +311,10 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
msg_queue->wdma_rptr_addr = msg_queue->wdma_addr.dma_addr;
msg_queue->wdma_wptr_addr = msg_queue->wdma_addr.dma_addr;
+ msg_queue->empty_lat_buf.ctx = ctx;
+ msg_queue->empty_lat_buf.core_decode = NULL;
+ msg_queue->empty_lat_buf.is_last_frame = true;
+
for (i = 0; i < NUM_BUFFER_COUNT; i++) {
lat_buf = &msg_queue->lat_buf[i];
@@ -339,6 +332,22 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
goto mem_alloc_err;
}
+ if (ctx->current_codec == V4L2_PIX_FMT_AV1_FRAME) {
+ lat_buf->rd_mv_addr.size = VDEC_RD_MV_BUFFER_SZ;
+ err = mtk_vcodec_mem_alloc(ctx, &lat_buf->rd_mv_addr);
+ if (err) {
+ mtk_v4l2_err("failed to allocate rd_mv_addr buf[%d]", i);
+ goto mem_alloc_err;
+ }
+
+ lat_buf->tile_addr.size = VDEC_LAT_TILE_SZ;
+ err = mtk_vcodec_mem_alloc(ctx, &lat_buf->tile_addr);
+ if (err) {
+ mtk_v4l2_err("failed to allocate tile_addr buf[%d]", i);
+ goto mem_alloc_err;
+ }
+ }
+
lat_buf->private_data = kzalloc(private_size, GFP_KERNEL);
if (!lat_buf->private_data) {
err = -ENOMEM;
@@ -347,6 +356,7 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
lat_buf->ctx = ctx;
lat_buf->core_decode = core_decode;
+ lat_buf->is_last_frame = false;
err = vdec_msg_queue_qbuf(&msg_queue->lat_ctx, lat_buf);
if (err) {
mtk_v4l2_err("failed to qbuf buf[%d]", i);
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
index a5d44bc97c16..2a745e902ad1 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
+++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
@@ -22,6 +22,18 @@ struct mtk_vcodec_dev;
typedef int (*core_decode_cb_t)(struct vdec_lat_buf *lat_buf);
/**
+ * enum core_ctx_status - Context decode status for core hardware.
+ * @CONTEXT_LIST_EMPTY: No buffer queued on core hardware (must always be 0)
+ * @CONTEXT_LIST_QUEUED: Buffer queued to core work list
+ * @CONTEXT_LIST_DEC_DONE: Context decode done
+ */
+enum core_ctx_status {
+ CONTEXT_LIST_EMPTY = 0,
+ CONTEXT_LIST_QUEUED,
+ CONTEXT_LIST_DEC_DONE,
+};
+
+/**
* struct vdec_msg_queue_ctx - represents a queue for buffers ready to be processed
* @ready_to_use: ready used queue used to signalize when get a job queue
* @ready_queue: list of ready lat buffer queues
@@ -42,6 +54,8 @@ struct vdec_msg_queue_ctx {
* struct vdec_lat_buf - lat buffer message used to store lat info for core decode
* @wdma_err_addr: wdma error address used for lat hardware
* @slice_bc_addr: slice bc address used for lat hardware
+ * @rd_mv_addr: mv addr for av1 lat hardware output, core hardware input
+ * @tile_addr: tile buffer for av1 core input
* @ts_info: need to set timestamp from output to capture
* @src_buf_req: output buffer media request object
*
@@ -50,10 +64,14 @@ struct vdec_msg_queue_ctx {
* @core_decode: different codec use different decode callback function
* @lat_list: add lat buffer to lat head list
* @core_list: add lat buffer to core head list
+ *
+ * @is_last_frame: set when this buffer is the last frame, used to flush decode
*/
struct vdec_lat_buf {
struct mtk_vcodec_mem wdma_err_addr;
struct mtk_vcodec_mem slice_bc_addr;
+ struct mtk_vcodec_mem rd_mv_addr;
+ struct mtk_vcodec_mem tile_addr;
struct vb2_v4l2_buffer ts_info;
struct media_request *src_buf_req;
@@ -62,6 +80,8 @@ struct vdec_lat_buf {
core_decode_cb_t core_decode;
struct list_head lat_list;
struct list_head core_list;
+
+ bool is_last_frame;
};
/**
@@ -72,12 +92,14 @@ struct vdec_lat_buf {
* @wdma_wptr_addr: ube write point
* @core_work: core hardware work
* @lat_ctx: used to store lat buffer list
- * @ctx: point to mtk_vcodec_ctx
+ * @core_ctx: used to store core buffer list
*
* @lat_list_cnt: used to record each instance lat list count
* @core_list_cnt: used to record each instance core list count
+ * @flush_done: core flush done status
+ * @empty_lat_buf: the dummy lat buffer queued last to flush decode
* @core_dec_done: core work queue decode done event
- * @core_work_cnt: the number of core work in work queue
+ * @status: current context decode status for core hardware
*/
struct vdec_msg_queue {
struct vdec_lat_buf lat_buf[NUM_BUFFER_COUNT];
@@ -88,12 +110,14 @@ struct vdec_msg_queue {
struct work_struct core_work;
struct vdec_msg_queue_ctx lat_ctx;
- struct mtk_vcodec_ctx *ctx;
+ struct vdec_msg_queue_ctx core_ctx;
atomic_t lat_list_cnt;
atomic_t core_list_cnt;
+ bool flush_done;
+ struct vdec_lat_buf empty_lat_buf;
wait_queue_head_t core_dec_done;
- int core_work_cnt;
+ int status;
};
/**
diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
index 5e2bc286f168..4c8f5296d120 100644
--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
@@ -562,15 +562,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
int vpu_load_firmware(struct platform_device *pdev)
{
struct mtk_vpu *vpu;
- struct device *dev = &pdev->dev;
+ struct device *dev;
struct vpu_run *run;
int ret;
if (!pdev) {
- dev_err(dev, "VPU platform device is invalid\n");
+ pr_err("VPU platform device is invalid\n");
return -EINVAL;
}
+ dev = &pdev->dev;
+
vpu = platform_get_drvdata(pdev);
run = &vpu->run;
@@ -1016,6 +1018,7 @@ static int mtk_vpu_resume(struct device *dev)
clk_prepare(vpu->clk);
ret = vpu_clock_enable(vpu);
if (ret) {
+ clk_unprepare(vpu->clk);
dev_err(dev, "failed to enable vpu clock\n");
return ret;
}
diff --git a/drivers/media/platform/nxp/imx7-media-csi.c b/drivers/media/platform/nxp/imx7-media-csi.c
index b701e823436a..0bd2613b9320 100644
--- a/drivers/media/platform/nxp/imx7-media-csi.c
+++ b/drivers/media/platform/nxp/imx7-media-csi.c
@@ -1014,39 +1014,6 @@ static int imx7_csi_enum_mbus_formats(u32 *code, u32 index)
return -EINVAL;
}
-static int imx7_csi_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
- const struct v4l2_mbus_framefmt *mbus,
- const struct imx7_csi_pixfmt *cc)
-{
- u32 width;
- u32 stride;
-
- if (!cc) {
- cc = imx7_csi_find_mbus_format(mbus->code);
- if (!cc)
- return -EINVAL;
- }
-
- /* Round up width for minimum burst size */
- width = round_up(mbus->width, 8);
-
- /* Round up stride for IDMAC line start address alignment */
- stride = round_up((width * cc->bpp) >> 3, 8);
-
- pix->width = width;
- pix->height = mbus->height;
- pix->pixelformat = cc->fourcc;
- pix->colorspace = mbus->colorspace;
- pix->xfer_func = mbus->xfer_func;
- pix->ycbcr_enc = mbus->ycbcr_enc;
- pix->quantization = mbus->quantization;
- pix->field = mbus->field;
- pix->bytesperline = stride;
- pix->sizeimage = stride * pix->height;
-
- return 0;
-}
-
/* -----------------------------------------------------------------------------
* Video Capture Device - IOCTLs
*/
@@ -1145,8 +1112,13 @@ static const struct imx7_csi_pixfmt *
__imx7_csi_video_try_fmt(struct v4l2_pix_format *pixfmt,
struct v4l2_rect *compose)
{
- struct v4l2_mbus_framefmt fmt_src;
const struct imx7_csi_pixfmt *cc;
+ u32 walign;
+
+ if (compose) {
+ compose->width = pixfmt->width;
+ compose->height = pixfmt->height;
+ }
/*
* Find the pixel format, default to the first supported format if not
@@ -1158,27 +1130,19 @@ __imx7_csi_video_try_fmt(struct v4l2_pix_format *pixfmt,
cc = imx7_csi_find_pixel_format(pixfmt->pixelformat);
}
- /* Allow IDMAC interweave but enforce field order from source. */
- if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) {
- switch (pixfmt->field) {
- case V4L2_FIELD_SEQ_TB:
- pixfmt->field = V4L2_FIELD_INTERLACED_TB;
- break;
- case V4L2_FIELD_SEQ_BT:
- pixfmt->field = V4L2_FIELD_INTERLACED_BT;
- break;
- default:
- break;
- }
- }
+ /*
+ * The width alignment is 8 bytes as indicated by the
+ * CSI_IMAG_PARA.IMAGE_WIDTH documentation. Convert it to pixels.
+ *
+ * TODO: Implement configurable stride support.
+ */
+ walign = 8 * 8 / cc->bpp;
+ v4l_bound_align_image(&pixfmt->width, 1, 0xffff, walign,
+ &pixfmt->height, 1, 0xffff, 1, 0);
- v4l2_fill_mbus_format(&fmt_src, pixfmt, 0);
- imx7_csi_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src, cc);
-
- if (compose) {
- compose->width = fmt_src.width;
- compose->height = fmt_src.height;
- }
+ pixfmt->bytesperline = pixfmt->width * cc->bpp / 8;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+ pixfmt->field = V4L2_FIELD_NONE;
return cc;
}
@@ -1606,22 +1570,14 @@ static struct imx7_csi_vb2_buffer *imx7_csi_video_next_buf(struct imx7_csi *csi)
return buf;
}
-static int imx7_csi_video_init_format(struct imx7_csi *csi)
+static void imx7_csi_video_init_format(struct imx7_csi *csi)
{
- struct v4l2_mbus_framefmt format = { };
-
- format.code = IMX7_CSI_DEF_MBUS_CODE;
- format.width = IMX7_CSI_DEF_PIX_WIDTH;
- format.height = IMX7_CSI_DEF_PIX_HEIGHT;
- format.field = V4L2_FIELD_NONE;
-
- imx7_csi_mbus_fmt_to_pix_fmt(&csi->vdev_fmt, &format, NULL);
- csi->vdev_compose.width = format.width;
- csi->vdev_compose.height = format.height;
+ struct v4l2_pix_format *pixfmt = &csi->vdev_fmt;
- csi->vdev_cc = imx7_csi_find_pixel_format(csi->vdev_fmt.pixelformat);
+ pixfmt->width = IMX7_CSI_DEF_PIX_WIDTH;
+ pixfmt->height = IMX7_CSI_DEF_PIX_HEIGHT;
- return 0;
+ csi->vdev_cc = __imx7_csi_video_try_fmt(pixfmt, &csi->vdev_compose);
}
static int imx7_csi_video_register(struct imx7_csi *csi)
@@ -1634,9 +1590,7 @@ static int imx7_csi_video_register(struct imx7_csi *csi)
vdev->v4l2_dev = v4l2_dev;
/* Initialize the default format and compose rectangle. */
- ret = imx7_csi_video_init_format(csi);
- if (ret < 0)
- return ret;
+ imx7_csi_video_init_format(csi);
/* Register the video device. */
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
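
The new __imx7_csi_video_try_fmt() converts the controller's 8-byte width alignment into a pixel alignment with walign = 8 * 8 / cc->bpp before calling v4l_bound_align_image(). The standalone program below (plain userspace C, with example bpp values chosen only for illustration) just walks through that arithmetic.

/* Standalone illustration of the width-alignment math used above. */
#include <stdio.h>

int main(void)
{
	unsigned int bpps[] = { 8, 16 };	/* example bits-per-pixel values */
	unsigned int i;

	for (i = 0; i < sizeof(bpps) / sizeof(bpps[0]); i++) {
		unsigned int walign = 8 * 8 / bpps[i];	/* 8-byte alignment in pixels */

		printf("bpp=%u -> width alignment = %u pixels\n", bpps[i], walign);
	}
	return 0;	/* prints 8 pixels for 8 bpp, 4 pixels for 16 bpp */
}
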
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
index b5ffde46f31b..f7447b2f4d77 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
@@ -223,7 +223,7 @@ static int mxc_isi_crossbar_init_cfg(struct v4l2_subdev *sd,
route->sink_pad = i;
route->source_pad = i + xbar->num_sinks;
route->flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE;
- };
+ }
routing.num_routes = xbar->num_sources;
routing.routes = routes;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index e0832f3f4f25..06c95568e5af 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -1541,7 +1541,11 @@ int msm_vfe_register_entities(struct vfe_device *vfe,
}
video_out->ops = &vfe->video_ops;
- video_out->bpl_alignment = 8;
+ if (vfe->camss->version == CAMSS_845 ||
+ vfe->camss->version == CAMSS_8250)
+ video_out->bpl_alignment = 16;
+ else
+ video_out->bpl_alignment = 8;
video_out->line_based = 0;
if (i == VFE_LINE_PIX) {
video_out->bpl_alignment = 16;
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 4f81669986ba..320bde0f83cb 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -83,6 +83,23 @@ struct venus_resources {
const char *fwname;
};
+enum venus_fmt {
+ VENUS_FMT_NV12 = 0,
+ VENUS_FMT_QC08C = 1,
+ VENUS_FMT_QC10C = 2,
+ VENUS_FMT_P010 = 3,
+ VENUS_FMT_H264 = 4,
+ VENUS_FMT_VP8 = 5,
+ VENUS_FMT_VP9 = 6,
+ VENUS_FMT_HEVC = 7,
+ VENUS_FMT_VC1_ANNEX_G = 8,
+ VENUS_FMT_VC1_ANNEX_L = 9,
+ VENUS_FMT_MPEG4 = 10,
+ VENUS_FMT_MPEG2 = 11,
+ VENUS_FMT_H263 = 12,
+ VENUS_FMT_XVID = 13,
+};
+
struct venus_format {
u32 pixfmt;
unsigned int num_planes;
@@ -201,6 +218,11 @@ struct venus_core {
unsigned int core0_usage_count;
unsigned int core1_usage_count;
struct dentry *root;
+ struct venus_img_version {
+ u32 major;
+ u32 minor;
+ u32 rev;
+ } venus_ver;
};
struct vdec_controls {
@@ -389,6 +411,7 @@ enum venus_inst_modes {
* @sequence_out: a sequence counter for output queue
* @m2m_dev: a reference to m2m device structure
* @m2m_ctx: a reference to m2m context structure
+ * @ctx_q_lock: a lock to serialize video device ioctl calls
* @state: current state of the instance
* @done: a completion for sync HFI operation
* @error: an error returned during last HFI sync operation
@@ -460,6 +483,7 @@ struct venus_inst {
u32 sequence_out;
struct v4l2_m2m_dev *m2m_dev;
struct v4l2_m2m_ctx *m2m_ctx;
+ struct mutex ctx_q_lock;
unsigned int state;
struct completion done;
unsigned int error;
@@ -508,4 +532,19 @@ venus_caps_by_codec(struct venus_core *core, u32 codec, u32 domain)
return NULL;
}
+static inline bool
+is_fw_rev_or_newer(struct venus_core *core, u32 vmajor, u32 vminor, u32 vrev)
+{
+ return ((core)->venus_ver.major == vmajor &&
+ (core)->venus_ver.minor == vminor &&
+ (core)->venus_ver.rev >= vrev);
+}
+
+static inline bool
+is_fw_rev_or_older(struct venus_core *core, u32 vmajor, u32 vminor, u32 vrev)
+{
+ return ((core)->venus_ver.major == vmajor &&
+ (core)->venus_ver.minor == vminor &&
+ (core)->venus_ver.rev <= vrev);
+}
#endif
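
Note the semantics of the two new firmware-version helpers: they only compare the revision when the major and minor numbers match exactly, so a build from a different major.minor line is never treated as "newer" or "older". The userspace replica below (with made-up version numbers) demonstrates that behaviour.

/* Userspace replica of is_fw_rev_or_newer(); version numbers are examples only. */
#include <stdbool.h>
#include <stdio.h>

struct ver { unsigned int major, minor, rev; };

static bool rev_or_newer(struct ver v, unsigned int ma, unsigned int mi, unsigned int re)
{
	return v.major == ma && v.minor == mi && v.rev >= re;
}

int main(void)
{
	struct ver fw = { 5, 4, 60 };

	printf("%d\n", rev_or_newer(fw, 5, 4, 51));	/* 1: same major.minor, newer rev */
	printf("%d\n", rev_or_newer(fw, 1, 0, 87));	/* 0: different major.minor line */
	return 0;
}
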
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index a2ceab7f9ddb..1822e85ab6bf 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -502,7 +502,6 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
struct vb2_buffer *vb = &vbuf->vb2_buf;
unsigned int type = vb->type;
struct hfi_frame_data fdata;
- int ret;
memset(&fdata, 0, sizeof(fdata));
fdata.alloc_len = buf->size;
@@ -533,11 +532,7 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
fdata.offset = 0;
}
- ret = hfi_session_process_buf(inst, &fdata);
- if (ret)
- return ret;
-
- return 0;
+ return hfi_session_process_buf(inst, &fdata);
}
static bool is_dynamic_bufmode(struct venus_inst *inst)
@@ -612,6 +607,8 @@ static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
return HFI_COLOR_FORMAT_NV12_UBWC;
case V4L2_PIX_FMT_QC10C:
return HFI_COLOR_FORMAT_YUV420_TP10_UBWC;
+ case V4L2_PIX_FMT_P010:
+ return HFI_COLOR_FORMAT_P010;
default:
break;
}
@@ -639,12 +636,16 @@ static int platform_get_bufreq(struct venus_inst *inst, u32 buftype,
if (is_dec) {
params.width = inst->width;
params.height = inst->height;
+ params.out_width = inst->out_width;
+ params.out_height = inst->out_height;
params.codec = inst->fmt_out->pixfmt;
params.hfi_color_fmt = to_hfi_raw_fmt(inst->fmt_cap->pixfmt);
params.dec.max_mbs_per_frame = mbs_per_frame_max(inst);
params.dec.buffer_size_limit = 0;
params.dec.is_secondary_output =
inst->opb_buftype == HFI_BUFFER_OUTPUT2;
+ if (params.dec.is_secondary_output)
+ params.hfi_dpb_color_fmt = inst->dpb_fmt;
params.dec.is_interlaced =
inst->pic_struct != HFI_INTERLACE_FRAME_PROGRESSIVE;
} else {
@@ -1036,8 +1037,8 @@ static u32 get_framesize_raw_yuv420_tp10_ubwc(u32 width, u32 height)
u32 extradata = SZ_16K;
u32 size;
- y_stride = ALIGN(ALIGN(width, 192) * 4 / 3, 256);
- uv_stride = ALIGN(ALIGN(width, 192) * 4 / 3, 256);
+ y_stride = ALIGN(width * 4 / 3, 256);
+ uv_stride = ALIGN(width * 4 / 3, 256);
y_sclines = ALIGN(height, 16);
uv_sclines = ALIGN((height + 1) >> 1, 16);
@@ -1764,6 +1765,22 @@ int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
if (!caps)
return -EINVAL;
+ if (inst->bit_depth == VIDC_BITDEPTH_10 && inst->session_type == VIDC_SESSION_TYPE_DEC) {
+ found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
+ HFI_COLOR_FORMAT_YUV420_TP10_UBWC);
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
+ if (found_ubwc && found) {
+ /*
+ * Hard-code DPB buffers to be 10bit UBWC
+ * until V4L2 is able to expose compressed/tiled
+ * formats to applications.
+ */
+ *out_fmt = HFI_COLOR_FORMAT_YUV420_TP10_UBWC;
+ *out2_fmt = fmt;
+ return 0;
+ }
+ }
+
if (ubwc) {
ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index bc3f8ff05840..7f0802a5518c 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -83,7 +83,7 @@ int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
res->size = size;
res->mem = addr;
pkt->resource_type = HFI_RESOURCE_OCMEM;
- pkt->hdr.size += sizeof(*res) - sizeof(u32);
+ pkt->hdr.size += sizeof(*res);
break;
}
case VIDC_RESOURCE_NONE:
@@ -200,8 +200,8 @@ int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
struct hfi_buffer_info *bi;
pkt->extradata_size = bd->extradata_size;
- pkt->shdr.hdr.size = sizeof(*pkt) - sizeof(u32) +
- (bd->num_buffers * sizeof(*bi));
+ pkt->shdr.hdr.size = sizeof(*pkt) +
+ bd->num_buffers * sizeof(*bi);
bi = (struct hfi_buffer_info *)pkt->buffer_info;
for (i = 0; i < pkt->num_buffers; i++) {
bi->buffer_addr = bd->device_addr;
@@ -209,8 +209,8 @@ int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
}
} else {
pkt->extradata_size = 0;
- pkt->shdr.hdr.size = sizeof(*pkt) +
- ((bd->num_buffers - 1) * sizeof(u32));
+ pkt->shdr.hdr.size = struct_size(pkt, buffer_info,
+ bd->num_buffers);
for (i = 0; i < pkt->num_buffers; i++)
pkt->buffer_info[i] = bd->device_addr;
}
@@ -243,16 +243,16 @@ int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
bi->extradata_addr = bd->extradata_addr;
}
pkt->shdr.hdr.size =
- sizeof(struct hfi_session_set_buffers_pkt) -
- sizeof(u32) + (bd->num_buffers * sizeof(*bi));
+ sizeof(struct hfi_session_set_buffers_pkt) +
+ bd->num_buffers * sizeof(*bi);
} else {
for (i = 0; i < pkt->num_buffers; i++)
pkt->buffer_info[i] = bd->device_addr;
pkt->extradata_size = 0;
pkt->shdr.hdr.size =
- sizeof(struct hfi_session_set_buffers_pkt) +
- ((bd->num_buffers - 1) * sizeof(u32));
+ struct_size((struct hfi_session_set_buffers_pkt *)0,
+ buffer_info, bd->num_buffers);
}
pkt->response_req = bd->response_required;
@@ -521,6 +521,7 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
+ case HFI_PROPERTY_PARAM_VDEC_ENABLE_SUFFICIENT_SEQCHANGE_EVENT:
case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: {
struct hfi_enable *in = pdata;
struct hfi_enable *en = prop_data;
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h
index 99bc0b6db67c..dd9c5066442d 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.h
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.h
@@ -56,7 +56,7 @@ struct hfi_sys_set_resource_pkt {
struct hfi_pkt_hdr hdr;
u32 resource_handle;
u32 resource_type;
- u32 resource_data[1];
+ u32 resource_data[];
};
struct hfi_sys_release_resource_pkt {
@@ -117,7 +117,7 @@ struct hfi_session_set_buffers_pkt {
u32 extradata_size;
u32 min_buffer_size;
u32 num_buffers;
- u32 buffer_info[1];
+ u32 buffer_info[];
};
struct hfi_session_get_sequence_header_pkt {
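
The hfi_cmds changes above go together: once buffer_info[] and resource_data[] become C99 flexible array members they no longer contribute to sizeof(*pkt), so the old "- sizeof(u32)" correction disappears and struct_size() can compute the packet length directly. The small standalone program below, using stand-in structs rather than the real HFI packets, shows that the two formulas yield the same byte count.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* stand-ins for the old one-element-array and new flexible-array layouts */
struct pkt_old { uint32_t hdr[4]; uint32_t buffer_info[1]; };
struct pkt_new { uint32_t hdr[4]; uint32_t buffer_info[]; };

int main(void)
{
	unsigned int n = 8;	/* example buffer count */
	size_t old_sz = sizeof(struct pkt_old) - sizeof(uint32_t) + n * sizeof(uint32_t);
	size_t new_sz = sizeof(struct pkt_new) + n * sizeof(uint32_t);

	/* both print the same total, which is what struct_size() computes in-kernel */
	printf("old formula: %zu bytes, flexible array: %zu bytes\n", old_sz, new_sz);
	return 0;
}
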
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index 105792a68060..0abbc50c5864 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -469,6 +469,8 @@
#define HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH 0x1003007
#define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT 0x1003009
#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE 0x100300a
+#define HFI_PROPERTY_PARAM_VDEC_ENABLE_SUFFICIENT_SEQCHANGE_EVENT \
+ 0x100300b
/*
* HFI_PROPERTY_CONFIG_VDEC_COMMON_START
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index df96db3761a7..3d5dadfa1900 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -233,7 +233,7 @@ static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
goto done;
}
- rem_bytes = pkt->hdr.size - sizeof(*pkt) + sizeof(u32);
+ rem_bytes = pkt->hdr.size - sizeof(*pkt);
if (rem_bytes <= 0) {
/* missing property data */
error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
@@ -248,13 +248,15 @@ done:
}
static void
-sys_get_prop_image_version(struct device *dev,
+sys_get_prop_image_version(struct venus_core *core,
struct hfi_msg_sys_property_info_pkt *pkt)
{
+ struct device *dev = core->dev;
u8 *smem_tbl_ptr;
u8 *img_ver;
int req_bytes;
size_t smem_blk_sz;
+ int ret;
req_bytes = pkt->hdr.size - sizeof(*pkt);
@@ -263,8 +265,30 @@ sys_get_prop_image_version(struct device *dev,
return;
img_ver = pkt->data;
+ if (!img_ver)
+ return;
+
+ ret = sscanf(img_ver, "14:video-firmware.%u.%u-%u",
+ &core->venus_ver.major, &core->venus_ver.minor, &core->venus_ver.rev);
+ if (ret)
+ goto done;
+
+ ret = sscanf(img_ver, "14:VIDEO.VPU.%u.%u-%u",
+ &core->venus_ver.major, &core->venus_ver.minor, &core->venus_ver.rev);
+ if (ret)
+ goto done;
+
+ ret = sscanf(img_ver, "14:VIDEO.VE.%u.%u-%u",
+ &core->venus_ver.major, &core->venus_ver.minor, &core->venus_ver.rev);
+ if (ret)
+ goto done;
- dev_dbg(dev, VDBGL "F/W version: %s\n", img_ver);
+ dev_err(dev, VDBGL "error reading F/W version\n");
+ return;
+
+done:
+ dev_dbg(dev, VDBGL "F/W version: %s, major %u, minor %u, revision %u\n",
+ img_ver, core->venus_ver.major, core->venus_ver.minor, core->venus_ver.rev);
smem_tbl_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
SMEM_IMG_VER_TBL, &smem_blk_sz);
@@ -286,7 +310,7 @@ static void hfi_sys_property_info(struct venus_core *core,
switch (pkt->property) {
case HFI_PROPERTY_SYS_IMAGE_VERSION:
- sys_get_prop_image_version(dev, pkt);
+ sys_get_prop_image_version(core, pkt);
break;
default:
dev_dbg(dev, VDBGL "unknown property data\n");
@@ -434,7 +458,7 @@ static void hfi_session_init_done(struct venus_core *core,
if (!IS_V1(core))
goto done;
- rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt) + sizeof(u32);
+ rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
if (rem_bytes <= 0) {
error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
goto done;
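
sys_get_prop_image_version() now relies on sscanf() returning the number of successful conversions: a non-zero return means one of the known firmware naming patterns matched and the parsed major/minor/revision land in core->venus_ver. The userspace sketch below mirrors that logic against a sample version string made up for illustration.

#include <stdio.h>

int main(void)
{
	const char *img_ver = "14:VIDEO.VE.5.4-51";	/* assumed example string */
	unsigned int major, minor, rev;

	/* sscanf() returns the number of conversions; non-zero means a pattern matched */
	if (sscanf(img_ver, "14:video-firmware.%u.%u-%u", &major, &minor, &rev) ||
	    sscanf(img_ver, "14:VIDEO.VPU.%u.%u-%u", &major, &minor, &rev) ||
	    sscanf(img_ver, "14:VIDEO.VE.%u.%u-%u", &major, &minor, &rev))
		printf("major %u minor %u rev %u\n", major, minor, rev);
	else
		printf("unrecognized version string\n");

	return 0;
}
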
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.h b/drivers/media/platform/qcom/venus/hfi_msgs.h
index 510513697335..8c2e17b0d36f 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.h
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.h
@@ -50,7 +50,7 @@ struct hfi_msg_event_notify_pkt {
u32 event_id;
u32 event_data1;
u32 event_data2;
- u32 ext_event_data[1];
+ u32 ext_event_data[];
};
struct hfi_msg_event_release_buffer_ref_pkt {
@@ -63,7 +63,7 @@ struct hfi_msg_sys_init_done_pkt {
struct hfi_pkt_hdr hdr;
u32 error_type;
u32 num_properties;
- u32 data[1];
+ u32 data[];
};
struct hfi_msg_sys_pc_prep_done_pkt {
@@ -81,7 +81,7 @@ struct hfi_msg_session_init_done_pkt {
struct hfi_session_hdr_pkt shdr;
u32 error_type;
u32 num_properties;
- u32 data[1];
+ u32 data[];
};
struct hfi_msg_session_end_done_pkt {
@@ -228,7 +228,7 @@ struct hfi_msg_session_parse_sequence_header_done_pkt {
struct hfi_session_hdr_pkt shdr;
u32 error_type;
u32 num_properties;
- u32 data[1];
+ u32 data[];
};
struct hfi_msg_session_property_info_pkt {
@@ -247,7 +247,7 @@ struct hfi_msg_session_release_buffers_done_pkt {
struct hfi_session_hdr_pkt shdr;
u32 error_type;
u32 num_buffers;
- u32 buffer_info[1];
+ u32 buffer_info[];
};
struct hfi_msg_sys_debug_pkt {
@@ -256,7 +256,7 @@ struct hfi_msg_sys_debug_pkt {
u32 msg_size;
u32 time_stamp_hi;
u32 time_stamp_lo;
- u8 msg_data[1];
+ u8 msg_data[];
};
struct hfi_msg_sys_coverage_pkt {
@@ -264,7 +264,7 @@ struct hfi_msg_sys_coverage_pkt {
u32 msg_size;
u32 time_stamp_hi;
u32 time_stamp_lo;
- u8 msg_data[1];
+ u8 msg_data[];
};
struct venus_core;
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs.h b/drivers/media/platform/qcom/venus/hfi_plat_bufs.h
index 52a51a3b964a..25e607452096 100644
--- a/drivers/media/platform/qcom/venus/hfi_plat_bufs.h
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs.h
@@ -12,8 +12,11 @@
struct hfi_plat_buffers_params {
u32 width;
u32 height;
+ u32 out_width;
+ u32 out_height;
u32 codec;
u32 hfi_color_fmt;
+ u32 hfi_dpb_color_fmt;
enum hfi_version version;
u32 num_vpp_pipes;
union {
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
index a9be31ec6927..e97ff8cf6d64 100644
--- a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
@@ -1185,6 +1185,7 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
enum hfi_version version = params->version;
u32 codec = params->codec;
u32 width = params->width, height = params->height, out_min_count;
+ u32 out_width = params->out_width, out_height = params->out_height;
struct dec_bufsize_ops *dec_ops;
bool is_secondary_output = params->dec.is_secondary_output;
bool is_interlaced = params->dec.is_interlaced;
@@ -1230,12 +1231,16 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
calculate_dec_input_frame_size(width, height, codec,
max_mbs_per_frame,
buffer_size_limit);
- } else if (buftype == HFI_BUFFER_OUTPUT ||
- buftype == HFI_BUFFER_OUTPUT2) {
+ } else if (buftype == HFI_BUFFER_OUTPUT || buftype == HFI_BUFFER_OUTPUT2) {
bufreq->count_min = out_min_count;
bufreq->size =
venus_helper_get_framesz_raw(params->hfi_color_fmt,
- width, height);
+ out_width, out_height);
+ if (buftype == HFI_BUFFER_OUTPUT &&
+ params->dec.is_secondary_output)
+ bufreq->size =
+ venus_helper_get_framesz_raw(params->hfi_dpb_color_fmt,
+ out_width, out_height);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) {
bufreq->size = dec_ops->scratch(width, height, is_interlaced);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) {
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 2ad40b3945b0..f0b46389e8d5 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -835,34 +835,24 @@ static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
{
struct hfi_sys_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
- int ret;
pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
- ret = venus_iface_cmdq_write(hdev, pkt, false);
- if (ret)
- return ret;
-
- return 0;
+ return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
{
struct hfi_sys_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
- int ret;
pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt_sys_coverage_config(pkt, mode);
- ret = venus_iface_cmdq_write(hdev, pkt, false);
- if (ret)
- return ret;
-
- return 0;
+ return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
@@ -870,7 +860,6 @@ static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
{
struct hfi_sys_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
- int ret;
if (!enable)
return 0;
@@ -879,11 +868,7 @@ static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
pkt_sys_idle_indicator(pkt, enable);
- ret = venus_iface_cmdq_write(hdev, pkt, false);
- if (ret)
- return ret;
-
- return 0;
+ return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
@@ -891,17 +876,12 @@ static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
{
struct hfi_sys_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
- int ret;
pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt_sys_power_control(pkt, enable);
- ret = venus_iface_cmdq_write(hdev, pkt, false);
- if (ret)
- return ret;
-
- return 0;
+ return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_sys_set_ubwc_config(struct venus_hfi_device *hdev)
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 51a53bf82bd3..f5676440dd36 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -30,69 +30,87 @@
* - future firmware versions could add support for >1 planes
*/
static const struct venus_format vdec_formats[] = {
- {
+ [VENUS_FMT_NV12] = {
.pixfmt = V4L2_PIX_FMT_NV12,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
- }, {
+ },
+ [VENUS_FMT_QC08C] = {
.pixfmt = V4L2_PIX_FMT_QC08C,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
- }, {
+ },
+ [VENUS_FMT_QC10C] = {
.pixfmt = V4L2_PIX_FMT_QC10C,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
- }, {
- .pixfmt = V4L2_PIX_FMT_MPEG4,
+ },
+ [VENUS_FMT_P010] = {
+ .pixfmt = V4L2_PIX_FMT_P010,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+ [VENUS_FMT_H264] = {
+ .pixfmt = V4L2_PIX_FMT_H264,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- }, {
- .pixfmt = V4L2_PIX_FMT_MPEG2,
+ },
+ [VENUS_FMT_VP8] = {
+ .pixfmt = V4L2_PIX_FMT_VP8,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- }, {
- .pixfmt = V4L2_PIX_FMT_H263,
+ },
+ [VENUS_FMT_VP9] = {
+ .pixfmt = V4L2_PIX_FMT_VP9,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- }, {
- .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_G,
+ },
+ [VENUS_FMT_HEVC] = {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- }, {
- .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L,
+ },
+ [VENUS_FMT_VC1_ANNEX_G] = {
+ .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_G,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- }, {
- .pixfmt = V4L2_PIX_FMT_H264,
+ },
+ [VENUS_FMT_VC1_ANNEX_L] = {
+ .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- }, {
- .pixfmt = V4L2_PIX_FMT_VP8,
+ },
+ [VENUS_FMT_MPEG4] = {
+ .pixfmt = V4L2_PIX_FMT_MPEG4,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- }, {
- .pixfmt = V4L2_PIX_FMT_VP9,
+ },
+ [VENUS_FMT_MPEG2] = {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- }, {
- .pixfmt = V4L2_PIX_FMT_XVID,
+ },
+ [VENUS_FMT_H263] = {
+ .pixfmt = V4L2_PIX_FMT_H263,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- }, {
- .pixfmt = V4L2_PIX_FMT_HEVC,
+ },
+ [VENUS_FMT_XVID] = {
+ .pixfmt = V4L2_PIX_FMT_XVID,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
+
};
static const struct venus_format *
@@ -551,7 +569,7 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
fdata.buffer_type = HFI_BUFFER_INPUT;
fdata.flags |= HFI_BUFFERFLAG_EOS;
- if (IS_V6(inst->core))
+ if (IS_V6(inst->core) && is_fw_rev_or_older(inst->core, 1, 0, 87))
fdata.device_addr = 0;
else
fdata.device_addr = 0xdeadb000;
@@ -684,6 +702,14 @@ static int vdec_set_properties(struct venus_inst *inst)
return ret;
}
+ /* Enabling sufficient sequence change support for VP9 */
+ if (is_fw_rev_or_newer(inst->core, 5, 4, 51)) {
+ ptype = HFI_PROPERTY_PARAM_VDEC_ENABLE_SUFFICIENT_SEQCHANGE_EVENT;
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret)
+ return ret;
+ }
+
ptype = HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR;
conceal = ctr->conceal_color & 0xffff;
conceal |= ((ctr->conceal_color >> 16) & 0xffff) << 10;
@@ -710,6 +736,9 @@ static int vdec_set_work_route(struct venus_inst *inst)
}
#define is_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_UBWC_BASE))
+#define is_10bit_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_10_BIT_BASE & \
+ HFI_COLOR_FORMAT_UBWC_BASE))
+
static int vdec_output_conf(struct venus_inst *inst)
{
@@ -757,7 +786,7 @@ static int vdec_output_conf(struct venus_inst *inst)
inst->opb_fmt = out2_fmt;
inst->dpb_buftype = HFI_BUFFER_OUTPUT;
inst->dpb_fmt = out_fmt;
- } else if (is_ubwc_fmt(out2_fmt)) {
+ } else if (is_ubwc_fmt(out2_fmt) || is_10bit_ubwc_fmt(out_fmt)) {
inst->opb_buftype = HFI_BUFFER_OUTPUT;
inst->opb_fmt = out_fmt;
inst->dpb_buftype = HFI_BUFFER_OUTPUT2;
@@ -1474,8 +1503,13 @@ static void vdec_event_change(struct venus_inst *inst,
inst->out_width = ev_data->width;
inst->out_height = ev_data->height;
- if (inst->bit_depth != ev_data->bit_depth)
+ if (inst->bit_depth != ev_data->bit_depth) {
inst->bit_depth = ev_data->bit_depth;
+ if (inst->bit_depth == VIDC_BITDEPTH_10)
+ inst->fmt_cap = &vdec_formats[VENUS_FMT_P010];
+ else
+ inst->fmt_cap = &vdec_formats[VENUS_FMT_NV12];
+ }
if (inst->pic_struct != ev_data->pic_struct)
inst->pic_struct = ev_data->pic_struct;
@@ -1567,8 +1601,8 @@ static const struct hfi_inst_ops vdec_hfi_ops = {
static void vdec_inst_init(struct venus_inst *inst)
{
inst->hfi_codec = HFI_VIDEO_CODEC_H264;
- inst->fmt_out = &vdec_formats[8];
- inst->fmt_cap = &vdec_formats[0];
+ inst->fmt_out = &vdec_formats[VENUS_FMT_H264];
+ inst->fmt_cap = &vdec_formats[VENUS_FMT_NV12];
inst->width = frame_width_min(inst);
inst->height = ALIGN(frame_height_min(inst), 32);
inst->crop.left = 0;
@@ -1609,6 +1643,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->allow_zero_bytesused = 1;
src_vq->min_buffers_needed = 0;
src_vq->dev = inst->core->dev;
+ src_vq->lock = &inst->ctx_q_lock;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
@@ -1623,6 +1658,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->allow_zero_bytesused = 1;
dst_vq->min_buffers_needed = 0;
dst_vq->dev = inst->core->dev;
+ dst_vq->lock = &inst->ctx_q_lock;
return vb2_queue_init(dst_vq);
}
@@ -1641,6 +1677,7 @@ static int vdec_open(struct file *file)
INIT_LIST_HEAD(&inst->internalbufs);
INIT_LIST_HEAD(&inst->list);
mutex_init(&inst->lock);
+ mutex_init(&inst->ctx_q_lock);
inst->core = core;
inst->session_type = VIDC_SESSION_TYPE_DEC;
@@ -1716,6 +1753,7 @@ static int vdec_close(struct file *file)
ida_destroy(&inst->dpb_ids);
hfi_session_destroy(inst);
mutex_destroy(&inst->lock);
+ mutex_destroy(&inst->ctx_q_lock);
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
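
Switching vdec_formats[] (and venc_formats[] below) to designated initializers indexed by enum venus_fmt removes the fragile positional lookups such as &vdec_formats[8]. A minimal standalone example of the idiom, with invented names, is:

#include <stdio.h>

enum demo_fmt { DEMO_FMT_NV12, DEMO_FMT_H264, DEMO_FMT_HEVC };

/* designated initializers keep each entry tied to its enum index */
static const char *const demo_formats[] = {
	[DEMO_FMT_NV12] = "NV12",
	[DEMO_FMT_H264] = "H264",
	[DEMO_FMT_HEVC] = "HEVC",
};

int main(void)
{
	/* look up by name instead of by position */
	printf("default output format: %s\n", demo_formats[DEMO_FMT_H264]);
	return 0;
}
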
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 4666f42feea3..6d773b000e8a 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -32,28 +32,33 @@
* - future firmware versions could add support for >1 planes
*/
static const struct venus_format venc_formats[] = {
- {
+ [VENUS_FMT_NV12] = {
.pixfmt = V4L2_PIX_FMT_NV12,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
- }, {
- .pixfmt = V4L2_PIX_FMT_MPEG4,
+ },
+ [VENUS_FMT_H264] = {
+ .pixfmt = V4L2_PIX_FMT_H264,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
- }, {
- .pixfmt = V4L2_PIX_FMT_H263,
+ },
+ [VENUS_FMT_VP8] = {
+ .pixfmt = V4L2_PIX_FMT_VP8,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
- }, {
- .pixfmt = V4L2_PIX_FMT_H264,
+ },
+ [VENUS_FMT_HEVC] = {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
- }, {
- .pixfmt = V4L2_PIX_FMT_VP8,
+ },
+ [VENUS_FMT_MPEG4] = {
+ .pixfmt = V4L2_PIX_FMT_MPEG4,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
- }, {
- .pixfmt = V4L2_PIX_FMT_HEVC,
+ },
+ [VENUS_FMT_H263] = {
+ .pixfmt = V4L2_PIX_FMT_H263,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
@@ -1395,6 +1400,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->allow_zero_bytesused = 1;
src_vq->min_buffers_needed = 1;
src_vq->dev = inst->core->dev;
+ src_vq->lock = &inst->ctx_q_lock;
if (inst->core->res->hfi_version == HFI_VERSION_1XX)
src_vq->bidirectional = 1;
ret = vb2_queue_init(src_vq);
@@ -1411,13 +1417,14 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->allow_zero_bytesused = 1;
dst_vq->min_buffers_needed = 1;
dst_vq->dev = inst->core->dev;
+ dst_vq->lock = &inst->ctx_q_lock;
return vb2_queue_init(dst_vq);
}
static void venc_inst_init(struct venus_inst *inst)
{
- inst->fmt_cap = &venc_formats[3];
- inst->fmt_out = &venc_formats[0];
+ inst->fmt_cap = &venc_formats[VENUS_FMT_H264];
+ inst->fmt_out = &venc_formats[VENUS_FMT_NV12];
inst->width = 1280;
inst->height = ALIGN(720, 32);
inst->out_width = 1280;
@@ -1443,6 +1450,7 @@ static int venc_open(struct file *file)
INIT_LIST_HEAD(&inst->internalbufs);
INIT_LIST_HEAD(&inst->list);
mutex_init(&inst->lock);
+ mutex_init(&inst->ctx_q_lock);
inst->core = core;
inst->session_type = VIDC_SESSION_TYPE_ENC;
@@ -1512,6 +1520,7 @@ static int venc_close(struct file *file)
venc_ctrl_deinit(inst);
hfi_session_destroy(inst);
mutex_destroy(&inst->lock);
+ mutex_destroy(&inst->ctx_q_lock);
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp.c
index f666b621338d..fee1a066f56b 100644
--- a/drivers/media/platform/renesas/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp.c
@@ -430,6 +430,7 @@ static int risp_probe_resources(struct rcar_isp *isp,
static const struct of_device_id risp_of_id_table[] = {
{ .compatible = "renesas,r8a779a0-isp" },
+ { .compatible = "renesas,r8a779g0-isp" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, risp_of_id_table);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index ff4bde9cc0e3..3c4f5eb93be1 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -1284,6 +1284,15 @@ static const struct rvin_info rcar_info_r8a779a0 = {
.max_height = 4096,
};
+static const struct rvin_info rcar_info_r8a779g0 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .use_isp = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+};
+
static const struct of_device_id rvin_of_id_table[] = {
{
.compatible = "renesas,vin-r8a774a1",
@@ -1349,6 +1358,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.compatible = "renesas,vin-r8a779a0",
.data = &rcar_info_r8a779a0,
},
+ {
+ .compatible = "renesas,vin-r8a779g0",
+ .data = &rcar_info_r8a779g0,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, rvin_of_id_table);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
index e34060c2b039..7a134c0eff57 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
@@ -483,11 +483,15 @@ enum rcar_csi2_pads {
struct rcar_csi2_info {
int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps);
int (*phy_post_init)(struct rcar_csi2 *priv);
+ int (*start_receiver)(struct rcar_csi2 *priv);
+ void (*enter_standby)(struct rcar_csi2 *priv);
const struct rcsi2_mbps_reg *hsfreqrange;
unsigned int csi0clkfreqrange;
unsigned int num_channels;
bool clear_ulps;
bool use_isp;
+ bool support_dphy;
+ bool support_cphy;
};
struct rcar_csi2 {
@@ -509,6 +513,7 @@ struct rcar_csi2 {
struct v4l2_mbus_framefmt mf;
int stream_count;
+ bool cphy;
unsigned short lanes;
unsigned char lane_swap[4];
};
@@ -533,10 +538,17 @@ static void rcsi2_write(struct rcar_csi2 *priv, unsigned int reg, u32 data)
iowrite32(data, priv->base + reg);
}
-static void rcsi2_enter_standby(struct rcar_csi2 *priv)
+static void rcsi2_enter_standby_gen3(struct rcar_csi2 *priv)
{
rcsi2_write(priv, PHYCNT_REG, 0);
rcsi2_write(priv, PHTC_REG, PHTC_TESTCLR);
+}
+
+static void rcsi2_enter_standby(struct rcar_csi2 *priv)
+{
+ if (priv->info->enter_standby)
+ priv->info->enter_standby(priv);
+
reset_control_assert(priv->rstc);
usleep_range(100, 150);
pm_runtime_put(priv->dev);
@@ -656,7 +668,16 @@ static int rcsi2_get_active_lanes(struct rcar_csi2 *priv,
return ret;
}
- if (mbus_config.type != V4L2_MBUS_CSI2_DPHY) {
+ switch (mbus_config.type) {
+ case V4L2_MBUS_CSI2_CPHY:
+ if (!priv->cphy)
+ return -EINVAL;
+ break;
+ case V4L2_MBUS_CSI2_DPHY:
+ if (priv->cphy)
+ return -EINVAL;
+ break;
+ default:
dev_err(priv->dev, "Unsupported media bus type %u\n",
mbus_config.type);
return -EINVAL;
@@ -674,7 +695,7 @@ static int rcsi2_get_active_lanes(struct rcar_csi2 *priv,
return 0;
}
-static int rcsi2_start_receiver(struct rcar_csi2 *priv)
+static int rcsi2_start_receiver_gen3(struct rcar_csi2 *priv)
{
const struct rcar_csi2_format *format;
u32 phycnt, vcdt = 0, vcdt2 = 0, fld = 0;
@@ -821,7 +842,7 @@ static int rcsi2_start(struct rcar_csi2 *priv)
if (ret < 0)
return ret;
- ret = rcsi2_start_receiver(priv);
+ ret = priv->info->start_receiver(priv);
if (ret) {
rcsi2_enter_standby(priv);
return ret;
@@ -1016,15 +1037,41 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
if (vep->base.port || vep->base.id)
return -ENOTCONN;
- if (vep->bus_type != V4L2_MBUS_CSI2_DPHY) {
- dev_err(priv->dev, "Unsupported bus: %u\n", vep->bus_type);
- return -EINVAL;
- }
-
priv->lanes = vep->bus.mipi_csi2.num_data_lanes;
- if (priv->lanes != 1 && priv->lanes != 2 && priv->lanes != 4) {
- dev_err(priv->dev, "Unsupported number of data-lanes: %u\n",
- priv->lanes);
+
+ switch (vep->bus_type) {
+ case V4L2_MBUS_CSI2_DPHY:
+ if (!priv->info->support_dphy) {
+ dev_err(priv->dev, "D-PHY not supported\n");
+ return -EINVAL;
+ }
+
+ if (priv->lanes != 1 && priv->lanes != 2 && priv->lanes != 4) {
+ dev_err(priv->dev,
+ "Unsupported number of data-lanes for D-PHY: %u\n",
+ priv->lanes);
+ return -EINVAL;
+ }
+
+ priv->cphy = false;
+ break;
+ case V4L2_MBUS_CSI2_CPHY:
+ if (!priv->info->support_cphy) {
+ dev_err(priv->dev, "C-PHY not supported\n");
+ return -EINVAL;
+ }
+
+ if (priv->lanes != 3) {
+ dev_err(priv->dev,
+ "Unsupported number of data-lanes for C-PHY: %u\n",
+ priv->lanes);
+ return -EINVAL;
+ }
+
+ priv->cphy = true;
+ break;
+ default:
+ dev_err(priv->dev, "Unsupported bus: %u\n", vep->bus_type);
return -EINVAL;
}
@@ -1048,7 +1095,7 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
struct fwnode_handle *fwnode;
struct fwnode_handle *ep;
struct v4l2_fwnode_endpoint v4l2_ep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY
+ .bus_type = V4L2_MBUS_UNKNOWN,
};
int ret;
@@ -1363,63 +1410,90 @@ static int rcsi2_probe_resources(struct rcar_csi2 *priv,
static const struct rcar_csi2_info rcar_csi2_info_r8a7795 = {
.init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
.hsfreqrange = hsfreqrange_h3_v3h_m3n,
.csi0clkfreqrange = 0x20,
.num_channels = 4,
.clear_ulps = true,
+ .support_dphy = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a7795es2 = {
.init_phtw = rcsi2_init_phtw_h3es2,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
.hsfreqrange = hsfreqrange_h3_v3h_m3n,
.csi0clkfreqrange = 0x20,
.num_channels = 4,
.clear_ulps = true,
+ .support_dphy = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a7796 = {
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
.hsfreqrange = hsfreqrange_m3w,
.num_channels = 4,
+ .support_dphy = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a77961 = {
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
.hsfreqrange = hsfreqrange_m3w,
.num_channels = 4,
+ .support_dphy = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {
.init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
.hsfreqrange = hsfreqrange_h3_v3h_m3n,
.csi0clkfreqrange = 0x20,
.num_channels = 4,
.clear_ulps = true,
+ .support_dphy = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = {
.init_phtw = rcsi2_init_phtw_v3m_e3,
.phy_post_init = rcsi2_phy_post_init_v3m_e3,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
.num_channels = 4,
+ .support_dphy = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a77980 = {
.init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
.hsfreqrange = hsfreqrange_h3_v3h_m3n,
.csi0clkfreqrange = 0x20,
.clear_ulps = true,
+ .support_dphy = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = {
.init_phtw = rcsi2_init_phtw_v3m_e3,
.phy_post_init = rcsi2_phy_post_init_v3m_e3,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
.num_channels = 2,
+ .support_dphy = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a779a0 = {
.init_phtw = rcsi2_init_phtw_v3u,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
.hsfreqrange = hsfreqrange_v3u,
.csi0clkfreqrange = 0x20,
.clear_ulps = true,
.use_isp = true,
+ .support_dphy = true,
};
static const struct of_device_id rcar_csi2_of_table[] = {
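
The rcar-csi2 changes above move receiver start/standby into per-SoC rcar_csi2_info callbacks and add D-PHY/C-PHY capability flags, so the common code only dispatches through the matched info table. The toy standalone program below illustrates that dispatch pattern with invented names; it is not derived from the driver itself.

#include <stdbool.h>
#include <stdio.h>

struct demo_priv;

struct demo_info {
	int (*start_receiver)(struct demo_priv *priv);
	bool support_dphy;
	bool support_cphy;
};

struct demo_priv {
	const struct demo_info *info;
};

static int start_receiver_gen3(struct demo_priv *priv)
{
	printf("Gen3 receiver started (D-PHY only: %d)\n",
	       priv->info->support_dphy && !priv->info->support_cphy);
	return 0;
}

static const struct demo_info demo_info_gen3 = {
	.start_receiver = start_receiver_gen3,
	.support_dphy = true,
};

int main(void)
{
	struct demo_priv priv = { .info = &demo_info_gen3 };

	/* common code never hard-codes the variant */
	return priv.info->start_receiver(&priv);
}
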
diff --git a/drivers/media/platform/renesas/rcar_fdp1.c b/drivers/media/platform/renesas/rcar_fdp1.c
index f43e458590b8..ab39cd2201c8 100644
--- a/drivers/media/platform/renesas/rcar_fdp1.c
+++ b/drivers/media/platform/renesas/rcar_fdp1.c
@@ -254,6 +254,8 @@ MODULE_PARM_DESC(debug, "activate debug info");
/* Internal Data (HW Version) */
#define FD1_IP_INTDATA 0x0800
+/* R-Car Gen2 HW manual says zero, but actual value matches R-Car H3 ES1.x */
+#define FD1_IP_GEN2 0x02010101
#define FD1_IP_M3W 0x02010202
#define FD1_IP_H3 0x02010203
#define FD1_IP_M3N 0x02010204
@@ -2360,6 +2362,9 @@ static int fdp1_probe(struct platform_device *pdev)
hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
switch (hw_version) {
+ case FD1_IP_GEN2:
+ dprintk(fdp1, "FDP1 Version R-Car Gen2\n");
+ break;
case FD1_IP_M3W:
dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
break;
diff --git a/drivers/media/platform/renesas/rcar_jpu.c b/drivers/media/platform/renesas/rcar_jpu.c
index e7f604807825..2b8cb50f54de 100644
--- a/drivers/media/platform/renesas/rcar_jpu.c
+++ b/drivers/media/platform/renesas/rcar_jpu.c
@@ -28,6 +28,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/videodev2.h>
+#include <media/jpeg.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -71,19 +72,6 @@
#define JPU_JPEG_DEFAULT_422_PIX_FMT V4L2_PIX_FMT_NV16M
#define JPU_JPEG_DEFAULT_420_PIX_FMT V4L2_PIX_FMT_NV12M
-/* JPEG markers */
-#define TEM 0x01
-#define SOF0 0xc0
-#define RST 0xd0
-#define SOI 0xd8
-#define EOI 0xd9
-#define DHP 0xde
-#define DHT 0xc4
-#define COM 0xfe
-#define DQT 0xdb
-#define DRI 0xdd
-#define APP0 0xe0
-
#define JPU_RESET_TIMEOUT 100 /* ms */
#define JPU_JOB_TIMEOUT 300 /* ms */
#define JPU_MAX_QUALITY 4
@@ -330,26 +318,32 @@ static const u8 zigzag[] = {
* Huffman tables; Padding with 0xff (33.3.27 R01UH0501EJ0100 Rev.1.00)
*/
#define JPU_JPEG_HDR_BLOB { \
- 0xff, SOI, 0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_LUM, \
- [JPU_JPEG_QTBL_LUM_OFFSET ... \
+ 0xff, JPEG_MARKER_SOI, 0xff, JPEG_MARKER_DQT, 0x00, \
+ JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_LUM, \
+ [JPU_JPEG_QTBL_LUM_OFFSET ... \
JPU_JPEG_QTBL_LUM_OFFSET + JPU_JPEG_QTBL_SIZE - 1] = 0x00, \
- 0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_CHR, \
+ 0xff, JPEG_MARKER_DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_CHR, \
[JPU_JPEG_QTBL_CHR_OFFSET ... JPU_JPEG_QTBL_CHR_OFFSET + \
- JPU_JPEG_QTBL_SIZE - 1] = 0x00, 0xff, SOF0, 0x00, 0x11, 0x08, \
+ JPU_JPEG_QTBL_SIZE - 1] = 0x00, \
+ 0xff, JPEG_MARKER_SOF0, 0x00, 0x11, 0x08, \
[JPU_JPEG_HEIGHT_OFFSET ... JPU_JPEG_HEIGHT_OFFSET + 1] = 0x00, \
[JPU_JPEG_WIDTH_OFFSET ... JPU_JPEG_WIDTH_OFFSET + 1] = 0x00, \
0x03, 0x01, [JPU_JPEG_SUBS_OFFSET] = 0x00, JPU_JPEG_LUM, \
0x02, 0x11, JPU_JPEG_CHR, 0x03, 0x11, JPU_JPEG_CHR, \
- 0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_DC, \
+ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, \
+ JPU_JPEG_LUM | JPU_JPEG_DC, \
[JPU_JPEG_HDCTBL_LUM_OFFSET ... \
JPU_JPEG_HDCTBL_LUM_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
- 0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_AC, \
+ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, \
+ JPU_JPEG_LUM | JPU_JPEG_AC, \
[JPU_JPEG_HACTBL_LUM_OFFSET ... \
JPU_JPEG_HACTBL_LUM_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
- 0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_DC, \
+ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, \
+ JPU_JPEG_CHR | JPU_JPEG_DC, \
[JPU_JPEG_HDCTBL_CHR_OFFSET ... \
JPU_JPEG_HDCTBL_CHR_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
- 0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_AC, \
+ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, \
+ JPU_JPEG_CHR | JPU_JPEG_AC, \
[JPU_JPEG_HACTBL_CHR_OFFSET ... \
JPU_JPEG_HACTBL_CHR_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
[JPU_JPEG_PADDING_OFFSET ... JPU_JPEG_HDR_SIZE - 1] = 0xff \
@@ -613,7 +607,8 @@ static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
* basic size check and EOI - we don't want to let JPU cross
* buffer bounds in any case. Hope it's stopping by EOI.
*/
- if (size < JPU_JPEG_MIN_SIZE || *(u8 *)(buffer + size - 1) != EOI)
+ if (size < JPU_JPEG_MIN_SIZE ||
+ *(u8 *)(buffer + size - 1) != JPEG_MARKER_EOI)
return 0;
for (;;) {
@@ -624,14 +619,14 @@ static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
c = get_byte(&jpeg_buffer);
while (c == 0xff || c == 0);
- if (!soi && c == SOI) {
+ if (!soi && c == JPEG_MARKER_SOI) {
soi = true;
continue;
- } else if (soi != (c != SOI))
+ } else if (soi != (c != JPEG_MARKER_SOI))
return 0;
switch (c) {
- case SOF0: /* SOF0: baseline JPEG */
+ case JPEG_MARKER_SOF0: /* SOF0: baseline JPEG */
skip(&jpeg_buffer, 3); /* segment length and bpp */
if (get_word_be(&jpeg_buffer, height) ||
get_word_be(&jpeg_buffer, width) ||
@@ -640,11 +635,11 @@ static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
skip(&jpeg_buffer, 1);
return get_byte(&jpeg_buffer);
- case DHT:
- case DQT:
- case COM:
- case DRI:
- case APP0 ... APP0 + 0x0f:
+ case JPEG_MARKER_DHT:
+ case JPEG_MARKER_DQT:
+ case JPEG_MARKER_COM:
+ case JPEG_MARKER_DRI:
+ case JPEG_MARKER_APP0 ... JPEG_MARKER_APP0 + 0x0f:
if (get_word_be(&jpeg_buffer, &word))
return 0;
skip(&jpeg_buffer, (long)word - 2);
diff --git a/drivers/media/platform/renesas/renesas-ceu.c b/drivers/media/platform/renesas/renesas-ceu.c
index 56b9c59cfda8..5c9e27f8c94b 100644
--- a/drivers/media/platform/renesas/renesas-ceu.c
+++ b/drivers/media/platform/renesas/renesas-ceu.c
@@ -702,12 +702,6 @@ static int ceu_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Grab the first available buffer and trigger the first capture. */
buf = list_first_entry(&ceudev->capture, struct ceu_buffer,
queue);
- if (!buf) {
- spin_unlock_irqrestore(&ceudev->lock, irqflags);
- dev_dbg(ceudev->dev,
- "No buffer available for capture.\n");
- goto error_stop_sensor;
- }
list_del(&buf->queue);
ceudev->active = &buf->vb;
@@ -722,9 +716,6 @@ static int ceu_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
-error_stop_sensor:
- v4l2_subdev_call(v4l2_sd, video, s_stream, 0);
-
error_return_bufs:
spin_lock_irqsave(&ceudev->lock, irqflags);
list_for_each_entry(buf, &ceudev->capture, queue)
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index 30dad7383654..d6489c62b081 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -81,10 +81,10 @@
#define CSIDPHYSKW0_UTIL_DL1_SKW_ADJ(x) (((x) & 0x3) << 4)
#define CSIDPHYSKW0_UTIL_DL2_SKW_ADJ(x) (((x) & 0x3) << 8)
#define CSIDPHYSKW0_UTIL_DL3_SKW_ADJ(x) (((x) & 0x3) << 12)
-#define CSIDPHYSKW0_DEFAULT_SKW CSIDPHYSKW0_UTIL_DL0_SKW_ADJ(1) | \
- CSIDPHYSKW0_UTIL_DL1_SKW_ADJ(1) | \
- CSIDPHYSKW0_UTIL_DL2_SKW_ADJ(1) | \
- CSIDPHYSKW0_UTIL_DL3_SKW_ADJ(1)
+#define CSIDPHYSKW0_DEFAULT_SKW (CSIDPHYSKW0_UTIL_DL0_SKW_ADJ(1) | \
+ CSIDPHYSKW0_UTIL_DL1_SKW_ADJ(1) | \
+ CSIDPHYSKW0_UTIL_DL2_SKW_ADJ(1) | \
+ CSIDPHYSKW0_UTIL_DL3_SKW_ADJ(1))
#define VSRSTS_RETRIES 20
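
The CSIDPHYSKW0_DEFAULT_SKW fix simply adds outer parentheses around the OR of the four skew fields. Without them, any use of the macro next to a higher-precedence operator (a bitwise AND, for instance) applies that operator to the last OR term only. The standalone demo below uses simplified stand-in values rather than the real register fields.

#include <stdio.h>

#define UNPARENTHESIZED	0x1 | 0x10 | 0x100 | 0x1000
#define PARENTHESIZED	(0x1 | 0x10 | 0x100 | 0x1000)

int main(void)
{
	/* & binds tighter than |, so only 0x1000 gets masked here */
	printf("0x%x\n", UNPARENTHESIZED & 0xff);	/* prints 0x111 */
	/* with outer parentheses the whole value is masked as intended */
	printf("0x%x\n", PARENTHESIZED & 0xff);		/* prints 0x11 */
	return 0;
}
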
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 67dcf22e5ba3..f1c532a5802a 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -76,10 +76,7 @@ static irqreturn_t rga_isr(int irq, void *prv)
WARN_ON(!src);
WARN_ON(!dst);
- dst->timecode = src->timecode;
- dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
- dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
@@ -726,10 +723,10 @@ static int rga_enable_clocks(struct rockchip_rga *rga)
return 0;
-err_disable_sclk:
- clk_disable_unprepare(rga->sclk);
err_disable_aclk:
clk_disable_unprepare(rga->aclk);
+err_disable_sclk:
+ clk_disable_unprepare(rga->sclk);
return ret;
}
diff --git a/drivers/media/platform/samsung/exynos4-is/Kconfig b/drivers/media/platform/samsung/exynos4-is/Kconfig
index da33faa7132e..7f9ba053dd8e 100644
--- a/drivers/media/platform/samsung/exynos4-is/Kconfig
+++ b/drivers/media/platform/samsung/exynos4-is/Kconfig
@@ -47,7 +47,7 @@ config VIDEO_S5P_MIPI_CSIS
config VIDEO_EXYNOS_FIMC_LITE
tristate "EXYNOS FIMC-LITE camera interface driver"
depends on I2C
- depends on SOC_EXYNOS4412 || SOC_EXYNOS5250 || COMPILE_TEST
+ depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250 || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select VIDEO_EXYNOS4_IS_COMMON
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
index a2034ade8b9e..976b4f747ad4 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
@@ -1128,7 +1128,7 @@ static const struct fimc_drvdata fimc_drvdata_exynos4210 = {
.out_buf_count = 32,
};
-/* EXYNOS4412 */
+/* EXYNOS4212, EXYNOS4412 */
static const struct fimc_drvdata fimc_drvdata_exynos4x12 = {
.num_entities = 4,
.lclk_frequency = 166000000UL,
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
index 24b3dda26714..c3146ae08447 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
@@ -1621,7 +1621,7 @@ static const struct dev_pm_ops fimc_lite_pm_ops = {
NULL)
};
-/* EXYNOS4412 */
+/* EXYNOS4212, EXYNOS4412 */
static struct flite_drvdata fimc_lite_drvdata_exynos4 = {
.max_width = 8192,
.max_height = 8192,
diff --git a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h
index 5570c79f122f..4b665a3b630f 100644
--- a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.h
@@ -11,6 +11,7 @@
#define JPEG_CORE_H_
#include <linux/interrupt.h>
+#include <media/jpeg.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
@@ -36,17 +37,6 @@
#define EXYNOS3250_IRQ_TIMEOUT 0x10000000
-/* a selection of JPEG markers */
-#define JPEG_MARKER_TEM 0x01
-#define JPEG_MARKER_SOF0 0xc0
-#define JPEG_MARKER_DHT 0xc4
-#define JPEG_MARKER_RST 0xd0
-#define JPEG_MARKER_SOI 0xd8
-#define JPEG_MARKER_EOI 0xd9
-#define JPEG_MARKER_SOS 0xda
-#define JPEG_MARKER_DQT 0xdb
-#define JPEG_MARKER_DHP 0xde
-
/* Flags that indicate a format can be used for capture/output */
#define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0)
#define SJPEG_FMT_FLAG_ENC_OUTPUT (1 << 1)
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index 45ade7210d26..5dc1f908b49b 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -16,8 +16,10 @@
#include <linux/dma-mapping.h>
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -135,7 +137,7 @@ static void channel_swdemux_tsklet(struct tasklet_struct *t)
static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *demux = dvbdmxfeed->demux;
- struct stdemux *stdemux = (struct stdemux *)demux->priv;
+ struct stdemux *stdemux = demux->priv;
struct c8sectpfei *fei = stdemux->c8sectpfei;
struct channel_info *channel;
u32 tmp;
@@ -256,7 +258,7 @@ static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *demux = dvbdmxfeed->demux;
- struct stdemux *stdemux = (struct stdemux *)demux->priv;
+ struct stdemux *stdemux = demux->priv;
struct c8sectpfei *fei = stdemux->c8sectpfei;
struct channel_info *channel;
int idlereq;
@@ -812,30 +814,23 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
of_node_put(i2c_bus);
- tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);
-
- ret = gpio_is_valid(tsin->rst_gpio);
- if (!ret) {
- dev_err(dev,
- "reset gpio for tsin%d not valid (gpio=%d)\n",
- tsin->tsin_id, tsin->rst_gpio);
- ret = -EINVAL;
- goto err_node_put;
- }
-
- ret = devm_gpio_request_one(dev, tsin->rst_gpio,
- GPIOF_OUT_INIT_LOW, "NIM reset");
+ /* Acquire reset GPIO and activate it */
+ tsin->rst_gpio = devm_fwnode_gpiod_get(dev,
+ of_fwnode_handle(child),
+ "reset", GPIOD_OUT_HIGH,
+ "NIM reset");
+ ret = PTR_ERR_OR_ZERO(tsin->rst_gpio);
if (ret && ret != -EBUSY) {
- dev_err(dev, "Can't request tsin%d reset gpio\n"
- , fei->channel_data[index]->tsin_id);
+ dev_err(dev, "Can't request tsin%d reset gpio\n",
+ fei->channel_data[index]->tsin_id);
goto err_node_put;
}
if (!ret) {
- /* toggle reset lines */
- gpio_direction_output(tsin->rst_gpio, 0);
+ /* wait for the chip to reset */
usleep_range(3500, 5000);
- gpio_direction_output(tsin->rst_gpio, 1);
+ /* release the reset line */
+ gpiod_set_value_cansleep(tsin->rst_gpio, 0);
usleep_range(3000, 5000);
}
@@ -1173,7 +1168,7 @@ MODULE_DEVICE_TABLE(of, c8sectpfe_match);
static struct platform_driver c8sectpfe_driver = {
.driver = {
.name = "c8sectpfe",
- .of_match_table = of_match_ptr(c8sectpfe_match),
+ .of_match_table = c8sectpfe_match,
},
.probe = c8sectpfe_probe,
.remove_new = c8sectpfe_remove,
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h
index c9d6021904cd..bf377cc82225 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h
@@ -16,6 +16,8 @@
#define C8SECTPFE_MAX_TSIN_CHAN 8
+struct gpio_desc;
+
struct channel_info {
int tsin_id;
@@ -25,7 +27,7 @@ struct channel_info {
int i2c;
int dvb_card;
- int rst_gpio;
+ struct gpio_desc *rst_gpio;
struct i2c_adapter *i2c_adapter;
struct i2c_adapter *tuner_i2c;
diff --git a/drivers/media/platform/st/sti/hva/hva-h264.c b/drivers/media/platform/st/sti/hva/hva-h264.c
index 98cb00d2d868..196e631fa4b8 100644
--- a/drivers/media/platform/st/sti/hva/hva-h264.c
+++ b/drivers/media/platform/st/sti/hva/hva-h264.c
@@ -591,7 +591,7 @@ static int hva_h264_prepare_task(struct hva_ctx *pctx,
{
struct hva_dev *hva = ctx_to_hdev(pctx);
struct device *dev = ctx_to_dev(pctx);
- struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct hva_h264_ctx *ctx = pctx->priv;
struct hva_buffer *seq_info = ctx->seq_info;
struct hva_buffer *fwd_ref_frame = ctx->ref_frame;
struct hva_buffer *loc_rec_frame = ctx->rec_frame;
@@ -984,7 +984,7 @@ err:
static int hva_h264_close(struct hva_ctx *pctx)
{
- struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct hva_h264_ctx *ctx = pctx->priv;
struct device *dev = ctx_to_dev(pctx);
if (ctx->seq_info)
@@ -1007,8 +1007,8 @@ static int hva_h264_close(struct hva_ctx *pctx)
static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
struct hva_stream *stream)
{
- struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
- struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr;
+ struct hva_h264_ctx *ctx = pctx->priv;
+ struct hva_h264_task *task = ctx->task->vaddr;
u32 stuffing_bytes = 0;
int ret = 0;
diff --git a/drivers/media/platform/verisilicon/Makefile b/drivers/media/platform/verisilicon/Makefile
index ebd5ede7bef7..6ad2ef885920 100644
--- a/drivers/media/platform/verisilicon/Makefile
+++ b/drivers/media/platform/verisilicon/Makefile
@@ -18,6 +18,9 @@ hantro-vpu-y += \
rockchip_vpu2_hw_h264_dec.o \
rockchip_vpu2_hw_mpeg2_dec.o \
rockchip_vpu2_hw_vp8_dec.o \
+ rockchip_vpu981_hw_av1_dec.o \
+ rockchip_av1_filmgrain.o \
+ rockchip_av1_entropymode.o \
hantro_jpeg.o \
hantro_h264.o \
hantro_hevc.o \
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
index 2989ebc631cc..6523ffb74881 100644
--- a/drivers/media/platform/verisilicon/hantro.h
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -38,6 +38,7 @@ struct hantro_postproc_ops;
#define HANTRO_H264_DECODER BIT(18)
#define HANTRO_HEVC_DECODER BIT(19)
#define HANTRO_VP9_DECODER BIT(20)
+#define HANTRO_AV1_DECODER BIT(21)
#define HANTRO_DECODERS 0xffff0000
/**
@@ -111,6 +112,7 @@ struct hantro_variant {
* @HANTRO_MODE_VP8_DEC: VP8 decoder.
* @HANTRO_MODE_HEVC_DEC: HEVC decoder.
* @HANTRO_MODE_VP9_DEC: VP9 decoder.
+ * @HANTRO_MODE_AV1_DEC: AV1 decoder.
*/
enum hantro_codec_mode {
HANTRO_MODE_NONE = -1,
@@ -120,6 +122,7 @@ enum hantro_codec_mode {
HANTRO_MODE_VP8_DEC,
HANTRO_MODE_HEVC_DEC,
HANTRO_MODE_VP9_DEC,
+ HANTRO_MODE_AV1_DEC,
};
/*
@@ -228,6 +231,8 @@ struct hantro_dev {
* @ctrl_handler: Control handler used to register controls.
* @jpeg_quality: User-specified JPEG compression quality.
* @bit_depth: Bit depth of current frame
+ * @need_postproc:		Set to true if the bitstream features require
+ *				the use of the post-processor.
*
* @codec_ops: Set of operations related to codec mode.
* @postproc: Post-processing context.
@@ -237,6 +242,7 @@ struct hantro_dev {
* @vp8_dec: VP8-decoding context.
* @hevc_dec: HEVC-decoding context.
* @vp9_dec: VP9-decoding context.
+ * @av1_dec: AV1-decoding context.
*/
struct hantro_ctx {
struct hantro_dev *dev;
@@ -257,6 +263,7 @@ struct hantro_ctx {
const struct hantro_codec_ops *codec_ops;
struct hantro_postproc_ctx postproc;
+ bool need_postproc;
/* Specific for particular codec modes. */
union {
@@ -265,6 +272,7 @@ struct hantro_ctx {
struct hantro_vp8_dec_hw_ctx vp8_dec;
struct hantro_hevc_dec_hw_ctx hevc_dec;
struct hantro_vp9_dec_hw_ctx vp9_dec;
+ struct hantro_av1_dec_hw_ctx av1_dec;
};
};
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 09c74a573ddb..c0a368bacf88 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -275,7 +275,13 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
/* We only support profile 0 */
if (dec_params->profile != 0)
return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_STATELESS_AV1_SEQUENCE) {
+ const struct v4l2_ctrl_av1_sequence *sequence = ctrl->p_new.p_av1_sequence;
+
+ if (sequence->bit_depth != 8 && sequence->bit_depth != 10)
+ return -EINVAL;
}
+
return 0;
}
@@ -313,7 +319,7 @@ static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl)
if (ctx->bit_depth == bit_depth)
return 0;
- return hantro_reset_raw_fmt(ctx, bit_depth);
+ return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
}
default:
return -EINVAL;
@@ -337,7 +343,37 @@ static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl)
if (ctx->bit_depth == bit_depth)
return 0;
- return hantro_reset_raw_fmt(ctx, bit_depth);
+ return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hantro_av1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_STATELESS_AV1_SEQUENCE:
+ {
+ int bit_depth = ctrl->p_new.p_av1_sequence->bit_depth;
+ bool need_postproc = HANTRO_AUTO_POSTPROC;
+
+ if (ctrl->p_new.p_av1_sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT)
+ need_postproc = HANTRO_FORCE_POSTPROC;
+
+ if (ctx->bit_depth == bit_depth &&
+ ctx->need_postproc == need_postproc)
+ return 0;
+
+ return hantro_reset_raw_fmt(ctx, bit_depth, need_postproc);
}
default:
return -EINVAL;
@@ -363,6 +399,11 @@ static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
.s_ctrl = hantro_hevc_s_ctrl,
};
+static const struct v4l2_ctrl_ops hantro_av1_ctrl_ops = {
+ .try_ctrl = hantro_try_ctrl,
+ .s_ctrl = hantro_av1_s_ctrl,
+};
+
#define HANTRO_JPEG_ACTIVE_MARKERS (V4L2_JPEG_ACTIVE_MARKER_APP0 | \
V4L2_JPEG_ACTIVE_MARKER_COM | \
V4L2_JPEG_ACTIVE_MARKER_DQT | \
@@ -525,6 +566,28 @@ static const struct hantro_ctrl controls[] = {
.cfg = {
.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
},
+ }, {
+ .codec = HANTRO_AV1_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_FRAME,
+ },
+ }, {
+ .codec = HANTRO_AV1_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
+ .dims = { V4L2_AV1_MAX_TILE_COUNT },
+ },
+ }, {
+ .codec = HANTRO_AV1_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_SEQUENCE,
+ .ops = &hantro_av1_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_AV1_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_FILM_GRAIN,
+ },
},
};
@@ -656,6 +719,7 @@ static const struct of_device_id of_hantro_match[] = {
{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
{ .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
+ { .compatible = "rockchip,rk3588-av1-vpu", .data = &rk3588_vpu981_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
{ .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
diff --git a/drivers/media/platform/verisilicon/hantro_hevc.c b/drivers/media/platform/verisilicon/hantro_hevc.c
index 9383fb7081f6..2c14330bc562 100644
--- a/drivers/media/platform/verisilicon/hantro_hevc.c
+++ b/drivers/media/platform/verisilicon/hantro_hevc.c
@@ -109,7 +109,7 @@ static int tile_buffer_reallocate(struct hantro_ctx *ctx)
&hevc_dec->tile_filter.dma,
GFP_KERNEL);
if (!hevc_dec->tile_filter.cpu)
- goto err_free_tile_buffers;
+ return -ENOMEM;
hevc_dec->tile_filter.size = size;
size = (VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8;
@@ -125,31 +125,26 @@ static int tile_buffer_reallocate(struct hantro_ctx *ctx)
&hevc_dec->tile_bsd.dma,
GFP_KERNEL);
if (!hevc_dec->tile_bsd.cpu)
- goto err_free_tile_buffers;
+ goto err_free_sao_buffers;
hevc_dec->tile_bsd.size = size;
hevc_dec->num_tile_cols_allocated = num_tile_cols;
return 0;
-err_free_tile_buffers:
- if (hevc_dec->tile_filter.cpu)
- dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
- hevc_dec->tile_filter.cpu,
- hevc_dec->tile_filter.dma);
- hevc_dec->tile_filter.cpu = NULL;
-
+err_free_sao_buffers:
if (hevc_dec->tile_sao.cpu)
dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
hevc_dec->tile_sao.cpu,
hevc_dec->tile_sao.dma);
hevc_dec->tile_sao.cpu = NULL;
- if (hevc_dec->tile_bsd.cpu)
- dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
- hevc_dec->tile_bsd.cpu,
- hevc_dec->tile_bsd.dma);
- hevc_dec->tile_bsd.cpu = NULL;
+err_free_tile_buffers:
+ if (hevc_dec->tile_filter.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
return -ENOMEM;
}
diff --git a/drivers/media/platform/verisilicon/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h
index e83f0c523a30..7f33f7b07ce4 100644
--- a/drivers/media/platform/verisilicon/hantro_hw.h
+++ b/drivers/media/platform/verisilicon/hantro_hw.h
@@ -15,6 +15,9 @@
#include <media/v4l2-vp9.h>
#include <media/videobuf2-core.h>
+#include "rockchip_av1_entropymode.h"
+#include "rockchip_av1_filmgrain.h"
+
#define DEC_8190_ALIGN_MASK 0x07U
#define MB_DIM 16
@@ -35,6 +38,8 @@
#define NUM_REF_PICTURES (V4L2_HEVC_DPB_ENTRIES_NUM_MAX + 1)
+#define AV1_MAX_FRAME_BUF_COUNT (V4L2_AV1_TOTAL_REFS_PER_FRAME + 1)
+
struct hantro_dev;
struct hantro_ctx;
struct hantro_buf;
@@ -248,6 +253,84 @@ struct hantro_vp9_dec_hw_ctx {
};
/**
+ * struct hantro_av1_dec_ctrls
+ * @sequence: AV1 Sequence
+ * @tile_group_entry: AV1 Tile Group entry
+ * @frame: AV1 Frame Header OBU
+ * @film_grain: AV1 Film Grain
+ */
+struct hantro_av1_dec_ctrls {
+ const struct v4l2_ctrl_av1_sequence *sequence;
+ const struct v4l2_ctrl_av1_tile_group_entry *tile_group_entry;
+ const struct v4l2_ctrl_av1_frame *frame;
+ const struct v4l2_ctrl_av1_film_grain *film_grain;
+};
+
+struct hantro_av1_frame_ref {
+ int width;
+ int height;
+ int mi_cols;
+ int mi_rows;
+ u64 timestamp;
+ enum v4l2_av1_frame_type frame_type;
+ bool used;
+ u32 order_hint;
+ u32 order_hints[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ struct vb2_v4l2_buffer *vb2_ref;
+};
+
+/**
+ * struct hantro_av1_dec_hw_ctx
+ * @db_data_col: db tile col data buffer
+ * @db_ctrl_col: db tile col ctrl buffer
+ * @cdef_col: cdef tile col buffer
+ * @sr_col: sr tile col buffer
+ * @lr_col: lr tile col buffer
+ * @global_model: global model buffer
+ * @tile_info: tile info buffer
+ * @segment: segmentation info buffer
+ * @film_grain: film grain buffer
+ * @prob_tbl: probability table
+ * @prob_tbl_out: probability table output
+ * @tile_buf: tile buffer
+ * @ctrls: V4L2 controls attached to a run
+ * @frame_refs: reference frames info slots
+ * @ref_frame_sign_bias: array of reference frame sign biases
+ * @num_tile_cols_allocated: number of tile columns buffers were allocated for
+ * @cdfs: current probabilities structure
+ * @cdfs_ndvc: current mv probabilities structure
+ * @default_cdfs: default probabilities structure
+ * @default_cdfs_ndvc: default mv probabilities structure
+ * @cdfs_last: stored probabilities structures
+ * @cdfs_last_ndvc: stored mv probabilities structures
+ * @current_frame_index: index of the current frame in the frame_refs array
+ */
+struct hantro_av1_dec_hw_ctx {
+ struct hantro_aux_buf db_data_col;
+ struct hantro_aux_buf db_ctrl_col;
+ struct hantro_aux_buf cdef_col;
+ struct hantro_aux_buf sr_col;
+ struct hantro_aux_buf lr_col;
+ struct hantro_aux_buf global_model;
+ struct hantro_aux_buf tile_info;
+ struct hantro_aux_buf segment;
+ struct hantro_aux_buf film_grain;
+ struct hantro_aux_buf prob_tbl;
+ struct hantro_aux_buf prob_tbl_out;
+ struct hantro_aux_buf tile_buf;
+ struct hantro_av1_dec_ctrls ctrls;
+ struct hantro_av1_frame_ref frame_refs[AV1_MAX_FRAME_BUF_COUNT];
+ u32 ref_frame_sign_bias[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ unsigned int num_tile_cols_allocated;
+ struct av1cdfs *cdfs;
+ struct mvcdfs *cdfs_ndvc;
+ struct av1cdfs default_cdfs;
+ struct mvcdfs default_cdfs_ndvc;
+ struct av1cdfs cdfs_last[NUM_REF_FRAMES];
+ struct mvcdfs cdfs_last_ndvc[NUM_REF_FRAMES];
+ int current_frame_index;
+};
+/**
* struct hantro_postproc_ctx
*
* @dec_q: References buffers, in decoder format.
@@ -320,11 +403,13 @@ extern const struct hantro_variant rk3328_vpu_variant;
extern const struct hantro_variant rk3399_vpu_variant;
extern const struct hantro_variant rk3568_vepu_variant;
extern const struct hantro_variant rk3568_vpu_variant;
+extern const struct hantro_variant rk3588_vpu981_variant;
extern const struct hantro_variant sama5d4_vdec_variant;
extern const struct hantro_variant sunxi_vpu_variant;
extern const struct hantro_postproc_ops hantro_g1_postproc_ops;
extern const struct hantro_postproc_ops hantro_g2_postproc_ops;
+extern const struct hantro_postproc_ops rockchip_vpu981_postproc_ops;
extern const u32 hantro_vp8_dec_mc_filter[8][6];
@@ -361,6 +446,10 @@ void hantro_hevc_ref_init(struct hantro_ctx *ctx);
dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, s32 poc);
int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr);
+int rockchip_vpu981_av1_dec_init(struct hantro_ctx *ctx);
+void rockchip_vpu981_av1_dec_exit(struct hantro_ctx *ctx);
+int rockchip_vpu981_av1_dec_run(struct hantro_ctx *ctx);
+void rockchip_vpu981_av1_dec_done(struct hantro_ctx *ctx);
static inline unsigned short hantro_vp9_num_sbs(unsigned short dimension)
{
@@ -417,6 +506,19 @@ hantro_hevc_mv_size(unsigned int width, unsigned int height)
return width * height / 16;
}
+static inline unsigned short hantro_av1_num_sbs(unsigned short dimension)
+{
+ return DIV_ROUND_UP(dimension, 64);
+}
+
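+/*
+ * Size of the motion vector auxiliary data appended to a decoded frame:
+ * the frame is covered with 64x64 superblocks, each superblock contributes
+ * 384 bytes, and the total is 16-byte aligned, doubled and padded with
+ * 512 extra bytes.
+ */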
+static inline size_t
+hantro_av1_mv_size(unsigned int width, unsigned int height)
+{
+ size_t num_sbs = hantro_av1_num_sbs(width) * hantro_av1_num_sbs(height);
+
+ return ALIGN(num_sbs * 384, 16) * 2 + 512;
+}
+
int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx);
void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index 6437423ccf3a..c977d64105b1 100644
--- a/drivers/media/platform/verisilicon/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -57,6 +57,10 @@ bool hantro_needs_postproc(const struct hantro_ctx *ctx,
{
if (ctx->is_encoder)
return false;
+
+ if (ctx->need_postproc)
+ return true;
+
return fmt->postprocessed;
}
@@ -197,7 +201,7 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
unsigned int i, buf_size;
/* this should always pick native format */
- fmt = hantro_get_default_fmt(ctx, false, ctx->bit_depth);
+ fmt = hantro_get_default_fmt(ctx, false, ctx->bit_depth, HANTRO_AUTO_POSTPROC);
if (!fmt)
return -EINVAL;
v4l2_fill_pixfmt_mp(&pix_mp, fmt->fourcc, ctx->src_fmt.width,
@@ -213,6 +217,9 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE)
buf_size += hantro_hevc_mv_size(pix_mp.width,
pix_mp.height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME)
+ buf_size += hantro_av1_mv_size(pix_mp.width,
+ pix_mp.height);
for (i = 0; i < num_buffers; ++i) {
struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index 61cfaaf4e927..e871c078dd59 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -31,15 +31,21 @@
#define HANTRO_DEFAULT_BIT_DEPTH 8
static int hantro_set_fmt_out(struct hantro_ctx *ctx,
- struct v4l2_pix_format_mplane *pix_mp);
+ struct v4l2_pix_format_mplane *pix_mp,
+ bool need_postproc);
static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
struct v4l2_pix_format_mplane *pix_mp);
static const struct hantro_fmt *
-hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts)
+hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts, bool need_postproc)
{
const struct hantro_fmt *formats;
+ if (need_postproc) {
+ *num_fmts = 0;
+ return NULL;
+ }
+
if (ctx->is_encoder) {
formats = ctx->dev->variant->enc_fmts;
*num_fmts = ctx->dev->variant->num_enc_fmts;
@@ -71,6 +77,7 @@ int hantro_get_format_depth(u32 fourcc)
switch (fourcc) {
case V4L2_PIX_FMT_P010:
case V4L2_PIX_FMT_P010_4L4:
+ case V4L2_PIX_FMT_NV15_4L4:
return 10;
default:
return 8;
@@ -85,6 +92,10 @@ hantro_check_depth_match(const struct hantro_fmt *fmt, int bit_depth)
if (!fmt->match_depth && !fmt->postprocessed)
return true;
+ /* 0 means default depth, which is 8 */
+ if (!bit_depth)
+ bit_depth = HANTRO_DEFAULT_BIT_DEPTH;
+
fmt_depth = hantro_get_format_depth(fmt->fourcc);
/*
@@ -103,7 +114,7 @@ hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
const struct hantro_fmt *formats;
unsigned int i, num_fmts;
- formats = hantro_get_formats(ctx, &num_fmts);
+ formats = hantro_get_formats(ctx, &num_fmts, HANTRO_AUTO_POSTPROC);
for (i = 0; i < num_fmts; i++)
if (formats[i].fourcc == fourcc)
return &formats[i];
@@ -116,18 +127,28 @@ hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
}
const struct hantro_fmt *
-hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream, int bit_depth)
+hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream,
+ int bit_depth, bool need_postproc)
{
const struct hantro_fmt *formats;
unsigned int i, num_fmts;
- formats = hantro_get_formats(ctx, &num_fmts);
+ formats = hantro_get_formats(ctx, &num_fmts, need_postproc);
+ for (i = 0; i < num_fmts; i++) {
+ if (bitstream == (formats[i].codec_mode !=
+ HANTRO_MODE_NONE) &&
+ hantro_check_depth_match(&formats[i], bit_depth))
+ return &formats[i];
+ }
+
+ formats = hantro_get_postproc_formats(ctx, &num_fmts);
for (i = 0; i < num_fmts; i++) {
if (bitstream == (formats[i].codec_mode !=
HANTRO_MODE_NONE) &&
hantro_check_depth_match(&formats[i], bit_depth))
return &formats[i];
}
+
return NULL;
}
@@ -194,7 +215,7 @@ static int vidioc_enum_fmt(struct file *file, void *priv,
*/
skip_mode_none = capture == ctx->is_encoder;
- formats = hantro_get_formats(ctx, &num_fmts);
+ formats = hantro_get_formats(ctx, &num_fmts, HANTRO_AUTO_POSTPROC);
for (i = 0; i < num_fmts; i++) {
bool mode_none = formats[i].codec_mode == HANTRO_MODE_NONE;
fmt = &formats[i];
@@ -289,7 +310,7 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
fmt = hantro_find_format(ctx, pix_mp->pixelformat);
if (!fmt) {
- fmt = hantro_get_default_fmt(ctx, coded, HANTRO_DEFAULT_BIT_DEPTH);
+ fmt = hantro_get_default_fmt(ctx, coded, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
pix_mp->pixelformat = fmt->fourcc;
}
@@ -328,6 +349,11 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
pix_mp->plane_fmt[0].sizeimage +=
hantro_hevc_mv_size(pix_mp->width,
pix_mp->height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_av1_mv_size(pix_mp->width,
+ pix_mp->height);
} else if (!pix_mp->plane_fmt[0].sizeimage) {
/*
* For coded formats the application can specify
@@ -373,7 +399,7 @@ hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
const struct hantro_fmt *vpu_fmt;
struct v4l2_pix_format_mplane fmt;
- vpu_fmt = hantro_get_default_fmt(ctx, true, HANTRO_DEFAULT_BIT_DEPTH);
+ vpu_fmt = hantro_get_default_fmt(ctx, true, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
if (!vpu_fmt)
return;
@@ -383,17 +409,17 @@ hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
if (ctx->is_encoder)
hantro_set_fmt_cap(ctx, &fmt);
else
- hantro_set_fmt_out(ctx, &fmt);
+ hantro_set_fmt_out(ctx, &fmt, HANTRO_AUTO_POSTPROC);
}
int
-hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth)
+hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth, bool need_postproc)
{
const struct hantro_fmt *raw_vpu_fmt;
struct v4l2_pix_format_mplane raw_fmt, *encoded_fmt;
int ret;
- raw_vpu_fmt = hantro_get_default_fmt(ctx, false, bit_depth);
+ raw_vpu_fmt = hantro_get_default_fmt(ctx, false, bit_depth, need_postproc);
if (!raw_vpu_fmt)
return -EINVAL;
@@ -408,12 +434,14 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth)
raw_fmt.width = encoded_fmt->width;
raw_fmt.height = encoded_fmt->height;
if (ctx->is_encoder)
- ret = hantro_set_fmt_out(ctx, &raw_fmt);
+ ret = hantro_set_fmt_out(ctx, &raw_fmt, need_postproc);
else
ret = hantro_set_fmt_cap(ctx, &raw_fmt);
- if (!ret)
+ if (!ret) {
ctx->bit_depth = bit_depth;
+ ctx->need_postproc = need_postproc;
+ }
return ret;
}
@@ -421,7 +449,7 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth)
void hantro_reset_fmts(struct hantro_ctx *ctx)
{
hantro_reset_encoded_fmt(ctx);
- hantro_reset_raw_fmt(ctx, HANTRO_DEFAULT_BIT_DEPTH);
+ hantro_reset_raw_fmt(ctx, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
}
static void
@@ -468,7 +496,8 @@ hantro_update_requires_hold_capture_buf(struct hantro_ctx *ctx, u32 fourcc)
}
static int hantro_set_fmt_out(struct hantro_ctx *ctx,
- struct v4l2_pix_format_mplane *pix_mp)
+ struct v4l2_pix_format_mplane *pix_mp,
+ bool need_postproc)
{
struct vb2_queue *vq;
int ret;
@@ -521,7 +550,9 @@ static int hantro_set_fmt_out(struct hantro_ctx *ctx,
* changes to the raw format.
*/
if (!ctx->is_encoder)
- hantro_reset_raw_fmt(ctx, hantro_get_format_depth(pix_mp->pixelformat));
+ hantro_reset_raw_fmt(ctx,
+ hantro_get_format_depth(pix_mp->pixelformat),
+ need_postproc);
/* Colorimetry information are always propagated. */
ctx->dst_fmt.colorspace = pix_mp->colorspace;
@@ -584,7 +615,7 @@ static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
* changes to the raw format.
*/
if (ctx->is_encoder)
- hantro_reset_raw_fmt(ctx, HANTRO_DEFAULT_BIT_DEPTH);
+ hantro_reset_raw_fmt(ctx, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
/* Colorimetry information are always propagated. */
ctx->src_fmt.colorspace = pix_mp->colorspace;
@@ -604,7 +635,7 @@ static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
static int
vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
- return hantro_set_fmt_out(fh_to_ctx(priv), &f->fmt.pix_mp);
+ return hantro_set_fmt_out(fh_to_ctx(priv), &f->fmt.pix_mp, HANTRO_AUTO_POSTPROC);
}
static int
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.h b/drivers/media/platform/verisilicon/hantro_v4l2.h
index 9ea2fef57dcd..fca7e3a690e5 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.h
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.h
@@ -18,13 +18,17 @@
#include "hantro.h"
+#define HANTRO_FORCE_POSTPROC true
+#define HANTRO_AUTO_POSTPROC false
+
extern const struct v4l2_ioctl_ops hantro_ioctl_ops;
extern const struct vb2_ops hantro_queue_ops;
-int hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth);
+int hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth, bool need_postproc);
void hantro_reset_fmts(struct hantro_ctx *ctx);
int hantro_get_format_depth(u32 fourcc);
const struct hantro_fmt *
-hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream, int bit_depth);
+hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream,
+ int bit_depth, bool need_postproc);
#endif /* HANTRO_V4L2_H_ */
diff --git a/drivers/media/platform/verisilicon/rockchip_av1_entropymode.c b/drivers/media/platform/verisilicon/rockchip_av1_entropymode.c
new file mode 100644
index 000000000000..b1ae72ad675e
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_av1_entropymode.c
@@ -0,0 +1,4424 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "hantro.h"
+#include "rockchip_av1_entropymode.h"
+
+#define AOM_ICDF ICDF
+#define AOM_CDF2(a0) AOM_ICDF(a0)
+#define AOM_CDF3(a0, a1) \
+ AOM_ICDF(a0), AOM_ICDF(a1)
+#define AOM_CDF4(a0, a1, a2) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2)
+#define AOM_CDF5(a0, a1, a2, a3) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3)
+#define AOM_CDF6(a0, a1, a2, a3, a4) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4)
+#define AOM_CDF7(a0, a1, a2, a3, a4, a5) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), \
+ AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5)
+#define AOM_CDF8(a0, a1, a2, a3, a4, a5, a6) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), \
+ AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5), AOM_ICDF(a6)
+#define AOM_CDF9(a0, a1, a2, a3, a4, a5, a6, a7) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), \
+ AOM_ICDF(a4), AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7)
+#define AOM_CDF10(a0, a1, a2, a3, a4, a5, a6, a7, a8) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), \
+ AOM_ICDF(a4), AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8)
+#define AOM_CDF11(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
+ AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9)
+#define AOM_CDF12(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5), \
+ AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), AOM_ICDF(a10)
+#define AOM_CDF13(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5), \
+ AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), AOM_ICDF(a10), AOM_ICDF(a11)
+#define AOM_CDF14(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
+ AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), \
+ AOM_ICDF(a10), AOM_ICDF(a11), AOM_ICDF(a12)
+#define AOM_CDF15(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
+ AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), \
+ AOM_ICDF(a10), AOM_ICDF(a11), AOM_ICDF(a12), AOM_ICDF(a13)
+#define AOM_CDF16(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
+ AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), \
+ AOM_ICDF(a10), AOM_ICDF(a11), AOM_ICDF(a12), AOM_ICDF(a13), AOM_ICDF(a14)
+
+static const u16 default_kf_y_mode_cdf
+ [KF_MODE_CONTEXTS][KF_MODE_CONTEXTS][CDF_SIZE(AV1_INTRA_MODES)] = {
+ {
+ {
+ AOM_CDF13(15588, 17027, 19338, 20218, 20682, 21110,
+ 21825, 23244, 24189, 28165, 29093, 30466)
+ },
+ {
+ AOM_CDF13(12016, 18066, 19516, 20303, 20719, 21444,
+ 21888, 23032, 24434, 28658, 30172, 31409)
+ },
+ {
+ AOM_CDF13(10052, 10771, 22296, 22788, 23055, 23239,
+ 24133, 25620, 26160, 29336, 29929, 31567)
+ },
+ {
+ AOM_CDF13(14091, 15406, 16442, 18808, 19136, 19546,
+ 19998, 22096, 24746, 29585, 30958, 32462)
+ },
+ {
+ AOM_CDF13(12122, 13265, 15603, 16501, 18609, 20033,
+ 22391, 25583, 26437, 30261, 31073, 32475)
+ }
+ },
+ {
+ {
+ AOM_CDF13(10023, 19585, 20848, 21440, 21832, 22760,
+ 23089, 24023, 25381, 29014, 30482, 31436)
+ },
+ {
+ AOM_CDF13(5983, 24099, 24560, 24886, 25066, 25795,
+ 25913, 26423, 27610, 29905, 31276, 31794)
+ },
+ {
+ AOM_CDF13(7444, 12781, 20177, 20728, 21077, 21607,
+ 22170, 23405, 24469, 27915, 29090, 30492)
+ },
+ {
+ AOM_CDF13(8537, 14689, 15432, 17087, 17408, 18172,
+ 18408, 19825, 24649, 29153, 31096, 32210)
+ },
+ {
+ AOM_CDF13(7543, 14231, 15496, 16195, 17905, 20717,
+ 21984, 24516, 26001, 29675, 30981, 31994)
+ }
+ },
+ {
+ {
+ AOM_CDF13(12613, 13591, 21383, 22004, 22312, 22577,
+ 23401, 25055, 25729, 29538, 30305, 32077)
+ },
+ {
+ AOM_CDF13(9687, 13470, 18506, 19230, 19604, 20147,
+ 20695, 22062, 23219, 27743, 29211, 30907)
+ },
+ {
+ AOM_CDF13(6183, 6505, 26024, 26252, 26366, 26434,
+ 27082, 28354, 28555, 30467, 30794, 32086)
+ },
+ {
+ AOM_CDF13(10718, 11734, 14954, 17224, 17565, 17924,
+ 18561, 21523, 23878, 28975, 30287, 32252)
+ },
+ {
+ AOM_CDF13(9194, 9858, 16501, 17263, 18424, 19171,
+ 21563, 25961, 26561, 30072, 30737, 32463)
+ }
+ },
+ {
+ {
+ AOM_CDF13(12602, 14399, 15488, 18381, 18778, 19315,
+ 19724, 21419, 25060, 29696, 30917, 32409)
+ },
+ {
+ AOM_CDF13(8203, 13821, 14524, 17105, 17439, 18131,
+ 18404, 19468, 25225, 29485, 31158, 32342)
+ },
+ {
+ AOM_CDF13(8451, 9731, 15004, 17643, 18012, 18425,
+ 19070, 21538, 24605, 29118, 30078, 32018)
+ },
+ {
+ AOM_CDF13(7714, 9048, 9516, 16667, 16817, 16994,
+ 17153, 18767, 26743, 30389, 31536, 32528)
+ },
+ {
+ AOM_CDF13(8843, 10280, 11496, 15317, 16652, 17943,
+ 19108, 22718, 25769, 29953, 30983, 32485)
+ }
+ },
+ {
+ {
+ AOM_CDF13(12578, 13671, 15979, 16834, 19075, 20913,
+ 22989, 25449, 26219, 30214, 31150, 32477)
+ },
+ {
+ AOM_CDF13(9563, 13626, 15080, 15892, 17756, 20863,
+ 22207, 24236, 25380, 29653, 31143, 32277)
+ },
+ {
+ AOM_CDF13(8356, 8901, 17616, 18256, 19350, 20106,
+ 22598, 25947, 26466, 29900, 30523, 32261)
+ },
+ {
+ AOM_CDF13(10835, 11815, 13124, 16042, 17018, 18039,
+ 18947, 22753, 24615, 29489, 30883, 32482)
+ },
+ {
+ AOM_CDF13(7618, 8288, 9859, 10509, 15386, 18657,
+ 22903, 28776, 29180, 31355, 31802, 32593)
+ }
+ }
+};
+
+static const u16 default_angle_delta_cdf[DIRECTIONAL_MODES]
+ [CDF_SIZE(2 * MAX_ANGLE_DELTA + 1)] = {
+ { AOM_CDF7(2180, 5032, 7567, 22776, 26989, 30217) },
+ { AOM_CDF7(2301, 5608, 8801, 23487, 26974, 30330) },
+ { AOM_CDF7(3780, 11018, 13699, 19354, 23083, 31286) },
+ { AOM_CDF7(4581, 11226, 15147, 17138, 21834, 28397) },
+ { AOM_CDF7(1737, 10927, 14509, 19588, 22745, 28823) },
+ { AOM_CDF7(2664, 10176, 12485, 17650, 21600, 30495) },
+ { AOM_CDF7(2240, 11096, 15453, 20341, 22561, 28917) },
+ { AOM_CDF7(3605, 10428, 12459, 17676, 21244, 30655) }
+};
+
+static const u16 default_if_y_mode_cdf[BLOCK_SIZE_GROUPS][CDF_SIZE(AV1_INTRA_MODES)] = {
+ {
+ AOM_CDF13(22801, 23489, 24293, 24756, 25601, 26123,
+ 26606, 27418, 27945, 29228, 29685, 30349)
+ },
+ {
+ AOM_CDF13(18673, 19845, 22631, 23318, 23950, 24649,
+ 25527, 27364, 28152, 29701, 29984, 30852)
+ },
+ {
+ AOM_CDF13(19770, 20979, 23396, 23939, 24241, 24654,
+ 25136, 27073, 27830, 29360, 29730, 30659)
+ },
+ {
+ AOM_CDF13(20155, 21301, 22838, 23178, 23261, 23533,
+ 23703, 24804, 25352, 26575, 27016, 28049)
+ }
+};
+
+static const u16 default_uv_mode_cdf[CFL_ALLOWED_TYPES]
+ [AV1_INTRA_MODES][CDF_SIZE(UV_INTRA_MODES)] = {
+ {
+ {
+ AOM_CDF13(22631, 24152, 25378, 25661, 25986, 26520,
+ 27055, 27923, 28244, 30059, 30941, 31961)
+ },
+ {
+ AOM_CDF13(9513, 26881, 26973, 27046, 27118, 27664,
+ 27739, 27824, 28359, 29505, 29800, 31796)
+ },
+ {
+ AOM_CDF13(9845, 9915, 28663, 28704, 28757, 28780,
+ 29198, 29822, 29854, 30764, 31777, 32029)
+ },
+ {
+ AOM_CDF13(13639, 13897, 14171, 25331, 25606, 25727,
+ 25953, 27148, 28577, 30612, 31355, 32493)
+ },
+ {
+ AOM_CDF13(9764, 9835, 9930, 9954, 25386, 27053,
+ 27958, 28148, 28243, 31101, 31744, 32363)
+ },
+ {
+ AOM_CDF13(11825, 13589, 13677, 13720, 15048, 29213,
+ 29301, 29458, 29711, 31161, 31441, 32550)
+ },
+ {
+ AOM_CDF13(14175, 14399, 16608, 16821, 17718, 17775,
+ 28551, 30200, 30245, 31837, 32342, 32667)
+ },
+ {
+ AOM_CDF13(12885, 13038, 14978, 15590, 15673, 15748,
+ 16176, 29128, 29267, 30643, 31961, 32461)
+ },
+ {
+ AOM_CDF13(12026, 13661, 13874, 15305, 15490, 15726,
+ 15995, 16273, 28443, 30388, 30767, 32416)
+ },
+ {
+ AOM_CDF13(19052, 19840, 20579, 20916, 21150, 21467,
+ 21885, 22719, 23174, 28861, 30379, 32175)
+ },
+ {
+ AOM_CDF13(18627, 19649, 20974, 21219, 21492, 21816,
+ 22199, 23119, 23527, 27053, 31397, 32148)
+ },
+ {
+ AOM_CDF13(17026, 19004, 19997, 20339, 20586, 21103,
+ 21349, 21907, 22482, 25896, 26541, 31819)
+ },
+ {
+ AOM_CDF13(12124, 13759, 14959, 14992, 15007, 15051,
+ 15078, 15166, 15255, 15753, 16039, 16606)
+ }
+ },
+ {
+ {
+ AOM_CDF14(10407, 11208, 12900, 13181, 13823, 14175,
+ 14899, 15656, 15986, 20086, 20995, 22455,
+ 24212)
+ },
+ {
+ AOM_CDF14(4532, 19780, 20057, 20215, 20428, 21071,
+ 21199, 21451, 22099, 24228, 24693, 27032,
+ 29472)
+ },
+ {
+ AOM_CDF14(5273, 5379, 20177, 20270, 20385, 20439,
+ 20949, 21695, 21774, 23138, 24256, 24703,
+ 26679)
+ },
+ {
+ AOM_CDF14(6740, 7167, 7662, 14152, 14536, 14785,
+ 15034, 16741, 18371, 21520, 22206, 23389,
+ 24182)
+ },
+ {
+ AOM_CDF14(4987, 5368, 5928, 6068, 19114, 20315, 21857,
+ 22253, 22411, 24911, 25380, 26027, 26376)
+ },
+ {
+ AOM_CDF14(5370, 6889, 7247, 7393, 9498, 21114, 21402,
+ 21753, 21981, 24780, 25386, 26517, 27176)
+ },
+ {
+ AOM_CDF14(4816, 4961, 7204, 7326, 8765, 8930, 20169,
+ 20682, 20803, 23188, 23763, 24455, 24940)
+ },
+ {
+ AOM_CDF14(6608, 6740, 8529, 9049, 9257, 9356, 9735,
+ 18827, 19059, 22336, 23204, 23964, 24793)
+ },
+ {
+ AOM_CDF14(5998, 7419, 7781, 8933, 9255, 9549, 9753,
+ 10417, 18898, 22494, 23139, 24764, 25989)
+ },
+ {
+ AOM_CDF14(10660, 11298, 12550, 12957, 13322, 13624,
+ 14040, 15004, 15534, 20714, 21789, 23443,
+ 24861)
+ },
+ {
+ AOM_CDF14(10522, 11530, 12552, 12963, 13378, 13779,
+ 14245, 15235, 15902, 20102, 22696, 23774,
+ 25838)
+ },
+ {
+ AOM_CDF14(10099, 10691, 12639, 13049, 13386, 13665,
+ 14125, 15163, 15636, 19676, 20474, 23519,
+ 25208)
+ },
+ {
+ AOM_CDF14(3144, 5087, 7382, 7504, 7593, 7690, 7801,
+ 8064, 8232, 9248, 9875, 10521, 29048)
+ }
+ }
+};
+
+static const u16 default_partition_cdf[13][16] = {
+ {
+ AOM_CDF4(19132, 25510, 30392), AOM_CDF4(13928, 19855, 28540),
+ AOM_CDF4(12522, 23679, 28629), AOM_CDF4(9896, 18783, 25853),
+ AOM_CDF2(11570), AOM_CDF2(16855), AOM_CDF3(9413, 22581)
+ },
+ {
+ AOM_CDF10(15597, 20929, 24571, 26706, 27664, 28821, 29601, 30571, 31902)
+ },
+ {
+ AOM_CDF10(7925, 11043, 16785, 22470, 23971, 25043, 26651, 28701, 29834)
+ },
+ {
+ AOM_CDF10(5414, 13269, 15111, 20488, 22360, 24500, 25537, 26336, 32117)
+ },
+ {
+ AOM_CDF10(2662, 6362, 8614, 20860, 23053, 24778, 26436, 27829, 31171)
+ },
+ {
+ AOM_CDF10(18462, 20920, 23124, 27647, 28227, 29049, 29519, 30178, 31544)
+ },
+ {
+ AOM_CDF10(7689, 9060, 12056, 24992, 25660, 26182, 26951, 28041, 29052)
+ },
+ {
+ AOM_CDF10(6015, 9009, 10062, 24544, 25409, 26545, 27071, 27526, 32047)
+ },
+ {
+ AOM_CDF10(1394, 2208, 2796, 28614, 29061, 29466, 29840, 30185, 31899)
+ },
+ {
+ AOM_CDF10(20137, 21547, 23078, 29566, 29837, 30261, 30524, 30892, 31724),
+ AOM_CDF8(27899, 28219, 28529, 32484, 32539, 32619, 32639)
+ },
+ {
+ AOM_CDF10(6732, 7490, 9497, 27944, 28250, 28515, 28969, 29630, 30104),
+ AOM_CDF8(6607, 6990, 8268, 32060, 32219, 32338, 32371)
+ },
+ {
+ AOM_CDF10(5945, 7663, 8348, 28683, 29117, 29749, 30064, 30298, 32238),
+ AOM_CDF8(5429, 6676, 7122, 32027, 32227, 32531, 32582)
+ },
+ {
+ AOM_CDF10(870, 1212, 1487, 31198, 31394, 31574, 31743, 31881, 32332),
+ AOM_CDF8(711, 966, 1172, 32448, 32538, 32617, 32664)
+ },
+};
+
+static const u16 default_intra_ext_tx0_cdf[EXTTX_SIZES][AV1_INTRA_MODES][8] = {
+ {
+ { AOM_CDF7(1535, 8035, 9461, 12751, 23467, 27825)},
+ { AOM_CDF7(564, 3335, 9709, 10870, 18143, 28094)},
+ { AOM_CDF7(672, 3247, 3676, 11982, 19415, 23127)},
+ { AOM_CDF7(5279, 13885, 15487, 18044, 23527, 30252)},
+ { AOM_CDF7(4423, 6074, 7985, 10416, 25693, 29298)},
+ { AOM_CDF7(1486, 4241, 9460, 10662, 16456, 27694)},
+ { AOM_CDF7(439, 2838, 3522, 6737, 18058, 23754)},
+ { AOM_CDF7(1190, 4233, 4855, 11670, 20281, 24377)},
+ { AOM_CDF7(1045, 4312, 8647, 10159, 18644, 29335)},
+ { AOM_CDF7(202, 3734, 4747, 7298, 17127, 24016)},
+ { AOM_CDF7(447, 4312, 6819, 8884, 16010, 23858)},
+ { AOM_CDF7(277, 4369, 5255, 8905, 16465, 22271)},
+ { AOM_CDF7(3409, 5436, 10599, 15599, 19687, 24040)},
+ },
+ {
+ { AOM_CDF7(1870, 13742, 14530, 16498, 23770, 27698)},
+ { AOM_CDF7(326, 8796, 14632, 15079, 19272, 27486)},
+ { AOM_CDF7(484, 7576, 7712, 14443, 19159, 22591)},
+ { AOM_CDF7(1126, 15340, 15895, 17023, 20896, 30279)},
+ { AOM_CDF7(655, 4854, 5249, 5913, 22099, 27138)},
+ { AOM_CDF7(1299, 6458, 8885, 9290, 14851, 25497)},
+ { AOM_CDF7(311, 5295, 5552, 6885, 16107, 22672)},
+ { AOM_CDF7(883, 8059, 8270, 11258, 17289, 21549)},
+ { AOM_CDF7(741, 7580, 9318, 10345, 16688, 29046)},
+ { AOM_CDF7(110, 7406, 7915, 9195, 16041, 23329)},
+ { AOM_CDF7(363, 7974, 9357, 10673, 15629, 24474)},
+ { AOM_CDF7(153, 7647, 8112, 9936, 15307, 19996)},
+ { AOM_CDF7(3511, 6332, 11165, 15335, 19323, 23594)},
+ },
+ {
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ },
+ {
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ },
+};
+
+static const u16 default_intra_ext_tx1_cdf[EXTTX_SIZES][AV1_INTRA_MODES][4] = {
+ {
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ },
+ {
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ },
+ {
+ { AOM_CDF5(1127, 12814, 22772, 27483)},
+ { AOM_CDF5(145, 6761, 11980, 26667)},
+ { AOM_CDF5(362, 5887, 11678, 16725)},
+ { AOM_CDF5(385, 15213, 18587, 30693)},
+ { AOM_CDF5(25, 2914, 23134, 27903)},
+ { AOM_CDF5(60, 4470, 11749, 23991)},
+ { AOM_CDF5(37, 3332, 14511, 21448)},
+ { AOM_CDF5(157, 6320, 13036, 17439)},
+ { AOM_CDF5(119, 6719, 12906, 29396)},
+ { AOM_CDF5(47, 5537, 12576, 21499)},
+ { AOM_CDF5(269, 6076, 11258, 23115)},
+ { AOM_CDF5(83, 5615, 12001, 17228)},
+ { AOM_CDF5(1968, 5556, 12023, 18547)},
+ },
+ {
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ },
+};
+
+static const u16 default_inter_ext_tx_cdf[2][EXTTX_SIZES][EXT_TX_TYPES] = {
+ {
+ {
+ AOM_CDF16(4458, 5560, 7695, 9709, 13330, 14789, 17537, 20266,
+ 21504, 22848, 23934, 25474, 27727, 28915, 30631)
+ },
+ {
+ AOM_CDF16(1645, 2573, 4778, 5711, 7807, 8622, 10522, 15357, 17674,
+ 20408, 22517, 25010, 27116, 28856, 30749)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ },
+ {
+ {
+ AOM_CDF12(2731, 5461, 8192, 10923, 13653, 16384, 19115, 21845,
+ 24576, 27307, 30037),
+ AOM_CDF2(16384)
+ },
+ {
+ AOM_CDF12(2731, 5461, 8192, 10923, 13653, 16384, 19115, 21845,
+ 24576, 27307, 30037),
+ AOM_CDF2(4167)
+ },
+ {
+ AOM_CDF12(770, 2421, 5225, 12907, 15819, 18927, 21561, 24089,
+ 26595, 28526, 30529),
+ AOM_CDF2(1998)
+ },
+ {
+ AOM_CDF12(2731, 5461, 8192, 10923, 13653, 16384, 19115, 21845,
+ 24576, 27307, 30037),
+ AOM_CDF2(748)
+ },
+ }
+};
+
+static const u16 default_cfl_sign_cdf[CDF_SIZE(CFL_JOINT_SIGNS)] = {
+ AOM_CDF8(1418, 2123, 13340, 18405, 26972, 28343, 32294)
+};
+
+static const u16 default_cfl_alpha_cdf[CFL_ALPHA_CONTEXTS][CDF_SIZE(CFL_ALPHABET_SIZE)] = {
+ {
+ AOM_CDF16(7637, 20719, 31401, 32481, 32657, 32688, 32692, 32696, 32700,
+ 32704, 32708, 32712, 32716, 32720, 32724)
+ },
+ {
+ AOM_CDF16(14365, 23603, 28135, 31168, 32167, 32395, 32487, 32573,
+ 32620, 32647, 32668, 32672, 32676, 32680, 32684)
+ },
+ {
+ AOM_CDF16(11532, 22380, 28445, 31360, 32349, 32523, 32584, 32649,
+ 32673, 32677, 32681, 32685, 32689, 32693, 32697)
+ },
+ {
+ AOM_CDF16(26990, 31402, 32282, 32571, 32692, 32696, 32700, 32704,
+ 32708, 32712, 32716, 32720, 32724, 32728, 32732)
+ },
+ {
+ AOM_CDF16(17248, 26058, 28904, 30608, 31305, 31877, 32126, 32321,
+ 32394, 32464, 32516, 32560, 32576, 32593, 32622)
+ },
+ {
+ AOM_CDF16(14738, 21678, 25779, 27901, 29024, 30302, 30980, 31843,
+ 32144, 32413, 32520, 32594, 32622, 32656, 32660)
+ }
+};
+
+static const u16 default_switchable_interp_cdf[SWITCHABLE_FILTER_CONTEXTS]
+ [CDF_SIZE(AV1_SWITCHABLE_FILTERS)] = {
+ { AOM_CDF3(31935, 32720) }, { AOM_CDF3(5568, 32719) },
+ { AOM_CDF3(422, 2938) }, { AOM_CDF3(28244, 32608) },
+ { AOM_CDF3(31206, 31953) }, { AOM_CDF3(4862, 32121) },
+ { AOM_CDF3(770, 1152) }, { AOM_CDF3(20889, 25637) },
+ { AOM_CDF3(31910, 32724) }, { AOM_CDF3(4120, 32712) },
+ { AOM_CDF3(305, 2247) }, { AOM_CDF3(27403, 32636) },
+ { AOM_CDF3(31022, 32009) }, { AOM_CDF3(2963, 32093) },
+ { AOM_CDF3(601, 943) }, { AOM_CDF3(14969, 21398) }
+};
+
+static const u16 default_newmv_cdf[NEWMV_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(24035) }, { AOM_CDF2(16630) }, { AOM_CDF2(15339) },
+ { AOM_CDF2(8386) }, { AOM_CDF2(12222) }, { AOM_CDF2(4676) }
+};
+
+static const u16 default_zeromv_cdf[GLOBALMV_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(2175) }, { AOM_CDF2(1054) }
+};
+
+static const u16 default_refmv_cdf[REFMV_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(23974) }, { AOM_CDF2(24188) }, { AOM_CDF2(17848) },
+ { AOM_CDF2(28622) }, { AOM_CDF2(24312) }, { AOM_CDF2(19923) }
+};
+
+static const u16 default_drl_cdf[DRL_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(13104) }, { AOM_CDF2(24560) }, { AOM_CDF2(18945) }
+};
+
+static const u16 default_inter_compound_mode_cdf[AV1_INTER_MODE_CONTEXTS]
+ [CDF_SIZE(INTER_COMPOUND_MODES)] = {
+ { AOM_CDF8(7760, 13823, 15808, 17641, 19156, 20666, 26891) },
+ { AOM_CDF8(10730, 19452, 21145, 22749, 24039, 25131, 28724) },
+ { AOM_CDF8(10664, 20221, 21588, 22906, 24295, 25387, 28436) },
+ { AOM_CDF8(13298, 16984, 20471, 24182, 25067, 25736, 26422) },
+ { AOM_CDF8(18904, 23325, 25242, 27432, 27898, 28258, 30758) },
+ { AOM_CDF8(10725, 17454, 20124, 22820, 24195, 25168, 26046) },
+ { AOM_CDF8(17125, 24273, 25814, 27492, 28214, 28704, 30592) },
+ { AOM_CDF8(13046, 23214, 24505, 25942, 27435, 28442, 29330) }
+};
+
+static const u16 default_interintra_cdf[BLOCK_SIZE_GROUPS][CDF_SIZE(2)] = {
+ { AOM_CDF2(16384) }, { AOM_CDF2(26887) }, { AOM_CDF2(27597) },
+ { AOM_CDF2(30237) }
+};
+
+static const u16 default_interintra_mode_cdf[BLOCK_SIZE_GROUPS][CDF_SIZE(INTERINTRA_MODES)] = {
+ { AOM_CDF4(8192, 16384, 24576) },
+ { AOM_CDF4(1875, 11082, 27332) },
+ { AOM_CDF4(2473, 9996, 26388) },
+ { AOM_CDF4(4238, 11537, 25926) }
+};
+
+static const u16 default_wedge_interintra_cdf[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(20036) }, { AOM_CDF2(24957) }, { AOM_CDF2(26704) },
+ { AOM_CDF2(27530) }, { AOM_CDF2(29564) }, { AOM_CDF2(29444) },
+ { AOM_CDF2(26872) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }
+};
+
+static const u16 default_compound_type_cdf[BLOCK_SIZES_ALL][CDF_SIZE(COMPOUND_TYPES - 1)] = {
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(23431) },
+ { AOM_CDF2(13171) }, { AOM_CDF2(11470) }, { AOM_CDF2(9770) },
+ { AOM_CDF2(9100) },
+ { AOM_CDF2(8233) }, { AOM_CDF2(6172) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(11820) },
+ { AOM_CDF2(7701) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }
+};
+
+static const u16 default_wedge_idx_cdf[BLOCK_SIZES_ALL][CDF_SIZE(16)] = {
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2438, 4440, 6599, 8663, 11005, 12874, 15751, 18094,
+ 20359, 22362, 24127, 25702, 27752, 29450, 31171)
+ },
+ {
+ AOM_CDF16(806, 3266, 6005, 6738, 7218, 7367, 7771, 14588, 16323,
+ 17367, 18452, 19422, 22839, 26127, 29629)
+ },
+ {
+ AOM_CDF16(2779, 3738, 4683, 7213, 7775, 8017, 8655, 14357, 17939,
+ 21332, 24520, 27470, 29456, 30529, 31656)
+ },
+ {
+ AOM_CDF16(1684, 3625, 5675, 7108, 9302, 11274, 14429, 17144, 19163,
+ 20961, 22884, 24471, 26719, 28714, 30877)
+ },
+ {
+ AOM_CDF16(1142, 3491, 6277, 7314, 8089, 8355, 9023, 13624, 15369,
+ 16730, 18114, 19313, 22521, 26012, 29550)
+ },
+ {
+ AOM_CDF16(2742, 4195, 5727, 8035, 8980, 9336, 10146, 14124, 17270,
+ 20533, 23434, 25972, 27944, 29570, 31416)
+ },
+ {
+ AOM_CDF16(1727, 3948, 6101, 7796, 9841, 12344, 15766, 18944, 20638,
+ 22038, 23963, 25311, 26988, 28766, 31012)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(154, 987, 1925, 2051, 2088, 2111, 2151, 23033, 23703, 24284,
+ 24985, 25684, 27259, 28883, 30911)
+ },
+ {
+ AOM_CDF16(1135, 1322, 1493, 2635, 2696, 2737, 2770, 21016, 22935,
+ 25057, 27251, 29173, 30089, 30960, 31933)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ }
+};
+
+static const u16 default_motion_mode_cdf[BLOCK_SIZES_ALL][CDF_SIZE(MOTION_MODES)] = {
+ { AOM_CDF3(10923, 21845) }, { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) }, { AOM_CDF3(7651, 24760) },
+ { AOM_CDF3(4738, 24765) }, { AOM_CDF3(5391, 25528) },
+ { AOM_CDF3(19419, 26810) }, { AOM_CDF3(5123, 23606) },
+ { AOM_CDF3(11606, 24308) }, { AOM_CDF3(26260, 29116) },
+ { AOM_CDF3(20360, 28062) }, { AOM_CDF3(21679, 26830) },
+ { AOM_CDF3(29516, 30701) }, { AOM_CDF3(28898, 30397) },
+ { AOM_CDF3(30878, 31335) }, { AOM_CDF3(32507, 32558) },
+ { AOM_CDF3(10923, 21845) }, { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(28799, 31390) }, { AOM_CDF3(26431, 30774) },
+ { AOM_CDF3(28973, 31594) }, { AOM_CDF3(29742, 31203) }
+};
+
+static const u16 default_obmc_cdf[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(10437) },
+ { AOM_CDF2(9371) }, { AOM_CDF2(9301) }, { AOM_CDF2(17432) },
+ { AOM_CDF2(14423) },
+ { AOM_CDF2(15142) }, { AOM_CDF2(25817) }, { AOM_CDF2(22823) },
+ { AOM_CDF2(22083) },
+ { AOM_CDF2(30128) }, { AOM_CDF2(31014) }, { AOM_CDF2(31560) },
+ { AOM_CDF2(32638) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(23664) },
+ { AOM_CDF2(20901) },
+ { AOM_CDF2(24008) }, { AOM_CDF2(26879) }
+};
+
+static const u16 default_intra_inter_cdf[INTRA_INTER_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(806) },
+ { AOM_CDF2(16662) },
+ { AOM_CDF2(20186) },
+ { AOM_CDF2(26538) }
+};
+
+static const u16 default_comp_inter_cdf[COMP_INTER_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(26828) },
+ { AOM_CDF2(24035) },
+ { AOM_CDF2(12031) },
+ { AOM_CDF2(10640) },
+ { AOM_CDF2(2901) }
+};
+
+static const u16 default_comp_ref_type_cdf[COMP_REF_TYPE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(1198) },
+ { AOM_CDF2(2070) },
+ { AOM_CDF2(9166) },
+ { AOM_CDF2(7499) },
+ { AOM_CDF2(22475) }
+};
+
+static const u16 default_uni_comp_ref_cdf[UNI_COMP_REF_CONTEXTS]
+ [UNIDIR_COMP_REFS - 1][CDF_SIZE(2)] = {
+ { { AOM_CDF2(5284)}, { AOM_CDF2(3865)}, { AOM_CDF2(3128)} },
+ { { AOM_CDF2(23152)}, { AOM_CDF2(14173)}, { AOM_CDF2(15270)} },
+ { { AOM_CDF2(31774)}, { AOM_CDF2(25120)}, { AOM_CDF2(26710)} }
+};
+
+static const u16 default_single_ref_cdf[REF_CONTEXTS][SINGLE_REFS - 1][CDF_SIZE(2)] = {
+ {
+ { AOM_CDF2(4897)},
+ { AOM_CDF2(1555)},
+ { AOM_CDF2(4236)},
+ { AOM_CDF2(8650)},
+ { AOM_CDF2(904)},
+ { AOM_CDF2(1444)}
+ },
+ {
+ { AOM_CDF2(16973)},
+ { AOM_CDF2(16751)},
+ { AOM_CDF2(19647)},
+ { AOM_CDF2(24773)},
+ { AOM_CDF2(11014)},
+ { AOM_CDF2(15087)}
+ },
+ {
+ { AOM_CDF2(29744)},
+ { AOM_CDF2(30279)},
+ { AOM_CDF2(31194)},
+ { AOM_CDF2(31895)},
+ { AOM_CDF2(26875)},
+ { AOM_CDF2(30304)}
+ }
+};
+
+static const u16 default_comp_ref_cdf[REF_CONTEXTS][FWD_REFS - 1][CDF_SIZE(2)] = {
+ { { AOM_CDF2(4946)}, { AOM_CDF2(9468)}, { AOM_CDF2(1503)} },
+ { { AOM_CDF2(19891)}, { AOM_CDF2(22441)}, { AOM_CDF2(15160)} },
+ { { AOM_CDF2(30731)}, { AOM_CDF2(31059)}, { AOM_CDF2(27544)} }
+};
+
+static const u16 default_comp_bwdref_cdf[REF_CONTEXTS][BWD_REFS - 1][CDF_SIZE(2)] = {
+ { { AOM_CDF2(2235)}, { AOM_CDF2(1423)} },
+ { { AOM_CDF2(17182)}, { AOM_CDF2(15175)} },
+ { { AOM_CDF2(30606)}, { AOM_CDF2(30489)} }
+};
+
+static const u16 default_palette_y_size_cdf[PALETTE_BLOCK_SIZES][CDF_SIZE(PALETTE_SIZES)] = {
+ { AOM_CDF7(7952, 13000, 18149, 21478, 25527, 29241) },
+ { AOM_CDF7(7139, 11421, 16195, 19544, 23666, 28073) },
+ { AOM_CDF7(7788, 12741, 17325, 20500, 24315, 28530) },
+ { AOM_CDF7(8271, 14064, 18246, 21564, 25071, 28533) },
+ { AOM_CDF7(12725, 19180, 21863, 24839, 27535, 30120) },
+ { AOM_CDF7(9711, 14888, 16923, 21052, 25661, 27875) },
+ { AOM_CDF7(14940, 20797, 21678, 24186, 27033, 28999) }
+};
+
+static const u16 default_palette_uv_size_cdf[PALETTE_BLOCK_SIZES][CDF_SIZE(PALETTE_SIZES)] = {
+ { AOM_CDF7(8713, 19979, 27128, 29609, 31331, 32272) },
+ { AOM_CDF7(5839, 15573, 23581, 26947, 29848, 31700) },
+ { AOM_CDF7(4426, 11260, 17999, 21483, 25863, 29430) },
+ { AOM_CDF7(3228, 9464, 14993, 18089, 22523, 27420) },
+ { AOM_CDF7(3768, 8886, 13091, 17852, 22495, 27207) },
+ { AOM_CDF7(2464, 8451, 12861, 21632, 25525, 28555) },
+ { AOM_CDF7(1269, 5435, 10433, 18963, 21700, 25865) }
+};
+
+static const u16 default_palette_y_mode_cdf[PALETTE_BLOCK_SIZES]
+ [PALETTE_Y_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { { AOM_CDF2(31676)}, { AOM_CDF2(3419)}, { AOM_CDF2(1261)} },
+ { { AOM_CDF2(31912)}, { AOM_CDF2(2859)}, { AOM_CDF2(980)} },
+ { { AOM_CDF2(31823)}, { AOM_CDF2(3400)}, { AOM_CDF2(781)} },
+ { { AOM_CDF2(32030)}, { AOM_CDF2(3561)}, { AOM_CDF2(904)} },
+ { { AOM_CDF2(32309)}, { AOM_CDF2(7337)}, { AOM_CDF2(1462)} },
+ { { AOM_CDF2(32265)}, { AOM_CDF2(4015)}, { AOM_CDF2(1521)} },
+ { { AOM_CDF2(32450)}, { AOM_CDF2(7946)}, { AOM_CDF2(129)} }
+};
+
+static const u16 default_palette_uv_mode_cdf[PALETTE_UV_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(32461) }, { AOM_CDF2(21488) }
+};
+
+static const u16 default_palette_y_color_index_cdf[PALETTE_IDX_CONTEXTS][8] = {
+ // Palette sizes 2 & 8
+ {
+ AOM_CDF2(28710),
+ AOM_CDF8(21689, 23883, 25163, 26352, 27506, 28827, 30195)
+ },
+ {
+ AOM_CDF2(16384),
+ AOM_CDF8(6892, 15385, 17840, 21606, 24287, 26753, 29204)
+ },
+ {
+ AOM_CDF2(10553),
+ AOM_CDF8(5651, 23182, 25042, 26518, 27982, 29392, 30900)
+ },
+ {
+ AOM_CDF2(27036),
+ AOM_CDF8(19349, 22578, 24418, 25994, 27524, 29031, 30448)
+ },
+ {
+ AOM_CDF2(31603),
+ AOM_CDF8(31028, 31270, 31504, 31705, 31927, 32153, 32392)
+ },
+ // Palette sizes 3 & 7
+ {
+ AOM_CDF3(27877, 30490),
+ AOM_CDF7(23105, 25199, 26464, 27684, 28931, 30318)
+ },
+ {
+ AOM_CDF3(11532, 25697),
+ AOM_CDF7(6950, 15447, 18952, 22681, 25567, 28563)
+ },
+ {
+ AOM_CDF3(6544, 30234),
+ AOM_CDF7(7560, 23474, 25490, 27203, 28921, 30708)
+ },
+ {
+ AOM_CDF3(23018, 28072),
+ AOM_CDF7(18544, 22373, 24457, 26195, 28119, 30045)
+ },
+ {
+ AOM_CDF3(31915, 32385),
+ AOM_CDF7(31198, 31451, 31670, 31882, 32123, 32391)
+ },
+ // Palette sizes 4 & 6
+ {
+ AOM_CDF4(25572, 28046, 30045),
+ AOM_CDF6(23132, 25407, 26970, 28435, 30073)
+ },
+ {
+ AOM_CDF4(9478, 21590, 27256),
+ AOM_CDF6(7443, 17242, 20717, 24762, 27982)
+ },
+ {
+ AOM_CDF4(7248, 26837, 29824),
+ AOM_CDF6(6300, 24862, 26944, 28784, 30671)
+ },
+ {
+ AOM_CDF4(19167, 24486, 28349),
+ AOM_CDF6(18916, 22895, 25267, 27435, 29652)
+ },
+ {
+ AOM_CDF4(31400, 31825, 32250),
+ AOM_CDF6(31270, 31550, 31808, 32059, 32353)
+ },
+ // Palette size 5
+ {
+ AOM_CDF5(24779, 26955, 28576, 30282),
+ AOM_CDF5(8669, 20364, 24073, 28093)
+ },
+ {
+ AOM_CDF5(4255, 27565, 29377, 31067),
+ AOM_CDF5(19864, 23674, 26716, 29530)
+ },
+ {
+ AOM_CDF5(31646, 31893, 32147, 32426),
+ 0, 0, 0, 0
+ }
+};
+
+static const u16 default_palette_uv_color_index_cdf[PALETTE_IDX_CONTEXTS][8] = {
+ // Palette sizes 2 & 8
+ {
+ AOM_CDF2(29089),
+ AOM_CDF8(21442, 23288, 24758, 26246, 27649, 28980, 30563)
+ },
+ {
+ AOM_CDF2(16384),
+ AOM_CDF8(5863, 14933, 17552, 20668, 23683, 26411, 29273)
+ },
+ {
+ AOM_CDF2(8713),
+ AOM_CDF8(3415, 25810, 26877, 27990, 29223, 30394, 31618)
+ },
+ {
+ AOM_CDF2(29257),
+ AOM_CDF8(17965, 20084, 22232, 23974, 26274, 28402, 30390)
+ },
+ {
+ AOM_CDF2(31610),
+ AOM_CDF8(31190, 31329, 31516, 31679, 31825, 32026, 32322)
+ },
+ // Palette sizes 3 & 7
+ {
+ AOM_CDF3(25257, 29145),
+ AOM_CDF7(21239, 23168, 25044, 26962, 28705, 30506)
+ },
+ {
+ AOM_CDF3(12287, 27293),
+ AOM_CDF7(6545, 15012, 18004, 21817, 25503, 28701)
+ },
+ {
+ AOM_CDF3(7033, 27960),
+ AOM_CDF7(3448, 26295, 27437, 28704, 30126, 31442)
+ },
+ {
+ AOM_CDF3(20145, 25405),
+ AOM_CDF7(15889, 18323, 21704, 24698, 26976, 29690)
+ },
+ {
+ AOM_CDF3(30608, 31639),
+ AOM_CDF7(30988, 31204, 31479, 31734, 31983, 32325)
+ },
+ // Palette sizes 4 & 6
+ {
+ AOM_CDF4(24210, 27175, 29903),
+ AOM_CDF6(22217, 24567, 26637, 28683, 30548)
+ },
+ {
+ AOM_CDF4(9888, 22386, 27214),
+ AOM_CDF6(7307, 16406, 19636, 24632, 28424)
+ },
+ {
+ AOM_CDF4(5901, 26053, 29293),
+ AOM_CDF6(4441, 25064, 26879, 28942, 30919)
+ },
+ {
+ AOM_CDF4(18318, 22152, 28333),
+ AOM_CDF6(17210, 20528, 23319, 26750, 29582)
+ },
+ {
+ AOM_CDF4(30459, 31136, 31926),
+ AOM_CDF6(30674, 30953, 31396, 31735, 32207)
+ },
+ // Palette size 5
+ {
+ AOM_CDF5(22980, 25479, 27781, 29986),
+ AOM_CDF5(8413, 21408, 24859, 28874)
+ },
+ {
+ AOM_CDF5(2257, 29449, 30594, 31598),
+ AOM_CDF5(19189, 21202, 25915, 28620)
+ },
+ {
+ AOM_CDF5(31844, 32044, 32281, 32518),
+ 0, 0, 0, 0
+ }
+};
+
+static const u16 default_txfm_partition_cdf[TXFM_PARTITION_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(28581) }, { AOM_CDF2(23846) }, { AOM_CDF2(20847) },
+ { AOM_CDF2(24315) }, { AOM_CDF2(18196) }, { AOM_CDF2(12133) },
+ { AOM_CDF2(18791) }, { AOM_CDF2(10887) }, { AOM_CDF2(11005) },
+ { AOM_CDF2(27179) }, { AOM_CDF2(20004) }, { AOM_CDF2(11281) },
+ { AOM_CDF2(26549) }, { AOM_CDF2(19308) }, { AOM_CDF2(14224) },
+ { AOM_CDF2(28015) }, { AOM_CDF2(21546) }, { AOM_CDF2(14400) },
+ { AOM_CDF2(28165) }, { AOM_CDF2(22401) }, { AOM_CDF2(16088) }
+};
+
+static const u16 default_skip_cdfs[SKIP_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(31671) }, { AOM_CDF2(16515) }, { AOM_CDF2(4576) }
+};
+
+static const u16 default_skip_mode_cdfs[SKIP_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(32621) }, { AOM_CDF2(20708) }, { AOM_CDF2(8127) }
+};
+
+static const u16 default_compound_idx_cdfs[COMP_INDEX_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(18244) }, { AOM_CDF2(12865) }, { AOM_CDF2(7053) },
+ { AOM_CDF2(13259) }, { AOM_CDF2(9334) }, { AOM_CDF2(4644) }
+};
+
+static const u16 default_comp_group_idx_cdfs[COMP_GROUP_IDX_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(26607) }, { AOM_CDF2(22891) }, { AOM_CDF2(18840) },
+ { AOM_CDF2(24594) }, { AOM_CDF2(19934) }, { AOM_CDF2(22674) }
+};
+
+static const u16 default_intrabc_cdf[CDF_SIZE(2)] = { AOM_CDF2(30531) };
+
+static const u16 default_filter_intra_mode_cdf[CDF_SIZE(FILTER_INTRA_MODES)] = {
+ AOM_CDF5(8949, 12776, 17211, 29558)
+};
+
+static const u16 default_filter_intra_cdfs[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
+ { AOM_CDF2(4621) }, { AOM_CDF2(6743) }, { AOM_CDF2(5893) }, { AOM_CDF2(7866) },
+ { AOM_CDF2(12551) }, { AOM_CDF2(9394) }, { AOM_CDF2(12408) }, { AOM_CDF2(14301) },
+ { AOM_CDF2(12756) }, { AOM_CDF2(22343) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(12770) }, { AOM_CDF2(10368) }, { AOM_CDF2(20229) }, { AOM_CDF2(18101) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }
+};
+
+static const u16 default_delta_q_cdf[CDF_SIZE(DELTA_Q_PROBS + 1)] = {
+ AOM_CDF4(28160, 32120, 32677)
+};
+
+static const u16 default_delta_lf_multi_cdf[FRAME_LF_COUNT][CDF_SIZE(DELTA_LF_PROBS + 1)] = {
+ { AOM_CDF4(28160, 32120, 32677) },
+ { AOM_CDF4(28160, 32120, 32677) },
+ { AOM_CDF4(28160, 32120, 32677) },
+ { AOM_CDF4(28160, 32120, 32677) }
+};
+
+static const u16 default_delta_lf_cdf[CDF_SIZE(DELTA_LF_PROBS + 1)] = {
+ AOM_CDF4(28160, 32120, 32677)
+};
+
+static const u16 default_segment_pred_cdf[SEG_TEMPORAL_PRED_CTXS][CDF_SIZE(2)] = {
+ { AOM_CDF2(128 * 128) },
+ { AOM_CDF2(128 * 128) },
+ { AOM_CDF2(128 * 128) }
+};
+
+static const u16 default_spatial_pred_seg_tree_cdf[SPATIAL_PREDICTION_PROBS]
+ [CDF_SIZE(MAX_SEGMENTS)] = {
+ {
+ AOM_CDF8(5622, 7893, 16093, 18233, 27809, 28373, 32533),
+ },
+ {
+ AOM_CDF8(14274, 18230, 22557, 24935, 29980, 30851, 32344),
+ },
+ {
+ AOM_CDF8(27527, 28487, 28723, 28890, 32397, 32647, 32679),
+ },
+};
+
+static const u16 default_tx_size_cdf[MAX_TX_CATS]
+ [AV1_TX_SIZE_CONTEXTS][CDF_SIZE(MAX_TX_DEPTH + 1)] = {
+ {
+ { AOM_CDF2(19968)},
+ { AOM_CDF2(19968)},
+ { AOM_CDF2(24320)}
+ },
+ {
+ { AOM_CDF3(12272, 30172)},
+ { AOM_CDF3(12272, 30172)},
+ { AOM_CDF3(18677, 30848)}
+ },
+ {
+ { AOM_CDF3(12986, 15180)},
+ { AOM_CDF3(12986, 15180)},
+ { AOM_CDF3(24302, 25602)}
+ },
+ {
+ { AOM_CDF3(5782, 11475)},
+ { AOM_CDF3(5782, 11475)},
+ { AOM_CDF3(16803, 22759)}
+ },
+};
+
+static const u16 av1_default_dc_sign_cdfs[TOKEN_CDF_Q_CTXS]
+ [PLANE_TYPES][DC_SIGN_CONTEXTS][CDF_SIZE(2)] = {
+ {
+ {
+ { AOM_CDF2(128 * 125)},
+ { AOM_CDF2(128 * 102)},
+ { AOM_CDF2(128 * 147)},
+ },
+ {
+ { AOM_CDF2(128 * 119)},
+ { AOM_CDF2(128 * 101)},
+ { AOM_CDF2(128 * 135)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(128 * 125)},
+ { AOM_CDF2(128 * 102)},
+ { AOM_CDF2(128 * 147)},
+ },
+ {
+ { AOM_CDF2(128 * 119)},
+ { AOM_CDF2(128 * 101)},
+ { AOM_CDF2(128 * 135)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(128 * 125)},
+ { AOM_CDF2(128 * 102)},
+ { AOM_CDF2(128 * 147)},
+ },
+ {
+ { AOM_CDF2(128 * 119)},
+ { AOM_CDF2(128 * 101)},
+ { AOM_CDF2(128 * 135)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(128 * 125)},
+ { AOM_CDF2(128 * 102)},
+ { AOM_CDF2(128 * 147)},
+ },
+ {
+ { AOM_CDF2(128 * 119)},
+ { AOM_CDF2(128 * 101)},
+ { AOM_CDF2(128 * 135)},
+ }
+ },
+};
+
+static const u16 av1_default_txb_skip_cdfs[TOKEN_CDF_Q_CTXS]
+ [TX_SIZES][TXB_SKIP_CONTEXTS][CDF_SIZE(2)] = {
+ {
+ {
+ { AOM_CDF2(31849)},
+ { AOM_CDF2(5892)},
+ { AOM_CDF2(12112)},
+ { AOM_CDF2(21935)},
+ { AOM_CDF2(20289)},
+ { AOM_CDF2(27473)},
+ { AOM_CDF2(32487)},
+ { AOM_CDF2(7654)},
+ { AOM_CDF2(19473)},
+ { AOM_CDF2(29984)},
+ { AOM_CDF2(9961)},
+ { AOM_CDF2(30242)},
+ { AOM_CDF2(32117)}
+ },
+ {
+ { AOM_CDF2(31548)},
+ { AOM_CDF2(1549)},
+ { AOM_CDF2(10130)},
+ { AOM_CDF2(16656)},
+ { AOM_CDF2(18591)},
+ { AOM_CDF2(26308)},
+ { AOM_CDF2(32537)},
+ { AOM_CDF2(5403)},
+ { AOM_CDF2(18096)},
+ { AOM_CDF2(30003)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(29957)},
+ { AOM_CDF2(5391)},
+ { AOM_CDF2(18039)},
+ { AOM_CDF2(23566)},
+ { AOM_CDF2(22431)},
+ { AOM_CDF2(25822)},
+ { AOM_CDF2(32197)},
+ { AOM_CDF2(3778)},
+ { AOM_CDF2(15336)},
+ { AOM_CDF2(28981)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(17920)},
+ { AOM_CDF2(1818)},
+ { AOM_CDF2(7282)},
+ { AOM_CDF2(25273)},
+ { AOM_CDF2(10923)},
+ { AOM_CDF2(31554)},
+ { AOM_CDF2(32624)},
+ { AOM_CDF2(1366)},
+ { AOM_CDF2(15628)},
+ { AOM_CDF2(30462)},
+ { AOM_CDF2(146)},
+ { AOM_CDF2(5132)},
+ { AOM_CDF2(31657)}
+ },
+ {
+ { AOM_CDF2(6308)},
+ { AOM_CDF2(117)},
+ { AOM_CDF2(1638)},
+ { AOM_CDF2(2161)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(10923)},
+ { AOM_CDF2(30247)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ }
+ },
+ {
+ {
+ { AOM_CDF2(30371)},
+ { AOM_CDF2(7570)},
+ { AOM_CDF2(13155)},
+ { AOM_CDF2(20751)},
+ { AOM_CDF2(20969)},
+ { AOM_CDF2(27067)},
+ { AOM_CDF2(32013)},
+ { AOM_CDF2(5495)},
+ { AOM_CDF2(17942)},
+ { AOM_CDF2(28280)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31782)},
+ { AOM_CDF2(1836)},
+ { AOM_CDF2(10689)},
+ { AOM_CDF2(17604)},
+ { AOM_CDF2(21622)},
+ { AOM_CDF2(27518)},
+ { AOM_CDF2(32399)},
+ { AOM_CDF2(4419)},
+ { AOM_CDF2(16294)},
+ { AOM_CDF2(28345)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31901)},
+ { AOM_CDF2(10311)},
+ { AOM_CDF2(18047)},
+ { AOM_CDF2(24806)},
+ { AOM_CDF2(23288)},
+ { AOM_CDF2(27914)},
+ { AOM_CDF2(32296)},
+ { AOM_CDF2(4215)},
+ { AOM_CDF2(15756)},
+ { AOM_CDF2(28341)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(26726)},
+ { AOM_CDF2(1045)},
+ { AOM_CDF2(11703)},
+ { AOM_CDF2(20590)},
+ { AOM_CDF2(18554)},
+ { AOM_CDF2(25970)},
+ { AOM_CDF2(31938)},
+ { AOM_CDF2(5583)},
+ { AOM_CDF2(21313)},
+ { AOM_CDF2(29390)},
+ { AOM_CDF2(641)},
+ { AOM_CDF2(22265)},
+ { AOM_CDF2(31452)}
+ },
+ {
+ { AOM_CDF2(26584)},
+ { AOM_CDF2(188)},
+ { AOM_CDF2(8847)},
+ { AOM_CDF2(24519)},
+ { AOM_CDF2(22938)},
+ { AOM_CDF2(30583)},
+ { AOM_CDF2(32608)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ }
+ },
+ {
+ {
+ { AOM_CDF2(29614)},
+ { AOM_CDF2(9068)},
+ { AOM_CDF2(12924)},
+ { AOM_CDF2(19538)},
+ { AOM_CDF2(17737)},
+ { AOM_CDF2(24619)},
+ { AOM_CDF2(30642)},
+ { AOM_CDF2(4119)},
+ { AOM_CDF2(16026)},
+ { AOM_CDF2(25657)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31957)},
+ { AOM_CDF2(3230)},
+ { AOM_CDF2(11153)},
+ { AOM_CDF2(18123)},
+ { AOM_CDF2(20143)},
+ { AOM_CDF2(26536)},
+ { AOM_CDF2(31986)},
+ { AOM_CDF2(3050)},
+ { AOM_CDF2(14603)},
+ { AOM_CDF2(25155)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(32363)},
+ { AOM_CDF2(10692)},
+ { AOM_CDF2(19090)},
+ { AOM_CDF2(24357)},
+ { AOM_CDF2(24442)},
+ { AOM_CDF2(28312)},
+ { AOM_CDF2(32169)},
+ { AOM_CDF2(3648)},
+ { AOM_CDF2(15690)},
+ { AOM_CDF2(26815)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(30669)},
+ { AOM_CDF2(3832)},
+ { AOM_CDF2(11663)},
+ { AOM_CDF2(18889)},
+ { AOM_CDF2(19782)},
+ { AOM_CDF2(23313)},
+ { AOM_CDF2(31330)},
+ { AOM_CDF2(5124)},
+ { AOM_CDF2(18719)},
+ { AOM_CDF2(28468)},
+ { AOM_CDF2(3082)},
+ { AOM_CDF2(20982)},
+ { AOM_CDF2(29443)}
+ },
+ {
+ { AOM_CDF2(28573)},
+ { AOM_CDF2(3183)},
+ { AOM_CDF2(17802)},
+ { AOM_CDF2(25977)},
+ { AOM_CDF2(26677)},
+ { AOM_CDF2(27832)},
+ { AOM_CDF2(32387)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ }
+ },
+ {
+ {
+ { AOM_CDF2(26887)},
+ { AOM_CDF2(6729)},
+ { AOM_CDF2(10361)},
+ { AOM_CDF2(17442)},
+ { AOM_CDF2(15045)},
+ { AOM_CDF2(22478)},
+ { AOM_CDF2(29072)},
+ { AOM_CDF2(2713)},
+ { AOM_CDF2(11861)},
+ { AOM_CDF2(20773)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31903)},
+ { AOM_CDF2(2044)},
+ { AOM_CDF2(7528)},
+ { AOM_CDF2(14618)},
+ { AOM_CDF2(16182)},
+ { AOM_CDF2(24168)},
+ { AOM_CDF2(31037)},
+ { AOM_CDF2(2786)},
+ { AOM_CDF2(11194)},
+ { AOM_CDF2(20155)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(32510)},
+ { AOM_CDF2(8430)},
+ { AOM_CDF2(17318)},
+ { AOM_CDF2(24154)},
+ { AOM_CDF2(23674)},
+ { AOM_CDF2(28789)},
+ { AOM_CDF2(32139)},
+ { AOM_CDF2(3440)},
+ { AOM_CDF2(13117)},
+ { AOM_CDF2(22702)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31671)},
+ { AOM_CDF2(2056)},
+ { AOM_CDF2(11746)},
+ { AOM_CDF2(16852)},
+ { AOM_CDF2(18635)},
+ { AOM_CDF2(24715)},
+ { AOM_CDF2(31484)},
+ { AOM_CDF2(4656)},
+ { AOM_CDF2(16074)},
+ { AOM_CDF2(24704)},
+ { AOM_CDF2(1806)},
+ { AOM_CDF2(14645)},
+ { AOM_CDF2(25336)}
+ },
+ {
+ { AOM_CDF2(31539)},
+ { AOM_CDF2(8433)},
+ { AOM_CDF2(20576)},
+ { AOM_CDF2(27904)},
+ { AOM_CDF2(27852)},
+ { AOM_CDF2(30026)},
+ { AOM_CDF2(32441)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_extra_cdfs[TOKEN_CDF_Q_CTXS][TX_SIZES][PLANE_TYPES]
+ [EOB_COEF_CONTEXTS][CDF_SIZE(2)] = {
+ {
+ {
+ {
+ { AOM_CDF2(16961)},
+ { AOM_CDF2(17223)},
+ { AOM_CDF2(7621)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(19069)},
+ { AOM_CDF2(22525)},
+ { AOM_CDF2(13377)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(20401)},
+ { AOM_CDF2(17025)},
+ { AOM_CDF2(12845)},
+ { AOM_CDF2(12873)},
+ { AOM_CDF2(14094)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20681)},
+ { AOM_CDF2(20701)},
+ { AOM_CDF2(15250)},
+ { AOM_CDF2(15017)},
+ { AOM_CDF2(14928)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(23905)},
+ { AOM_CDF2(17194)},
+ { AOM_CDF2(16170)},
+ { AOM_CDF2(17695)},
+ { AOM_CDF2(13826)},
+ { AOM_CDF2(15810)},
+ { AOM_CDF2(12036)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(23959)},
+ { AOM_CDF2(20799)},
+ { AOM_CDF2(19021)},
+ { AOM_CDF2(16203)},
+ { AOM_CDF2(17886)},
+ { AOM_CDF2(14144)},
+ { AOM_CDF2(12010)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(27399)},
+ { AOM_CDF2(16327)},
+ { AOM_CDF2(18071)},
+ { AOM_CDF2(19584)},
+ { AOM_CDF2(20721)},
+ { AOM_CDF2(18432)},
+ { AOM_CDF2(19560)},
+ { AOM_CDF2(10150)},
+ { AOM_CDF2(8805)},
+ },
+ {
+ { AOM_CDF2(24932)},
+ { AOM_CDF2(20833)},
+ { AOM_CDF2(12027)},
+ { AOM_CDF2(16670)},
+ { AOM_CDF2(19914)},
+ { AOM_CDF2(15106)},
+ { AOM_CDF2(17662)},
+ { AOM_CDF2(13783)},
+ { AOM_CDF2(28756)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(23406)},
+ { AOM_CDF2(21845)},
+ { AOM_CDF2(18432)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(17096)},
+ { AOM_CDF2(12561)},
+ { AOM_CDF2(17320)},
+ { AOM_CDF2(22395)},
+ { AOM_CDF2(21370)},
+ },
+ {
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF2(17471)},
+ { AOM_CDF2(20223)},
+ { AOM_CDF2(11357)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20335)},
+ { AOM_CDF2(21667)},
+ { AOM_CDF2(14818)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(20430)},
+ { AOM_CDF2(20662)},
+ { AOM_CDF2(15367)},
+ { AOM_CDF2(16970)},
+ { AOM_CDF2(14657)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(22117)},
+ { AOM_CDF2(22028)},
+ { AOM_CDF2(18650)},
+ { AOM_CDF2(16042)},
+ { AOM_CDF2(15885)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(22409)},
+ { AOM_CDF2(21012)},
+ { AOM_CDF2(15650)},
+ { AOM_CDF2(17395)},
+ { AOM_CDF2(15469)},
+ { AOM_CDF2(20205)},
+ { AOM_CDF2(19511)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(24220)},
+ { AOM_CDF2(22480)},
+ { AOM_CDF2(17737)},
+ { AOM_CDF2(18916)},
+ { AOM_CDF2(19268)},
+ { AOM_CDF2(18412)},
+ { AOM_CDF2(18844)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(25991)},
+ { AOM_CDF2(20314)},
+ { AOM_CDF2(17731)},
+ { AOM_CDF2(19678)},
+ { AOM_CDF2(18649)},
+ { AOM_CDF2(17307)},
+ { AOM_CDF2(21798)},
+ { AOM_CDF2(17549)},
+ { AOM_CDF2(15630)},
+ },
+ {
+ { AOM_CDF2(26585)},
+ { AOM_CDF2(21469)},
+ { AOM_CDF2(20432)},
+ { AOM_CDF2(17735)},
+ { AOM_CDF2(19280)},
+ { AOM_CDF2(15235)},
+ { AOM_CDF2(20297)},
+ { AOM_CDF2(22471)},
+ { AOM_CDF2(28997)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(26605)},
+ { AOM_CDF2(11304)},
+ { AOM_CDF2(16726)},
+ { AOM_CDF2(16560)},
+ { AOM_CDF2(20866)},
+ { AOM_CDF2(23524)},
+ { AOM_CDF2(19878)},
+ { AOM_CDF2(13469)},
+ { AOM_CDF2(23084)},
+ },
+ {
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF2(18983)},
+ { AOM_CDF2(20512)},
+ { AOM_CDF2(14885)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20090)},
+ { AOM_CDF2(19444)},
+ { AOM_CDF2(17286)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(19139)},
+ { AOM_CDF2(21487)},
+ { AOM_CDF2(18959)},
+ { AOM_CDF2(20910)},
+ { AOM_CDF2(19089)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20536)},
+ { AOM_CDF2(20664)},
+ { AOM_CDF2(20625)},
+ { AOM_CDF2(19123)},
+ { AOM_CDF2(14862)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(19833)},
+ { AOM_CDF2(21502)},
+ { AOM_CDF2(17485)},
+ { AOM_CDF2(20267)},
+ { AOM_CDF2(18353)},
+ { AOM_CDF2(23329)},
+ { AOM_CDF2(21478)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(22041)},
+ { AOM_CDF2(23434)},
+ { AOM_CDF2(20001)},
+ { AOM_CDF2(20554)},
+ { AOM_CDF2(20951)},
+ { AOM_CDF2(20145)},
+ { AOM_CDF2(15562)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(23312)},
+ { AOM_CDF2(21607)},
+ { AOM_CDF2(16526)},
+ { AOM_CDF2(18957)},
+ { AOM_CDF2(18034)},
+ { AOM_CDF2(18934)},
+ { AOM_CDF2(24247)},
+ { AOM_CDF2(16921)},
+ { AOM_CDF2(17080)},
+ },
+ {
+ { AOM_CDF2(26579)},
+ { AOM_CDF2(24910)},
+ { AOM_CDF2(18637)},
+ { AOM_CDF2(19800)},
+ { AOM_CDF2(20388)},
+ { AOM_CDF2(9887)},
+ { AOM_CDF2(15642)},
+ { AOM_CDF2(30198)},
+ { AOM_CDF2(24721)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(26998)},
+ { AOM_CDF2(16737)},
+ { AOM_CDF2(17838)},
+ { AOM_CDF2(18922)},
+ { AOM_CDF2(19515)},
+ { AOM_CDF2(18636)},
+ { AOM_CDF2(17333)},
+ { AOM_CDF2(15776)},
+ { AOM_CDF2(22658)},
+ },
+ {
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF2(20177)},
+ { AOM_CDF2(20789)},
+ { AOM_CDF2(20262)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(21416)},
+ { AOM_CDF2(20855)},
+ { AOM_CDF2(23410)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(20238)},
+ { AOM_CDF2(21057)},
+ { AOM_CDF2(19159)},
+ { AOM_CDF2(22337)},
+ { AOM_CDF2(20159)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20125)},
+ { AOM_CDF2(20559)},
+ { AOM_CDF2(21707)},
+ { AOM_CDF2(22296)},
+ { AOM_CDF2(17333)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(19941)},
+ { AOM_CDF2(20527)},
+ { AOM_CDF2(21470)},
+ { AOM_CDF2(22487)},
+ { AOM_CDF2(19558)},
+ { AOM_CDF2(22354)},
+ { AOM_CDF2(20331)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(22752)},
+ { AOM_CDF2(25006)},
+ { AOM_CDF2(22075)},
+ { AOM_CDF2(21576)},
+ { AOM_CDF2(17740)},
+ { AOM_CDF2(21690)},
+ { AOM_CDF2(19211)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(21442)},
+ { AOM_CDF2(22358)},
+ { AOM_CDF2(18503)},
+ { AOM_CDF2(20291)},
+ { AOM_CDF2(19945)},
+ { AOM_CDF2(21294)},
+ { AOM_CDF2(21178)},
+ { AOM_CDF2(19400)},
+ { AOM_CDF2(10556)},
+ },
+ {
+ { AOM_CDF2(24648)},
+ { AOM_CDF2(24949)},
+ { AOM_CDF2(20708)},
+ { AOM_CDF2(23905)},
+ { AOM_CDF2(20501)},
+ { AOM_CDF2(9558)},
+ { AOM_CDF2(9423)},
+ { AOM_CDF2(30365)},
+ { AOM_CDF2(19253)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(26064)},
+ { AOM_CDF2(22098)},
+ { AOM_CDF2(19613)},
+ { AOM_CDF2(20525)},
+ { AOM_CDF2(17595)},
+ { AOM_CDF2(16618)},
+ { AOM_CDF2(20497)},
+ { AOM_CDF2(18989)},
+ { AOM_CDF2(15513)},
+ },
+ {
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi16_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][4] = {
+ {
+ {
+ { AOM_CDF5(840, 1039, 1980, 4895)},
+ { AOM_CDF5(370, 671, 1883, 4471)}
+ },
+ {
+ { AOM_CDF5(3247, 4950, 9688, 14563)},
+ { AOM_CDF5(1904, 3354, 7763, 14647)}
+ }
+ },
+ {
+ {
+ { AOM_CDF5(2125, 2551, 5165, 8946)},
+ { AOM_CDF5(513, 765, 1859, 6339)}
+ },
+ {
+ { AOM_CDF5(7637, 9498, 14259, 19108)},
+ { AOM_CDF5(2497, 4096, 8866, 16993)}
+ }
+ },
+ {
+ {
+ { AOM_CDF5(4016, 4897, 8881, 14968)},
+ { AOM_CDF5(716, 1105, 2646, 10056)}
+ },
+ {
+ { AOM_CDF5(11139, 13270, 18241, 23566)},
+ { AOM_CDF5(3192, 5032, 10297, 19755)}
+ }
+ },
+ {
+ {
+ { AOM_CDF5(6708, 8958, 14746, 22133)},
+ { AOM_CDF5(1222, 2074, 4783, 15410)}
+ },
+ {
+ { AOM_CDF5(19575, 21766, 26044, 29709)},
+ { AOM_CDF5(7297, 10767, 19273, 28194)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi32_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
+ {
+ {
+ { AOM_CDF6(400, 520, 977, 2102, 6542)},
+ { AOM_CDF6(210, 405, 1315, 3326, 7537)}
+ },
+ {
+ { AOM_CDF6(2636, 4273, 7588, 11794, 20401)},
+ { AOM_CDF6(1786, 3179, 6902, 11357, 19054)}
+ }
+ },
+ {
+ {
+ { AOM_CDF6(989, 1249, 2019, 4151, 10785)},
+ { AOM_CDF6(313, 441, 1099, 2917, 8562)}
+ },
+ {
+ { AOM_CDF6(8394, 10352, 13932, 18855, 26014)},
+ { AOM_CDF6(2578, 4124, 8181, 13670, 24234)}
+ }
+ },
+ {
+ {
+ { AOM_CDF6(2515, 3003, 4452, 8162, 16041)},
+ { AOM_CDF6(574, 821, 1836, 5089, 13128)}
+ },
+ {
+ { AOM_CDF6(13468, 16303, 20361, 25105, 29281)},
+ { AOM_CDF6(3542, 5502, 10415, 16760, 25644)}
+ }
+ },
+ {
+ {
+ { AOM_CDF6(4617, 5709, 8446, 13584, 23135)},
+ { AOM_CDF6(1156, 1702, 3675, 9274, 20539)}
+ },
+ {
+ { AOM_CDF6(22086, 24282, 27010, 29770, 31743)},
+ { AOM_CDF6(7699, 10897, 20891, 26926, 31628)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi64_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
+ {
+ {
+ { AOM_CDF7(329, 498, 1101, 1784, 3265, 7758)},
+ { AOM_CDF7(335, 730, 1459, 5494, 8755, 12997)}
+ },
+ {
+ { AOM_CDF7(3505, 5304, 10086, 13814, 17684, 23370)},
+ { AOM_CDF7(1563, 2700, 4876, 10911, 14706, 22480)}
+ }
+ },
+ {
+ {
+ { AOM_CDF7(1260, 1446, 2253, 3712, 6652, 13369)},
+ { AOM_CDF7(401, 605, 1029, 2563, 5845, 12626)}
+ },
+ {
+ { AOM_CDF7(8609, 10612, 14624, 18714, 22614, 29024)},
+ { AOM_CDF7(1923, 3127, 5867, 9703, 14277, 27100)}
+ }
+ },
+ {
+ {
+ { AOM_CDF7(2374, 2772, 4583, 7276, 12288, 19706)},
+ { AOM_CDF7(497, 810, 1315, 3000, 7004, 15641)}
+ },
+ {
+ { AOM_CDF7(15050, 17126, 21410, 24886, 28156, 30726)},
+ { AOM_CDF7(4034, 6290, 10235, 14982, 21214, 28491)}
+ }
+ },
+ {
+ {
+ { AOM_CDF7(6307, 7541, 12060, 16358, 22553, 27865)},
+ { AOM_CDF7(1289, 2320, 3971, 7926, 14153, 24291)}
+ },
+ {
+ { AOM_CDF7(24212, 25708, 28268, 30035, 31307, 32049)},
+ { AOM_CDF7(8726, 12378, 19409, 26450, 30038, 32462)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi128_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
+ {
+ {
+ { AOM_CDF8(219, 482, 1140, 2091, 3680, 6028, 12586)},
+ { AOM_CDF8(371, 699, 1254, 4830, 9479, 12562, 17497)}
+ },
+ {
+ { AOM_CDF8(5245, 7456, 12880, 15852, 20033, 23932, 27608)},
+ { AOM_CDF8(2054, 3472, 5869, 14232, 18242, 20590, 26752)}
+ }
+ },
+ {
+ {
+ { AOM_CDF8(685, 933, 1488, 2714, 4766, 8562, 19254)},
+ { AOM_CDF8(217, 352, 618, 2303, 5261, 9969, 17472)}
+ },
+ {
+ { AOM_CDF8(8045, 11200, 15497, 19595, 23948, 27408, 30938)},
+ { AOM_CDF8(2310, 4160, 7471, 14997, 17931, 20768, 30240)}
+ }
+ },
+ {
+ {
+ { AOM_CDF8(1366, 1738, 2527, 5016, 9355, 15797, 24643)},
+ { AOM_CDF8(354, 558, 944, 2760, 7287, 14037, 21779)}
+ },
+ {
+ { AOM_CDF8(13627, 16246, 20173, 24429, 27948, 30415, 31863)},
+ { AOM_CDF8(6275, 9889, 14769, 23164, 27988, 30493, 32272)}
+ }
+ },
+ {
+ {
+ { AOM_CDF8(3472, 4885, 7489, 12481, 18517, 24536, 29635)},
+ { AOM_CDF8(886, 1731, 3271, 8469, 15569, 22126, 28383)}
+ },
+ {
+ { AOM_CDF8(24313, 26062, 28385, 30107, 31217, 31898, 32345)},
+ { AOM_CDF8(9165, 13282, 21150, 30286, 31894, 32571, 32712)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi256_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
+ {
+ {
+ { AOM_CDF9(310, 584, 1887, 3589, 6168, 8611, 11352, 15652)},
+ { AOM_CDF9(998, 1850, 2998, 5604, 17341, 19888, 22899, 25583)}
+ },
+ {
+ { AOM_CDF9(2520, 3240, 5952, 8870, 12577, 17558, 19954, 24168)},
+ { AOM_CDF9(2203, 4130, 7435, 10739, 20652, 23681, 25609, 27261)}
+ }
+ },
+ {
+ {
+ { AOM_CDF9(1448, 2109, 4151, 6263, 9329, 13260, 17944, 23300)},
+ { AOM_CDF9(399, 1019, 1749, 3038, 10444, 15546, 22739, 27294)}
+ },
+ {
+ { AOM_CDF9(6402, 8148, 12623, 15072, 18728, 22847, 26447, 29377)},
+ { AOM_CDF9(1674, 3252, 5734, 10159, 22397, 23802, 24821, 30940)}
+ }
+ },
+ {
+ {
+ { AOM_CDF9(3089, 3920, 6038, 9460, 14266, 19881, 25766, 29176)},
+ { AOM_CDF9(1084, 2358, 3488, 5122, 11483, 18103, 26023, 29799)}
+ },
+ {
+ { AOM_CDF9(11514, 13794, 17480, 20754, 24361, 27378, 29492, 31277)},
+ { AOM_CDF9(6571, 9610, 15516, 21826, 29092, 30829, 31842, 32708)}
+ }
+ },
+ {
+ {
+ { AOM_CDF9(5348, 7113, 11820, 15924, 22106, 26777, 30334, 31757)},
+ { AOM_CDF9(2453, 4474, 6307, 8777, 16474, 22975, 29000, 31547)}
+ },
+ {
+ { AOM_CDF9(23110, 24597, 27140, 28894, 30167, 30927, 31392, 32094)},
+ { AOM_CDF9(9998, 17661, 25178, 28097, 31308, 32038, 32403, 32695)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi512_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][16] = {
+ {
+ {
+ { AOM_CDF10(641, 983, 3707, 5430, 10234, 14958, 18788, 23412, 26061)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ },
+ {
+ { AOM_CDF10(5095, 6446, 9996, 13354, 16017, 17986, 20919, 26129, 29140)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ }
+ },
+ {
+ {
+ { AOM_CDF10(1230, 2278, 5035, 7776, 11871, 15346, 19590, 24584, 28749)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ },
+ {
+ { AOM_CDF10(7265, 9979, 15819, 19250, 21780, 23846, 26478, 28396, 31811)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ }
+ },
+ {
+ {
+ { AOM_CDF10(2624, 3936, 6480, 9686, 13979, 17726, 23267, 28410, 31078)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ },
+ {
+ { AOM_CDF10(12015, 14769, 19588, 22052, 24222, 25812, 27300, 29219, 32114)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ }
+ },
+ {
+ {
+ { AOM_CDF10(5927, 7809, 10923, 14597, 19439, 24135, 28456, 31142, 32060)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ },
+ {
+ { AOM_CDF10(21093, 23043, 25742, 27658, 29097, 29716, 30073, 30820, 31956)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi1024_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][16] = {
+ {
+ {
+ { AOM_CDF11(393, 421, 751, 1623, 3160,
+ 6352, 13345, 18047, 22571, 25830)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ },
+ {
+ { AOM_CDF11(1865, 1988, 2930, 4242, 10533,
+ 16538, 21354, 27255, 28546, 31784)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ }
+ },
+ {
+ {
+ { AOM_CDF11(696, 948, 3145, 5702, 9706,
+ 13217, 17851, 21856, 25692, 28034)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ },
+ {
+ { AOM_CDF11(2672, 3591, 9330, 17084, 22725,
+ 24284, 26527, 28027, 28377, 30876)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ }
+ },
+ {
+ {
+ { AOM_CDF11(2784, 3831, 7041, 10521, 14847,
+ 18844, 23155, 26682, 29229, 31045)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ },
+ {
+ { AOM_CDF11(9577, 12466, 17739, 20750, 22061,
+ 23215, 24601, 25483, 25843, 32056)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ }
+ },
+ {
+ {
+ { AOM_CDF11(6698, 8334, 11961, 15762, 20186,
+ 23862, 27434, 29326, 31082, 32050)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ },
+ {
+ { AOM_CDF11(20569, 22426, 25569, 26859, 28053,
+ 28913, 29486, 29724, 29807, 32570)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ }
+ }
+};
+
+static const u16 av1_default_coeff_lps_multi_cdfs[TOKEN_CDF_Q_CTXS]
+ [TX_SIZES][PLANE_TYPES][LEVEL_CONTEXTS][CDF_SIZE(BR_CDF_SIZE) + 1] = {
+ {
+ {
+ {
+ { AOM_CDF4(14298, 20718, 24174)}, { AOM_CDF4(12536, 19601, 23789)},
+ { AOM_CDF4(8712, 15051, 19503)}, { AOM_CDF4(6170, 11327, 15434)},
+ { AOM_CDF4(4742, 8926, 12538)}, { AOM_CDF4(3803, 7317, 10546)},
+ { AOM_CDF4(1696, 3317, 4871)}, { AOM_CDF4(14392, 19951, 22756)},
+ { AOM_CDF4(15978, 23218, 26818)}, { AOM_CDF4(12187, 19474, 23889)},
+ { AOM_CDF4(9176, 15640, 20259)}, { AOM_CDF4(7068, 12655, 17028)},
+ { AOM_CDF4(5656, 10442, 14472)}, { AOM_CDF4(2580, 4992, 7244)},
+ { AOM_CDF4(12136, 18049, 21426)}, { AOM_CDF4(13784, 20721, 24481)},
+ { AOM_CDF4(10836, 17621, 21900)}, { AOM_CDF4(8372, 14444, 18847)},
+ { AOM_CDF4(6523, 11779, 16000)}, { AOM_CDF4(5337, 9898, 13760)},
+ { AOM_CDF4(3034, 5860, 8462)}
+ },
+ {
+ { AOM_CDF4(15967, 22905, 26286)}, { AOM_CDF4(13534, 20654, 24579)},
+ { AOM_CDF4(9504, 16092, 20535)}, { AOM_CDF4(6975, 12568, 16903)},
+ { AOM_CDF4(5364, 10091, 14020)}, { AOM_CDF4(4357, 8370, 11857)},
+ { AOM_CDF4(2506, 4934, 7218)}, { AOM_CDF4(23032, 28815, 30936)},
+ { AOM_CDF4(19540, 26704, 29719)}, { AOM_CDF4(15158, 22969, 27097)},
+ { AOM_CDF4(11408, 18865, 23650)}, { AOM_CDF4(8885, 15448, 20250)},
+ { AOM_CDF4(7108, 12853, 17416)}, { AOM_CDF4(4231, 8041, 11480)},
+ { AOM_CDF4(19823, 26490, 29156)}, { AOM_CDF4(18890, 25929, 28932)},
+ { AOM_CDF4(15660, 23491, 27433)}, { AOM_CDF4(12147, 19776, 24488)},
+ { AOM_CDF4(9728, 16774, 21649)}, { AOM_CDF4(7919, 14277, 19066)},
+ { AOM_CDF4(5440, 10170, 14185)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(14406, 20862, 24414)}, { AOM_CDF4(11824, 18907, 23109)},
+ { AOM_CDF4(8257, 14393, 18803)}, { AOM_CDF4(5860, 10747, 14778)},
+ { AOM_CDF4(4475, 8486, 11984)}, { AOM_CDF4(3606, 6954, 10043)},
+ { AOM_CDF4(1736, 3410, 5048)}, { AOM_CDF4(14430, 20046, 22882)},
+ { AOM_CDF4(15593, 22899, 26709)}, { AOM_CDF4(12102, 19368, 23811)},
+ { AOM_CDF4(9059, 15584, 20262)}, { AOM_CDF4(6999, 12603, 17048)},
+ { AOM_CDF4(5684, 10497, 14553)}, { AOM_CDF4(2822, 5438, 7862)},
+ { AOM_CDF4(15785, 21585, 24359)}, { AOM_CDF4(18347, 25229, 28266)},
+ { AOM_CDF4(14974, 22487, 26389)}, { AOM_CDF4(11423, 18681, 23271)},
+ { AOM_CDF4(8863, 15350, 20008)}, { AOM_CDF4(7153, 12852, 17278)},
+ { AOM_CDF4(3707, 7036, 9982)}
+ },
+ {
+ { AOM_CDF4(15460, 21696, 25469)}, { AOM_CDF4(12170, 19249, 23191)},
+ { AOM_CDF4(8723, 15027, 19332)}, { AOM_CDF4(6428, 11704, 15874)},
+ { AOM_CDF4(4922, 9292, 13052)}, { AOM_CDF4(4139, 7695, 11010)},
+ { AOM_CDF4(2291, 4508, 6598)}, { AOM_CDF4(19856, 26920, 29828)},
+ { AOM_CDF4(17923, 25289, 28792)}, { AOM_CDF4(14278, 21968, 26297)},
+ { AOM_CDF4(10910, 18136, 22950)}, { AOM_CDF4(8423, 14815, 19627)},
+ { AOM_CDF4(6771, 12283, 16774)}, { AOM_CDF4(4074, 7750, 11081)},
+ { AOM_CDF4(19852, 26074, 28672)}, { AOM_CDF4(19371, 26110, 28989)},
+ { AOM_CDF4(16265, 23873, 27663)}, { AOM_CDF4(12758, 20378, 24952)},
+ { AOM_CDF4(10095, 17098, 21961)}, { AOM_CDF4(8250, 14628, 19451)},
+ { AOM_CDF4(5205, 9745, 13622)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(10563, 16233, 19763)}, { AOM_CDF4(9794, 16022, 19804)},
+ { AOM_CDF4(6750, 11945, 15759)}, { AOM_CDF4(4963, 9186, 12752)},
+ { AOM_CDF4(3845, 7435, 10627)}, { AOM_CDF4(3051, 6085, 8834)},
+ { AOM_CDF4(1311, 2596, 3830)}, { AOM_CDF4(11246, 16404, 19689)},
+ { AOM_CDF4(12315, 18911, 22731)}, { AOM_CDF4(10557, 17095, 21289)},
+ { AOM_CDF4(8136, 14006, 18249)}, { AOM_CDF4(6348, 11474, 15565)},
+ { AOM_CDF4(5196, 9655, 13400)}, { AOM_CDF4(2349, 4526, 6587)},
+ { AOM_CDF4(13337, 18730, 21569)}, { AOM_CDF4(19306, 26071, 28882)},
+ { AOM_CDF4(15952, 23540, 27254)}, { AOM_CDF4(12409, 19934, 24430)},
+ { AOM_CDF4(9760, 16706, 21389)}, { AOM_CDF4(8004, 14220, 18818)},
+ { AOM_CDF4(4138, 7794, 10961)}
+ },
+ {
+ { AOM_CDF4(10870, 16684, 20949)}, { AOM_CDF4(9664, 15230, 18680)},
+ { AOM_CDF4(6886, 12109, 15408)}, { AOM_CDF4(4825, 8900, 12305)},
+ { AOM_CDF4(3630, 7162, 10314)}, { AOM_CDF4(3036, 6429, 9387)},
+ { AOM_CDF4(1671, 3296, 4940)}, { AOM_CDF4(13819, 19159, 23026)},
+ { AOM_CDF4(11984, 19108, 23120)}, { AOM_CDF4(10690, 17210, 21663)},
+ { AOM_CDF4(7984, 14154, 18333)}, { AOM_CDF4(6868, 12294, 16124)},
+ { AOM_CDF4(5274, 8994, 12868)}, { AOM_CDF4(2988, 5771, 8424)},
+ { AOM_CDF4(19736, 26647, 29141)}, { AOM_CDF4(18933, 26070, 28984)},
+ { AOM_CDF4(15779, 23048, 27200)}, { AOM_CDF4(12638, 20061, 24532)},
+ { AOM_CDF4(10692, 17545, 22220)}, { AOM_CDF4(9217, 15251, 20054)},
+ { AOM_CDF4(5078, 9284, 12594)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(2331, 3662, 5244)}, { AOM_CDF4(2891, 4771, 6145)},
+ { AOM_CDF4(4598, 7623, 9729)}, { AOM_CDF4(3520, 6845, 9199)},
+ { AOM_CDF4(3417, 6119, 9324)}, { AOM_CDF4(2601, 5412, 7385)},
+ { AOM_CDF4(600, 1173, 1744)}, { AOM_CDF4(7672, 13286, 17469)},
+ { AOM_CDF4(4232, 7792, 10793)}, { AOM_CDF4(2915, 5317, 7397)},
+ { AOM_CDF4(2318, 4356, 6152)}, { AOM_CDF4(2127, 4000, 5554)},
+ { AOM_CDF4(1850, 3478, 5275)}, { AOM_CDF4(977, 1933, 2843)},
+ { AOM_CDF4(18280, 24387, 27989)}, { AOM_CDF4(15852, 22671, 26185)},
+ { AOM_CDF4(13845, 20951, 24789)}, { AOM_CDF4(11055, 17966, 22129)},
+ { AOM_CDF4(9138, 15422, 19801)}, { AOM_CDF4(7454, 13145, 17456)},
+ { AOM_CDF4(3370, 6393, 9013)}
+ },
+ {
+ { AOM_CDF4(5842, 9229, 10838)}, { AOM_CDF4(2313, 3491, 4276)},
+ { AOM_CDF4(2998, 6104, 7496)}, { AOM_CDF4(2420, 7447, 9868)},
+ { AOM_CDF4(3034, 8495, 10923)}, { AOM_CDF4(4076, 8937, 10975)},
+ { AOM_CDF4(1086, 2370, 3299)}, { AOM_CDF4(9714, 17254, 20444)},
+ { AOM_CDF4(8543, 13698, 17123)}, { AOM_CDF4(4918, 9007, 11910)},
+ { AOM_CDF4(4129, 7532, 10553)}, { AOM_CDF4(2364, 5533, 8058)},
+ { AOM_CDF4(1834, 3546, 5563)}, { AOM_CDF4(1473, 2908, 4133)},
+ { AOM_CDF4(15405, 21193, 25619)}, { AOM_CDF4(15691, 21952, 26561)},
+ { AOM_CDF4(12962, 19194, 24165)}, { AOM_CDF4(10272, 17855, 22129)},
+ { AOM_CDF4(8588, 15270, 20718)}, { AOM_CDF4(8682, 14669, 19500)},
+ { AOM_CDF4(4870, 9636, 13205)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(14995, 21341, 24749)}, { AOM_CDF4(13158, 20289, 24601)},
+ { AOM_CDF4(8941, 15326, 19876)}, { AOM_CDF4(6297, 11541, 15807)},
+ { AOM_CDF4(4817, 9029, 12776)}, { AOM_CDF4(3731, 7273, 10627)},
+ { AOM_CDF4(1847, 3617, 5354)}, { AOM_CDF4(14472, 19659, 22343)},
+ { AOM_CDF4(16806, 24162, 27533)}, { AOM_CDF4(12900, 20404, 24713)},
+ { AOM_CDF4(9411, 16112, 20797)}, { AOM_CDF4(7056, 12697, 17148)},
+ { AOM_CDF4(5544, 10339, 14460)}, { AOM_CDF4(2954, 5704, 8319)},
+ { AOM_CDF4(12464, 18071, 21354)}, { AOM_CDF4(15482, 22528, 26034)},
+ { AOM_CDF4(12070, 19269, 23624)}, { AOM_CDF4(8953, 15406, 20106)},
+ { AOM_CDF4(7027, 12730, 17220)}, { AOM_CDF4(5887, 10913, 15140)},
+ { AOM_CDF4(3793, 7278, 10447)}
+ },
+ {
+ { AOM_CDF4(15571, 22232, 25749)}, { AOM_CDF4(14506, 21575, 25374)},
+ { AOM_CDF4(10189, 17089, 21569)}, { AOM_CDF4(7316, 13301, 17915)},
+ { AOM_CDF4(5783, 10912, 15190)}, { AOM_CDF4(4760, 9155, 13088)},
+ { AOM_CDF4(2993, 5966, 8774)}, { AOM_CDF4(23424, 28903, 30778)},
+ { AOM_CDF4(20775, 27666, 30290)}, { AOM_CDF4(16474, 24410, 28299)},
+ { AOM_CDF4(12471, 20180, 24987)}, { AOM_CDF4(9410, 16487, 21439)},
+ { AOM_CDF4(7536, 13614, 18529)}, { AOM_CDF4(5048, 9586, 13549)},
+ { AOM_CDF4(21090, 27290, 29756)}, { AOM_CDF4(20796, 27402, 30026)},
+ { AOM_CDF4(17819, 25485, 28969)}, { AOM_CDF4(13860, 21909, 26462)},
+ { AOM_CDF4(11002, 18494, 23529)}, { AOM_CDF4(8953, 15929, 20897)},
+ { AOM_CDF4(6448, 11918, 16454)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(15999, 22208, 25449)}, { AOM_CDF4(13050, 19988, 24122)},
+ { AOM_CDF4(8594, 14864, 19378)}, { AOM_CDF4(6033, 11079, 15238)},
+ { AOM_CDF4(4554, 8683, 12347)}, { AOM_CDF4(3672, 7139, 10337)},
+ { AOM_CDF4(1900, 3771, 5576)}, { AOM_CDF4(15788, 21340, 23949)},
+ { AOM_CDF4(16825, 24235, 27758)}, { AOM_CDF4(12873, 20402, 24810)},
+ { AOM_CDF4(9590, 16363, 21094)}, { AOM_CDF4(7352, 13209, 17733)},
+ { AOM_CDF4(5960, 10989, 15184)}, { AOM_CDF4(3232, 6234, 9007)},
+ { AOM_CDF4(15761, 20716, 23224)}, { AOM_CDF4(19318, 25989, 28759)},
+ { AOM_CDF4(15529, 23094, 26929)}, { AOM_CDF4(11662, 18989, 23641)},
+ { AOM_CDF4(8955, 15568, 20366)}, { AOM_CDF4(7281, 13106, 17708)},
+ { AOM_CDF4(4248, 8059, 11440)}
+ },
+ {
+ { AOM_CDF4(14899, 21217, 24503)}, { AOM_CDF4(13519, 20283, 24047)},
+ { AOM_CDF4(9429, 15966, 20365)}, { AOM_CDF4(6700, 12355, 16652)},
+ { AOM_CDF4(5088, 9704, 13716)}, { AOM_CDF4(4243, 8154, 11731)},
+ { AOM_CDF4(2702, 5364, 7861)}, { AOM_CDF4(22745, 28388, 30454)},
+ { AOM_CDF4(20235, 27146, 29922)}, { AOM_CDF4(15896, 23715, 27637)},
+ { AOM_CDF4(11840, 19350, 24131)}, { AOM_CDF4(9122, 15932, 20880)},
+ { AOM_CDF4(7488, 13581, 18362)}, { AOM_CDF4(5114, 9568, 13370)},
+ { AOM_CDF4(20845, 26553, 28932)}, { AOM_CDF4(20981, 27372, 29884)},
+ { AOM_CDF4(17781, 25335, 28785)}, { AOM_CDF4(13760, 21708, 26297)},
+ { AOM_CDF4(10975, 18415, 23365)}, { AOM_CDF4(9045, 15789, 20686)},
+ { AOM_CDF4(6130, 11199, 15423)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(13549, 19724, 23158)}, { AOM_CDF4(11844, 18382, 22246)},
+ { AOM_CDF4(7919, 13619, 17773)}, { AOM_CDF4(5486, 10143, 13946)},
+ { AOM_CDF4(4166, 7983, 11324)}, { AOM_CDF4(3364, 6506, 9427)},
+ { AOM_CDF4(1598, 3160, 4674)}, { AOM_CDF4(15281, 20979, 23781)},
+ { AOM_CDF4(14939, 22119, 25952)}, { AOM_CDF4(11363, 18407, 22812)},
+ { AOM_CDF4(8609, 14857, 19370)}, { AOM_CDF4(6737, 12184, 16480)},
+ { AOM_CDF4(5506, 10263, 14262)}, { AOM_CDF4(2990, 5786, 8380)},
+ { AOM_CDF4(20249, 25253, 27417)}, { AOM_CDF4(21070, 27518, 30001)},
+ { AOM_CDF4(16854, 24469, 28074)}, { AOM_CDF4(12864, 20486, 25000)},
+ { AOM_CDF4(9962, 16978, 21778)}, { AOM_CDF4(8074, 14338, 19048)},
+ { AOM_CDF4(4494, 8479, 11906)}
+ },
+ {
+ { AOM_CDF4(13960, 19617, 22829)}, { AOM_CDF4(11150, 17341, 21228)},
+ { AOM_CDF4(7150, 12964, 17190)}, { AOM_CDF4(5331, 10002, 13867)},
+ { AOM_CDF4(4167, 7744, 11057)}, { AOM_CDF4(3480, 6629, 9646)},
+ { AOM_CDF4(1883, 3784, 5686)}, { AOM_CDF4(18752, 25660, 28912)},
+ { AOM_CDF4(16968, 24586, 28030)}, { AOM_CDF4(13520, 21055, 25313)},
+ { AOM_CDF4(10453, 17626, 22280)}, { AOM_CDF4(8386, 14505, 19116)},
+ { AOM_CDF4(6742, 12595, 17008)}, { AOM_CDF4(4273, 8140, 11499)},
+ { AOM_CDF4(22120, 27827, 30233)}, { AOM_CDF4(20563, 27358, 29895)},
+ { AOM_CDF4(17076, 24644, 28153)}, { AOM_CDF4(13362, 20942, 25309)},
+ { AOM_CDF4(10794, 17965, 22695)}, { AOM_CDF4(9014, 15652, 20319)},
+ { AOM_CDF4(5708, 10512, 14497)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(5705, 10930, 15725)}, { AOM_CDF4(7946, 12765, 16115)},
+ { AOM_CDF4(6801, 12123, 16226)}, { AOM_CDF4(5462, 10135, 14200)},
+ { AOM_CDF4(4189, 8011, 11507)}, { AOM_CDF4(3191, 6229, 9408)},
+ { AOM_CDF4(1057, 2137, 3212)}, { AOM_CDF4(10018, 17067, 21491)},
+ { AOM_CDF4(7380, 12582, 16453)}, { AOM_CDF4(6068, 10845, 14339)},
+ { AOM_CDF4(5098, 9198, 12555)}, { AOM_CDF4(4312, 8010, 11119)},
+ { AOM_CDF4(3700, 6966, 9781)}, { AOM_CDF4(1693, 3326, 4887)},
+ { AOM_CDF4(18757, 24930, 27774)}, { AOM_CDF4(17648, 24596, 27817)},
+ { AOM_CDF4(14707, 22052, 26026)}, { AOM_CDF4(11720, 18852, 23292)},
+ { AOM_CDF4(9357, 15952, 20525)}, { AOM_CDF4(7810, 13753, 18210)},
+ { AOM_CDF4(3879, 7333, 10328)}
+ },
+ {
+ { AOM_CDF4(8278, 13242, 15922)}, { AOM_CDF4(10547, 15867, 18919)},
+ { AOM_CDF4(9106, 15842, 20609)}, { AOM_CDF4(6833, 13007, 17218)},
+ { AOM_CDF4(4811, 9712, 13923)}, { AOM_CDF4(3985, 7352, 11128)},
+ { AOM_CDF4(1688, 3458, 5262)}, { AOM_CDF4(12951, 21861, 26510)},
+ { AOM_CDF4(9788, 16044, 20276)}, { AOM_CDF4(6309, 11244, 14870)},
+ { AOM_CDF4(5183, 9349, 12566)}, { AOM_CDF4(4389, 8229, 11492)},
+ { AOM_CDF4(3633, 6945, 10620)}, { AOM_CDF4(3600, 6847, 9907)},
+ { AOM_CDF4(21748, 28137, 30255)}, { AOM_CDF4(19436, 26581, 29560)},
+ { AOM_CDF4(16359, 24201, 27953)}, { AOM_CDF4(13961, 21693, 25871)},
+ { AOM_CDF4(11544, 18686, 23322)}, { AOM_CDF4(9372, 16462, 20952)},
+ { AOM_CDF4(6138, 11210, 15390)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(16138, 22223, 25509)}, { AOM_CDF4(15347, 22430, 26332)},
+ { AOM_CDF4(9614, 16736, 21332)}, { AOM_CDF4(6600, 12275, 16907)},
+ { AOM_CDF4(4811, 9424, 13547)}, { AOM_CDF4(3748, 7809, 11420)},
+ { AOM_CDF4(2254, 4587, 6890)}, { AOM_CDF4(15196, 20284, 23177)},
+ { AOM_CDF4(18317, 25469, 28451)}, { AOM_CDF4(13918, 21651, 25842)},
+ { AOM_CDF4(10052, 17150, 21995)}, { AOM_CDF4(7499, 13630, 18587)},
+ { AOM_CDF4(6158, 11417, 16003)}, { AOM_CDF4(4014, 7785, 11252)},
+ { AOM_CDF4(15048, 21067, 24384)}, { AOM_CDF4(18202, 25346, 28553)},
+ { AOM_CDF4(14302, 22019, 26356)}, { AOM_CDF4(10839, 18139, 23166)},
+ { AOM_CDF4(8715, 15744, 20806)}, { AOM_CDF4(7536, 13576, 18544)},
+ { AOM_CDF4(5413, 10335, 14498)}
+ },
+ {
+ { AOM_CDF4(17394, 24501, 27895)}, { AOM_CDF4(15889, 23420, 27185)},
+ { AOM_CDF4(11561, 19133, 23870)}, { AOM_CDF4(8285, 14812, 19844)},
+ { AOM_CDF4(6496, 12043, 16550)}, { AOM_CDF4(4771, 9574, 13677)},
+ { AOM_CDF4(3603, 6830, 10144)}, { AOM_CDF4(21656, 27704, 30200)},
+ { AOM_CDF4(21324, 27915, 30511)}, { AOM_CDF4(17327, 25336, 28997)},
+ { AOM_CDF4(13417, 21381, 26033)}, { AOM_CDF4(10132, 17425, 22338)},
+ { AOM_CDF4(8580, 15016, 19633)}, { AOM_CDF4(5694, 11477, 16411)},
+ { AOM_CDF4(24116, 29780, 31450)}, { AOM_CDF4(23853, 29695, 31591)},
+ { AOM_CDF4(20085, 27614, 30428)}, { AOM_CDF4(15326, 24335, 28575)},
+ { AOM_CDF4(11814, 19472, 24810)}, { AOM_CDF4(10221, 18611, 24767)},
+ { AOM_CDF4(7689, 14558, 20321)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(16214, 22380, 25770)}, { AOM_CDF4(14213, 21304, 25295)},
+ { AOM_CDF4(9213, 15823, 20455)}, { AOM_CDF4(6395, 11758, 16139)},
+ { AOM_CDF4(4779, 9187, 13066)}, { AOM_CDF4(3821, 7501, 10953)},
+ { AOM_CDF4(2293, 4567, 6795)}, { AOM_CDF4(15859, 21283, 23820)},
+ { AOM_CDF4(18404, 25602, 28726)}, { AOM_CDF4(14325, 21980, 26206)},
+ { AOM_CDF4(10669, 17937, 22720)}, { AOM_CDF4(8297, 14642, 19447)},
+ { AOM_CDF4(6746, 12389, 16893)}, { AOM_CDF4(4324, 8251, 11770)},
+ { AOM_CDF4(16532, 21631, 24475)}, { AOM_CDF4(20667, 27150, 29668)},
+ { AOM_CDF4(16728, 24510, 28175)}, { AOM_CDF4(12861, 20645, 25332)},
+ { AOM_CDF4(10076, 17361, 22417)}, { AOM_CDF4(8395, 14940, 19963)},
+ { AOM_CDF4(5731, 10683, 14912)}
+ },
+ {
+ { AOM_CDF4(14433, 21155, 24938)}, { AOM_CDF4(14658, 21716, 25545)},
+ { AOM_CDF4(9923, 16824, 21557)}, { AOM_CDF4(6982, 13052, 17721)},
+ { AOM_CDF4(5419, 10503, 15050)}, { AOM_CDF4(4852, 9162, 13014)},
+ { AOM_CDF4(3271, 6395, 9630)}, { AOM_CDF4(22210, 27833, 30109)},
+ { AOM_CDF4(20750, 27368, 29821)}, { AOM_CDF4(16894, 24828, 28573)},
+ { AOM_CDF4(13247, 21276, 25757)}, { AOM_CDF4(10038, 17265, 22563)},
+ { AOM_CDF4(8587, 14947, 20327)}, { AOM_CDF4(5645, 11371, 15252)},
+ { AOM_CDF4(22027, 27526, 29714)}, { AOM_CDF4(23098, 29146, 31221)},
+ { AOM_CDF4(19886, 27341, 30272)}, { AOM_CDF4(15609, 23747, 28046)},
+ { AOM_CDF4(11993, 20065, 24939)}, { AOM_CDF4(9637, 18267, 23671)},
+ { AOM_CDF4(7625, 13801, 19144)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(14438, 20798, 24089)}, { AOM_CDF4(12621, 19203, 23097)},
+ { AOM_CDF4(8177, 14125, 18402)}, { AOM_CDF4(5674, 10501, 14456)},
+ { AOM_CDF4(4236, 8239, 11733)}, { AOM_CDF4(3447, 6750, 9806)},
+ { AOM_CDF4(1986, 3950, 5864)}, { AOM_CDF4(16208, 22099, 24930)},
+ { AOM_CDF4(16537, 24025, 27585)}, { AOM_CDF4(12780, 20381, 24867)},
+ { AOM_CDF4(9767, 16612, 21416)}, { AOM_CDF4(7686, 13738, 18398)},
+ { AOM_CDF4(6333, 11614, 15964)}, { AOM_CDF4(3941, 7571, 10836)},
+ { AOM_CDF4(22819, 27422, 29202)}, { AOM_CDF4(22224, 28514, 30721)},
+ { AOM_CDF4(17660, 25433, 28913)}, { AOM_CDF4(13574, 21482, 26002)},
+ { AOM_CDF4(10629, 17977, 22938)}, { AOM_CDF4(8612, 15298, 20265)},
+ { AOM_CDF4(5607, 10491, 14596)}
+ },
+ {
+ { AOM_CDF4(13569, 19800, 23206)}, { AOM_CDF4(13128, 19924, 23869)},
+ { AOM_CDF4(8329, 14841, 19403)}, { AOM_CDF4(6130, 10976, 15057)},
+ { AOM_CDF4(4682, 8839, 12518)}, { AOM_CDF4(3656, 7409, 10588)},
+ { AOM_CDF4(2577, 5099, 7412)}, { AOM_CDF4(22427, 28684, 30585)},
+ { AOM_CDF4(20913, 27750, 30139)}, { AOM_CDF4(15840, 24109, 27834)},
+ { AOM_CDF4(12308, 20029, 24569)}, { AOM_CDF4(10216, 16785, 21458)},
+ { AOM_CDF4(8309, 14203, 19113)}, { AOM_CDF4(6043, 11168, 15307)},
+ { AOM_CDF4(23166, 28901, 30998)}, { AOM_CDF4(21899, 28405, 30751)},
+ { AOM_CDF4(18413, 26091, 29443)}, { AOM_CDF4(15233, 23114, 27352)},
+ { AOM_CDF4(12683, 20472, 25288)}, { AOM_CDF4(10702, 18259, 23409)},
+ { AOM_CDF4(8125, 14464, 19226)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(9040, 14786, 18360)}, { AOM_CDF4(9979, 15718, 19415)},
+ { AOM_CDF4(7913, 13918, 18311)}, { AOM_CDF4(5859, 10889, 15184)},
+ { AOM_CDF4(4593, 8677, 12510)}, { AOM_CDF4(3820, 7396, 10791)},
+ { AOM_CDF4(1730, 3471, 5192)}, { AOM_CDF4(11803, 18365, 22709)},
+ { AOM_CDF4(11419, 18058, 22225)}, { AOM_CDF4(9418, 15774, 20243)},
+ { AOM_CDF4(7539, 13325, 17657)}, { AOM_CDF4(6233, 11317, 15384)},
+ { AOM_CDF4(5137, 9656, 13545)}, { AOM_CDF4(2977, 5774, 8349)},
+ { AOM_CDF4(21207, 27246, 29640)}, { AOM_CDF4(19547, 26578, 29497)},
+ { AOM_CDF4(16169, 23871, 27690)}, { AOM_CDF4(12820, 20458, 25018)},
+ { AOM_CDF4(10224, 17332, 22214)}, { AOM_CDF4(8526, 15048, 19884)},
+ { AOM_CDF4(5037, 9410, 13118)}
+ },
+ {
+ { AOM_CDF4(12339, 17329, 20140)}, { AOM_CDF4(13505, 19895, 23225)},
+ { AOM_CDF4(9847, 16944, 21564)}, { AOM_CDF4(7280, 13256, 18348)},
+ { AOM_CDF4(4712, 10009, 14454)}, { AOM_CDF4(4361, 7914, 12477)},
+ { AOM_CDF4(2870, 5628, 7995)}, { AOM_CDF4(20061, 25504, 28526)},
+ { AOM_CDF4(15235, 22878, 26145)}, { AOM_CDF4(12985, 19958, 24155)},
+ { AOM_CDF4(9782, 16641, 21403)}, { AOM_CDF4(9456, 16360, 20760)},
+ { AOM_CDF4(6855, 12940, 18557)}, { AOM_CDF4(5661, 10564, 15002)},
+ { AOM_CDF4(25656, 30602, 31894)}, { AOM_CDF4(22570, 29107, 31092)},
+ { AOM_CDF4(18917, 26423, 29541)}, { AOM_CDF4(15940, 23649, 27754)},
+ { AOM_CDF4(12803, 20581, 25219)}, { AOM_CDF4(11082, 18695, 23376)},
+ { AOM_CDF4(7939, 14373, 19005)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(18315, 24289, 27551)}, { AOM_CDF4(16854, 24068, 27835)},
+ { AOM_CDF4(10140, 17927, 23173)}, { AOM_CDF4(6722, 12982, 18267)},
+ { AOM_CDF4(4661, 9826, 14706)}, { AOM_CDF4(3832, 8165, 12294)},
+ { AOM_CDF4(2795, 6098, 9245)}, { AOM_CDF4(17145, 23326, 26672)},
+ { AOM_CDF4(20733, 27680, 30308)}, { AOM_CDF4(16032, 24461, 28546)},
+ { AOM_CDF4(11653, 20093, 25081)}, { AOM_CDF4(9290, 16429, 22086)},
+ { AOM_CDF4(7796, 14598, 19982)}, { AOM_CDF4(6502, 12378, 17441)},
+ { AOM_CDF4(21681, 27732, 30320)}, { AOM_CDF4(22389, 29044, 31261)},
+ { AOM_CDF4(19027, 26731, 30087)}, { AOM_CDF4(14739, 23755, 28624)},
+ { AOM_CDF4(11358, 20778, 25511)}, { AOM_CDF4(10995, 18073, 24190)},
+ { AOM_CDF4(9162, 14990, 20617)}
+ },
+ {
+ { AOM_CDF4(21425, 27952, 30388)}, { AOM_CDF4(18062, 25838, 29034)},
+ { AOM_CDF4(11956, 19881, 24808)}, { AOM_CDF4(7718, 15000, 20980)},
+ { AOM_CDF4(5702, 11254, 16143)}, { AOM_CDF4(4898, 9088, 16864)},
+ { AOM_CDF4(3679, 6776, 11907)}, { AOM_CDF4(23294, 30160, 31663)},
+ { AOM_CDF4(24397, 29896, 31836)}, { AOM_CDF4(19245, 27128, 30593)},
+ { AOM_CDF4(13202, 19825, 26404)}, { AOM_CDF4(11578, 19297, 23957)},
+ { AOM_CDF4(8073, 13297, 21370)}, { AOM_CDF4(5461, 10923, 19745)},
+ { AOM_CDF4(27367, 30521, 31934)}, { AOM_CDF4(24904, 30671, 31940)},
+ { AOM_CDF4(23075, 28460, 31299)}, { AOM_CDF4(14400, 23658, 30417)},
+ { AOM_CDF4(13885, 23882, 28325)}, { AOM_CDF4(14746, 22938, 27853)},
+ { AOM_CDF4(5461, 16384, 27307)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(18274, 24813, 27890)}, { AOM_CDF4(15537, 23149, 27003)},
+ { AOM_CDF4(9449, 16740, 21827)}, { AOM_CDF4(6700, 12498, 17261)},
+ { AOM_CDF4(4988, 9866, 14198)}, { AOM_CDF4(4236, 8147, 11902)},
+ { AOM_CDF4(2867, 5860, 8654)}, { AOM_CDF4(17124, 23171, 26101)},
+ { AOM_CDF4(20396, 27477, 30148)}, { AOM_CDF4(16573, 24629, 28492)},
+ { AOM_CDF4(12749, 20846, 25674)}, { AOM_CDF4(10233, 17878, 22818)},
+ { AOM_CDF4(8525, 15332, 20363)}, { AOM_CDF4(6283, 11632, 16255)},
+ { AOM_CDF4(20466, 26511, 29286)}, { AOM_CDF4(23059, 29174, 31191)},
+ { AOM_CDF4(19481, 27263, 30241)}, { AOM_CDF4(15458, 23631, 28137)},
+ { AOM_CDF4(12416, 20608, 25693)}, { AOM_CDF4(10261, 18011, 23261)},
+ { AOM_CDF4(8016, 14655, 19666)}
+ },
+ {
+ { AOM_CDF4(17616, 24586, 28112)}, { AOM_CDF4(15809, 23299, 27155)},
+ { AOM_CDF4(10767, 18890, 23793)}, { AOM_CDF4(7727, 14255, 18865)},
+ { AOM_CDF4(6129, 11926, 16882)}, { AOM_CDF4(4482, 9704, 14861)},
+ { AOM_CDF4(3277, 7452, 11522)}, { AOM_CDF4(22956, 28551, 30730)},
+ { AOM_CDF4(22724, 28937, 30961)}, { AOM_CDF4(18467, 26324, 29580)},
+ { AOM_CDF4(13234, 20713, 25649)}, { AOM_CDF4(11181, 17592, 22481)},
+ { AOM_CDF4(8291, 18358, 24576)}, { AOM_CDF4(7568, 11881, 14984)},
+ { AOM_CDF4(24948, 29001, 31147)}, { AOM_CDF4(25674, 30619, 32151)},
+ { AOM_CDF4(20841, 26793, 29603)}, { AOM_CDF4(14669, 24356, 28666)},
+ { AOM_CDF4(11334, 23593, 28219)}, { AOM_CDF4(8922, 14762, 22873)},
+ { AOM_CDF4(8301, 13544, 20535)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(17113, 23733, 27081)}, { AOM_CDF4(14139, 21406, 25452)},
+ { AOM_CDF4(8552, 15002, 19776)}, { AOM_CDF4(5871, 11120, 15378)},
+ { AOM_CDF4(4455, 8616, 12253)}, { AOM_CDF4(3469, 6910, 10386)},
+ { AOM_CDF4(2255, 4553, 6782)}, { AOM_CDF4(18224, 24376, 27053)},
+ { AOM_CDF4(19290, 26710, 29614)}, { AOM_CDF4(14936, 22991, 27184)},
+ { AOM_CDF4(11238, 18951, 23762)}, { AOM_CDF4(8786, 15617, 20588)},
+ { AOM_CDF4(7317, 13228, 18003)}, { AOM_CDF4(5101, 9512, 13493)},
+ { AOM_CDF4(22639, 28222, 30210)}, { AOM_CDF4(23216, 29331, 31307)},
+ { AOM_CDF4(19075, 26762, 29895)}, { AOM_CDF4(15014, 23113, 27457)},
+ { AOM_CDF4(11938, 19857, 24752)}, { AOM_CDF4(9942, 17280, 22282)},
+ { AOM_CDF4(7167, 13144, 17752)}
+ },
+ {
+ { AOM_CDF4(15820, 22738, 26488)}, { AOM_CDF4(13530, 20885, 25216)},
+ { AOM_CDF4(8395, 15530, 20452)}, { AOM_CDF4(6574, 12321, 16380)},
+ { AOM_CDF4(5353, 10419, 14568)}, { AOM_CDF4(4613, 8446, 12381)},
+ { AOM_CDF4(3440, 7158, 9903)}, { AOM_CDF4(24247, 29051, 31224)},
+ { AOM_CDF4(22118, 28058, 30369)}, { AOM_CDF4(16498, 24768, 28389)},
+ { AOM_CDF4(12920, 21175, 26137)}, { AOM_CDF4(10730, 18619, 25352)},
+ { AOM_CDF4(10187, 16279, 22791)}, { AOM_CDF4(9310, 14631, 22127)},
+ { AOM_CDF4(24970, 30558, 32057)}, { AOM_CDF4(24801, 29942, 31698)},
+ { AOM_CDF4(22432, 28453, 30855)}, { AOM_CDF4(19054, 25680, 29580)},
+ { AOM_CDF4(14392, 23036, 28109)}, { AOM_CDF4(12495, 20947, 26650)},
+ { AOM_CDF4(12442, 20326, 26214)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(12162, 18785, 22648)}, { AOM_CDF4(12749, 19697, 23806)},
+ { AOM_CDF4(8580, 15297, 20346)}, { AOM_CDF4(6169, 11749, 16543)},
+ { AOM_CDF4(4836, 9391, 13448)}, { AOM_CDF4(3821, 7711, 11613)},
+ { AOM_CDF4(2228, 4601, 7070)}, { AOM_CDF4(16319, 24725, 28280)},
+ { AOM_CDF4(15698, 23277, 27168)}, { AOM_CDF4(12726, 20368, 25047)},
+ { AOM_CDF4(9912, 17015, 21976)}, { AOM_CDF4(7888, 14220, 19179)},
+ { AOM_CDF4(6777, 12284, 17018)}, { AOM_CDF4(4492, 8590, 12252)},
+ { AOM_CDF4(23249, 28904, 30947)}, { AOM_CDF4(21050, 27908, 30512)},
+ { AOM_CDF4(17440, 25340, 28949)}, { AOM_CDF4(14059, 22018, 26541)},
+ { AOM_CDF4(11288, 18903, 23898)}, { AOM_CDF4(9411, 16342, 21428)},
+ { AOM_CDF4(6278, 11588, 15944)}
+ },
+ {
+ { AOM_CDF4(13981, 20067, 23226)}, { AOM_CDF4(16922, 23580, 26783)},
+ { AOM_CDF4(11005, 19039, 24487)}, { AOM_CDF4(7389, 14218, 19798)},
+ { AOM_CDF4(5598, 11505, 17206)}, { AOM_CDF4(6090, 11213, 15659)},
+ { AOM_CDF4(3820, 7371, 10119)}, { AOM_CDF4(21082, 26925, 29675)},
+ { AOM_CDF4(21262, 28627, 31128)}, { AOM_CDF4(18392, 26454, 30437)},
+ { AOM_CDF4(14870, 22910, 27096)}, { AOM_CDF4(12620, 19484, 24908)},
+ { AOM_CDF4(9290, 16553, 22802)}, { AOM_CDF4(6668, 14288, 20004)},
+ { AOM_CDF4(27704, 31055, 31949)}, { AOM_CDF4(24709, 29978, 31788)},
+ { AOM_CDF4(21668, 29264, 31657)}, { AOM_CDF4(18295, 26968, 30074)},
+ { AOM_CDF4(16399, 24422, 29313)}, { AOM_CDF4(14347, 23026, 28104)},
+ { AOM_CDF4(12370, 19806, 24477)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ }
+};
+
+static const u16 av1_default_coeff_base_multi_cdfs
+ [TOKEN_CDF_Q_CTXS][TX_SIZES][PLANE_TYPES]
+ [SIG_COEF_CONTEXTS][CDF_SIZE(NUM_BASE_LEVELS + 2) + 1] = {
+ {
+ {
+ {
+ { AOM_CDF4(4034, 8930, 12727)}, { AOM_CDF4(18082, 29741, 31877)},
+ { AOM_CDF4(12596, 26124, 30493)}, { AOM_CDF4(9446, 21118, 27005)},
+ { AOM_CDF4(6308, 15141, 21279)}, { AOM_CDF4(2463, 6357, 9783)},
+ { AOM_CDF4(20667, 30546, 31929)}, { AOM_CDF4(13043, 26123, 30134)},
+ { AOM_CDF4(8151, 18757, 24778)}, { AOM_CDF4(5255, 12839, 18632)},
+ { AOM_CDF4(2820, 7206, 11161)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(15736, 27553, 30604)},
+ { AOM_CDF4(11210, 23794, 28787)}, { AOM_CDF4(5947, 13874, 19701)},
+ { AOM_CDF4(4215, 9323, 13891)}, { AOM_CDF4(2833, 6462, 10059)},
+ { AOM_CDF4(19605, 30393, 31582)}, { AOM_CDF4(13523, 26252, 30248)},
+ { AOM_CDF4(8446, 18622, 24512)}, { AOM_CDF4(3818, 10343, 15974)},
+ { AOM_CDF4(1481, 4117, 6796)}, { AOM_CDF4(22649, 31302, 32190)},
+ { AOM_CDF4(14829, 27127, 30449)}, { AOM_CDF4(8313, 17702, 23304)},
+ { AOM_CDF4(3022, 8301, 12786)}, { AOM_CDF4(1536, 4412, 7184)},
+ { AOM_CDF4(22354, 29774, 31372)}, { AOM_CDF4(14723, 25472, 29214)},
+ { AOM_CDF4(6673, 13745, 18662)}, { AOM_CDF4(2068, 5766, 9322)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(6302, 16444, 21761)}, { AOM_CDF4(23040, 31538, 32475)},
+ { AOM_CDF4(15196, 28452, 31496)}, { AOM_CDF4(10020, 22946, 28514)},
+ { AOM_CDF4(6533, 16862, 23501)}, { AOM_CDF4(3538, 9816, 15076)},
+ { AOM_CDF4(24444, 31875, 32525)}, { AOM_CDF4(15881, 28924, 31635)},
+ { AOM_CDF4(9922, 22873, 28466)}, { AOM_CDF4(6527, 16966, 23691)},
+ { AOM_CDF4(4114, 11303, 17220)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(20201, 30770, 32209)},
+ { AOM_CDF4(14754, 28071, 31258)}, { AOM_CDF4(8378, 20186, 26517)},
+ { AOM_CDF4(5916, 15299, 21978)}, { AOM_CDF4(4268, 11583, 17901)},
+ { AOM_CDF4(24361, 32025, 32581)}, { AOM_CDF4(18673, 30105, 31943)},
+ { AOM_CDF4(10196, 22244, 27576)}, { AOM_CDF4(5495, 14349, 20417)},
+ { AOM_CDF4(2676, 7415, 11498)}, { AOM_CDF4(24678, 31958, 32585)},
+ { AOM_CDF4(18629, 29906, 31831)}, { AOM_CDF4(9364, 20724, 26315)},
+ { AOM_CDF4(4641, 12318, 18094)}, { AOM_CDF4(2758, 7387, 11579)},
+ { AOM_CDF4(25433, 31842, 32469)}, { AOM_CDF4(18795, 29289, 31411)},
+ { AOM_CDF4(7644, 17584, 23592)}, { AOM_CDF4(3408, 9014, 15047)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(4536, 10072, 14001)}, { AOM_CDF4(25459, 31416, 32206)},
+ { AOM_CDF4(16605, 28048, 30818)}, { AOM_CDF4(11008, 22857, 27719)},
+ { AOM_CDF4(6915, 16268, 22315)}, { AOM_CDF4(2625, 6812, 10537)},
+ { AOM_CDF4(24257, 31788, 32499)}, { AOM_CDF4(16880, 29454, 31879)},
+ { AOM_CDF4(11958, 25054, 29778)}, { AOM_CDF4(7916, 18718, 25084)},
+ { AOM_CDF4(3383, 8777, 13446)}, { AOM_CDF4(22720, 31603, 32393)},
+ { AOM_CDF4(14960, 28125, 31335)}, { AOM_CDF4(9731, 22210, 27928)},
+ { AOM_CDF4(6304, 15832, 22277)}, { AOM_CDF4(2910, 7818, 12166)},
+ { AOM_CDF4(20375, 30627, 32131)}, { AOM_CDF4(13904, 27284, 30887)},
+ { AOM_CDF4(9368, 21558, 27144)}, { AOM_CDF4(5937, 14966, 21119)},
+ { AOM_CDF4(2667, 7225, 11319)}, { AOM_CDF4(23970, 31470, 32378)},
+ { AOM_CDF4(17173, 29734, 32018)}, { AOM_CDF4(12795, 25441, 29965)},
+ { AOM_CDF4(8981, 19680, 25893)}, { AOM_CDF4(4728, 11372, 16902)},
+ { AOM_CDF4(24287, 31797, 32439)}, { AOM_CDF4(16703, 29145, 31696)},
+ { AOM_CDF4(10833, 23554, 28725)}, { AOM_CDF4(6468, 16566, 23057)},
+ { AOM_CDF4(2415, 6562, 10278)}, { AOM_CDF4(26610, 32395, 32659)},
+ { AOM_CDF4(18590, 30498, 32117)}, { AOM_CDF4(12420, 25756, 29950)},
+ { AOM_CDF4(7639, 18746, 24710)}, { AOM_CDF4(3001, 8086, 12347)},
+ { AOM_CDF4(25076, 32064, 32580)}, { AOM_CDF4(17946, 30128, 32028)},
+ { AOM_CDF4(12024, 24985, 29378)}, { AOM_CDF4(7517, 18390, 24304)},
+ { AOM_CDF4(3243, 8781, 13331)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(6037, 16771, 21957)}, { AOM_CDF4(24774, 31704, 32426)},
+ { AOM_CDF4(16830, 28589, 31056)}, { AOM_CDF4(10602, 22828, 27760)},
+ { AOM_CDF4(6733, 16829, 23071)}, { AOM_CDF4(3250, 8914, 13556)},
+ { AOM_CDF4(25582, 32220, 32668)}, { AOM_CDF4(18659, 30342, 32223)},
+ { AOM_CDF4(12546, 26149, 30515)}, { AOM_CDF4(8420, 20451, 26801)},
+ { AOM_CDF4(4636, 12420, 18344)}, { AOM_CDF4(27581, 32362, 32639)},
+ { AOM_CDF4(18987, 30083, 31978)}, { AOM_CDF4(11327, 24248, 29084)},
+ { AOM_CDF4(7264, 17719, 24120)}, { AOM_CDF4(3995, 10768, 16169)},
+ { AOM_CDF4(25893, 31831, 32487)}, { AOM_CDF4(16577, 28587, 31379)},
+ { AOM_CDF4(10189, 22748, 28182)}, { AOM_CDF4(6832, 17094, 23556)},
+ { AOM_CDF4(3708, 10110, 15334)}, { AOM_CDF4(25904, 32282, 32656)},
+ { AOM_CDF4(19721, 30792, 32276)}, { AOM_CDF4(12819, 26243, 30411)},
+ { AOM_CDF4(8572, 20614, 26891)}, { AOM_CDF4(5364, 14059, 20467)},
+ { AOM_CDF4(26580, 32438, 32677)}, { AOM_CDF4(20852, 31225, 32340)},
+ { AOM_CDF4(12435, 25700, 29967)}, { AOM_CDF4(8691, 20825, 26976)},
+ { AOM_CDF4(4446, 12209, 17269)}, { AOM_CDF4(27350, 32429, 32696)},
+ { AOM_CDF4(21372, 30977, 32272)}, { AOM_CDF4(12673, 25270, 29853)},
+ { AOM_CDF4(9208, 20925, 26640)}, { AOM_CDF4(5018, 13351, 18732)},
+ { AOM_CDF4(27351, 32479, 32713)}, { AOM_CDF4(21398, 31209, 32387)},
+ { AOM_CDF4(12162, 25047, 29842)}, { AOM_CDF4(7896, 18691, 25319)},
+ { AOM_CDF4(4670, 12882, 18881)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(5487, 10460, 13708)}, { AOM_CDF4(21597, 28303, 30674)},
+ { AOM_CDF4(11037, 21953, 26476)}, { AOM_CDF4(8147, 17962, 22952)},
+ { AOM_CDF4(5242, 13061, 18532)}, { AOM_CDF4(1889, 5208, 8182)},
+ { AOM_CDF4(26774, 32133, 32590)}, { AOM_CDF4(17844, 29564, 31767)},
+ { AOM_CDF4(11690, 24438, 29171)}, { AOM_CDF4(7542, 18215, 24459)},
+ { AOM_CDF4(2993, 8050, 12319)}, { AOM_CDF4(28023, 32328, 32591)},
+ { AOM_CDF4(18651, 30126, 31954)}, { AOM_CDF4(12164, 25146, 29589)},
+ { AOM_CDF4(7762, 18530, 24771)}, { AOM_CDF4(3492, 9183, 13920)},
+ { AOM_CDF4(27591, 32008, 32491)}, { AOM_CDF4(17149, 28853, 31510)},
+ { AOM_CDF4(11485, 24003, 28860)}, { AOM_CDF4(7697, 18086, 24210)},
+ { AOM_CDF4(3075, 7999, 12218)}, { AOM_CDF4(28268, 32482, 32654)},
+ { AOM_CDF4(19631, 31051, 32404)}, { AOM_CDF4(13860, 27260, 31020)},
+ { AOM_CDF4(9605, 21613, 27594)}, { AOM_CDF4(4876, 12162, 17908)},
+ { AOM_CDF4(27248, 32316, 32576)}, { AOM_CDF4(18955, 30457, 32075)},
+ { AOM_CDF4(11824, 23997, 28795)}, { AOM_CDF4(7346, 18196, 24647)},
+ { AOM_CDF4(3403, 9247, 14111)}, { AOM_CDF4(29711, 32655, 32735)},
+ { AOM_CDF4(21169, 31394, 32417)}, { AOM_CDF4(13487, 27198, 30957)},
+ { AOM_CDF4(8828, 21683, 27614)}, { AOM_CDF4(4270, 11451, 17038)},
+ { AOM_CDF4(28708, 32578, 32731)}, { AOM_CDF4(20120, 31241, 32482)},
+ { AOM_CDF4(13692, 27550, 31321)}, { AOM_CDF4(9418, 22514, 28439)},
+ { AOM_CDF4(4999, 13283, 19462)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(5673, 14302, 19711)}, { AOM_CDF4(26251, 30701, 31834)},
+ { AOM_CDF4(12782, 23783, 27803)}, { AOM_CDF4(9127, 20657, 25808)},
+ { AOM_CDF4(6368, 16208, 21462)}, { AOM_CDF4(2465, 7177, 10822)},
+ { AOM_CDF4(29961, 32563, 32719)}, { AOM_CDF4(18318, 29891, 31949)},
+ { AOM_CDF4(11361, 24514, 29357)}, { AOM_CDF4(7900, 19603, 25607)},
+ { AOM_CDF4(4002, 10590, 15546)}, { AOM_CDF4(29637, 32310, 32595)},
+ { AOM_CDF4(18296, 29913, 31809)}, { AOM_CDF4(10144, 21515, 26871)},
+ { AOM_CDF4(5358, 14322, 20394)}, { AOM_CDF4(3067, 8362, 13346)},
+ { AOM_CDF4(28652, 32470, 32676)}, { AOM_CDF4(17538, 30771, 32209)},
+ { AOM_CDF4(13924, 26882, 30494)}, { AOM_CDF4(10496, 22837, 27869)},
+ { AOM_CDF4(7236, 16396, 21621)}, { AOM_CDF4(30743, 32687, 32746)},
+ { AOM_CDF4(23006, 31676, 32489)}, { AOM_CDF4(14494, 27828, 31120)},
+ { AOM_CDF4(10174, 22801, 28352)}, { AOM_CDF4(6242, 15281, 21043)},
+ { AOM_CDF4(25817, 32243, 32720)}, { AOM_CDF4(18618, 31367, 32325)},
+ { AOM_CDF4(13997, 28318, 31878)}, { AOM_CDF4(12255, 26534, 31383)},
+ { AOM_CDF4(9561, 21588, 28450)}, { AOM_CDF4(28188, 32635, 32724)},
+ { AOM_CDF4(22060, 32365, 32728)}, { AOM_CDF4(18102, 30690, 32528)},
+ { AOM_CDF4(14196, 28864, 31999)}, { AOM_CDF4(12262, 25792, 30865)},
+ { AOM_CDF4(24176, 32109, 32628)}, { AOM_CDF4(18280, 29681, 31963)},
+ { AOM_CDF4(10205, 23703, 29664)}, { AOM_CDF4(7889, 20025, 27676)},
+ { AOM_CDF4(6060, 16743, 23970)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(5141, 7096, 8260)}, { AOM_CDF4(27186, 29022, 29789)},
+ { AOM_CDF4(6668, 12568, 15682)}, { AOM_CDF4(2172, 6181, 8638)},
+ { AOM_CDF4(1126, 3379, 4531)}, { AOM_CDF4(443, 1361, 2254)},
+ { AOM_CDF4(26083, 31153, 32436)}, { AOM_CDF4(13486, 24603, 28483)},
+ { AOM_CDF4(6508, 14840, 19910)}, { AOM_CDF4(3386, 8800, 13286)},
+ { AOM_CDF4(1530, 4322, 7054)}, { AOM_CDF4(29639, 32080, 32548)},
+ { AOM_CDF4(15897, 27552, 30290)}, { AOM_CDF4(8588, 20047, 25383)},
+ { AOM_CDF4(4889, 13339, 19269)}, { AOM_CDF4(2240, 6871, 10498)},
+ { AOM_CDF4(28165, 32197, 32517)}, { AOM_CDF4(20735, 30427, 31568)},
+ { AOM_CDF4(14325, 24671, 27692)}, { AOM_CDF4(5119, 12554, 17805)},
+ { AOM_CDF4(1810, 5441, 8261)}, { AOM_CDF4(31212, 32724, 32748)},
+ { AOM_CDF4(23352, 31766, 32545)}, { AOM_CDF4(14669, 27570, 31059)},
+ { AOM_CDF4(8492, 20894, 27272)}, { AOM_CDF4(3644, 10194, 15204)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(2461, 7013, 9371)}, { AOM_CDF4(24749, 29600, 30986)},
+ { AOM_CDF4(9466, 19037, 22417)}, { AOM_CDF4(3584, 9280, 14400)},
+ { AOM_CDF4(1505, 3929, 5433)}, { AOM_CDF4(677, 1500, 2736)},
+ { AOM_CDF4(23987, 30702, 32117)}, { AOM_CDF4(13554, 24571, 29263)},
+ { AOM_CDF4(6211, 14556, 21155)}, { AOM_CDF4(3135, 10972, 15625)},
+ { AOM_CDF4(2435, 7127, 11427)}, { AOM_CDF4(31300, 32532, 32550)},
+ { AOM_CDF4(14757, 30365, 31954)}, { AOM_CDF4(4405, 11612, 18553)},
+ { AOM_CDF4(580, 4132, 7322)}, { AOM_CDF4(1695, 10169, 14124)},
+ { AOM_CDF4(30008, 32282, 32591)}, { AOM_CDF4(19244, 30108, 31748)},
+ { AOM_CDF4(11180, 24158, 29555)}, { AOM_CDF4(5650, 14972, 19209)},
+ { AOM_CDF4(2114, 5109, 8456)}, { AOM_CDF4(31856, 32716, 32748)},
+ { AOM_CDF4(23012, 31664, 32572)}, { AOM_CDF4(13694, 26656, 30636)},
+ { AOM_CDF4(8142, 19508, 26093)}, { AOM_CDF4(4253, 10955, 16724)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(601, 983, 1311)}, { AOM_CDF4(18725, 23406, 28087)},
+ { AOM_CDF4(5461, 8192, 10923)}, { AOM_CDF4(3781, 15124, 21425)},
+ { AOM_CDF4(2587, 7761, 12072)}, { AOM_CDF4(106, 458, 810)},
+ { AOM_CDF4(22282, 29710, 31894)}, { AOM_CDF4(8508, 20926, 25984)},
+ { AOM_CDF4(3726, 12713, 18083)}, { AOM_CDF4(1620, 7112, 10893)},
+ { AOM_CDF4(729, 2236, 3495)}, { AOM_CDF4(30163, 32474, 32684)},
+ { AOM_CDF4(18304, 30464, 32000)}, { AOM_CDF4(11443, 26526, 29647)},
+ { AOM_CDF4(6007, 15292, 21299)}, { AOM_CDF4(2234, 6703, 8937)},
+ { AOM_CDF4(30954, 32177, 32571)}, { AOM_CDF4(17363, 29562, 31076)},
+ { AOM_CDF4(9686, 22464, 27410)}, { AOM_CDF4(8192, 16384, 21390)},
+ { AOM_CDF4(1755, 8046, 11264)}, { AOM_CDF4(31168, 32734, 32748)},
+ { AOM_CDF4(22486, 31441, 32471)}, { AOM_CDF4(12833, 25627, 29738)},
+ { AOM_CDF4(6980, 17379, 23122)}, { AOM_CDF4(3111, 8887, 13479)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(6041, 11854, 15927)}, { AOM_CDF4(20326, 30905, 32251)},
+ { AOM_CDF4(14164, 26831, 30725)}, { AOM_CDF4(9760, 20647, 26585)},
+ { AOM_CDF4(6416, 14953, 21219)}, { AOM_CDF4(2966, 7151, 10891)},
+ { AOM_CDF4(23567, 31374, 32254)}, { AOM_CDF4(14978, 27416, 30946)},
+ { AOM_CDF4(9434, 20225, 26254)}, { AOM_CDF4(6658, 14558, 20535)},
+ { AOM_CDF4(3916, 8677, 12989)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(18088, 29545, 31587)},
+ { AOM_CDF4(13062, 25843, 30073)}, { AOM_CDF4(8940, 16827, 22251)},
+ { AOM_CDF4(7654, 13220, 17973)}, { AOM_CDF4(5733, 10316, 14456)},
+ { AOM_CDF4(22879, 31388, 32114)}, { AOM_CDF4(15215, 27993, 30955)},
+ { AOM_CDF4(9397, 19445, 24978)}, { AOM_CDF4(3442, 9813, 15344)},
+ { AOM_CDF4(1368, 3936, 6532)}, { AOM_CDF4(25494, 32033, 32406)},
+ { AOM_CDF4(16772, 27963, 30718)}, { AOM_CDF4(9419, 18165, 23260)},
+ { AOM_CDF4(2677, 7501, 11797)}, { AOM_CDF4(1516, 4344, 7170)},
+ { AOM_CDF4(26556, 31454, 32101)}, { AOM_CDF4(17128, 27035, 30108)},
+ { AOM_CDF4(8324, 15344, 20249)}, { AOM_CDF4(1903, 5696, 9469)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8455, 19003, 24368)}, { AOM_CDF4(23563, 32021, 32604)},
+ { AOM_CDF4(16237, 29446, 31935)}, { AOM_CDF4(10724, 23999, 29358)},
+ { AOM_CDF4(6725, 17528, 24416)}, { AOM_CDF4(3927, 10927, 16825)},
+ { AOM_CDF4(26313, 32288, 32634)}, { AOM_CDF4(17430, 30095, 32095)},
+ { AOM_CDF4(11116, 24606, 29679)}, { AOM_CDF4(7195, 18384, 25269)},
+ { AOM_CDF4(4726, 12852, 19315)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(22822, 31648, 32483)},
+ { AOM_CDF4(16724, 29633, 31929)}, { AOM_CDF4(10261, 23033, 28725)},
+ { AOM_CDF4(7029, 17840, 24528)}, { AOM_CDF4(4867, 13886, 21502)},
+ { AOM_CDF4(25298, 31892, 32491)}, { AOM_CDF4(17809, 29330, 31512)},
+ { AOM_CDF4(9668, 21329, 26579)}, { AOM_CDF4(4774, 12956, 18976)},
+ { AOM_CDF4(2322, 7030, 11540)}, { AOM_CDF4(25472, 31920, 32543)},
+ { AOM_CDF4(17957, 29387, 31632)}, { AOM_CDF4(9196, 20593, 26400)},
+ { AOM_CDF4(4680, 12705, 19202)}, { AOM_CDF4(2917, 8456, 13436)},
+ { AOM_CDF4(26471, 32059, 32574)}, { AOM_CDF4(18458, 29783, 31909)},
+ { AOM_CDF4(8400, 19464, 25956)}, { AOM_CDF4(3812, 10973, 17206)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(6779, 13743, 17678)}, { AOM_CDF4(24806, 31797, 32457)},
+ { AOM_CDF4(17616, 29047, 31372)}, { AOM_CDF4(11063, 23175, 28003)},
+ { AOM_CDF4(6521, 16110, 22324)}, { AOM_CDF4(2764, 7504, 11654)},
+ { AOM_CDF4(25266, 32367, 32637)}, { AOM_CDF4(19054, 30553, 32175)},
+ { AOM_CDF4(12139, 25212, 29807)}, { AOM_CDF4(7311, 18162, 24704)},
+ { AOM_CDF4(3397, 9164, 14074)}, { AOM_CDF4(25988, 32208, 32522)},
+ { AOM_CDF4(16253, 28912, 31526)}, { AOM_CDF4(9151, 21387, 27372)},
+ { AOM_CDF4(5688, 14915, 21496)}, { AOM_CDF4(2717, 7627, 12004)},
+ { AOM_CDF4(23144, 31855, 32443)}, { AOM_CDF4(16070, 28491, 31325)},
+ { AOM_CDF4(8702, 20467, 26517)}, { AOM_CDF4(5243, 13956, 20367)},
+ { AOM_CDF4(2621, 7335, 11567)}, { AOM_CDF4(26636, 32340, 32630)},
+ { AOM_CDF4(19990, 31050, 32341)}, { AOM_CDF4(13243, 26105, 30315)},
+ { AOM_CDF4(8588, 19521, 25918)}, { AOM_CDF4(4717, 11585, 17304)},
+ { AOM_CDF4(25844, 32292, 32582)}, { AOM_CDF4(19090, 30635, 32097)},
+ { AOM_CDF4(11963, 24546, 28939)}, { AOM_CDF4(6218, 16087, 22354)},
+ { AOM_CDF4(2340, 6608, 10426)}, { AOM_CDF4(28046, 32576, 32694)},
+ { AOM_CDF4(21178, 31313, 32296)}, { AOM_CDF4(13486, 26184, 29870)},
+ { AOM_CDF4(7149, 17871, 23723)}, { AOM_CDF4(2833, 7958, 12259)},
+ { AOM_CDF4(27710, 32528, 32686)}, { AOM_CDF4(20674, 31076, 32268)},
+ { AOM_CDF4(12413, 24955, 29243)}, { AOM_CDF4(6676, 16927, 23097)},
+ { AOM_CDF4(2966, 8333, 12919)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8639, 19339, 24429)}, { AOM_CDF4(24404, 31837, 32525)},
+ { AOM_CDF4(16997, 29425, 31784)}, { AOM_CDF4(11253, 24234, 29149)},
+ { AOM_CDF4(6751, 17394, 24028)}, { AOM_CDF4(3490, 9830, 15191)},
+ { AOM_CDF4(26283, 32471, 32714)}, { AOM_CDF4(19599, 31168, 32442)},
+ { AOM_CDF4(13146, 26954, 30893)}, { AOM_CDF4(8214, 20588, 26890)},
+ { AOM_CDF4(4699, 13081, 19300)}, { AOM_CDF4(28212, 32458, 32669)},
+ { AOM_CDF4(18594, 30316, 32100)}, { AOM_CDF4(11219, 24408, 29234)},
+ { AOM_CDF4(6865, 17656, 24149)}, { AOM_CDF4(3678, 10362, 16006)},
+ { AOM_CDF4(25825, 32136, 32616)}, { AOM_CDF4(17313, 29853, 32021)},
+ { AOM_CDF4(11197, 24471, 29472)}, { AOM_CDF4(6947, 17781, 24405)},
+ { AOM_CDF4(3768, 10660, 16261)}, { AOM_CDF4(27352, 32500, 32706)},
+ { AOM_CDF4(20850, 31468, 32469)}, { AOM_CDF4(14021, 27707, 31133)},
+ { AOM_CDF4(8964, 21748, 27838)}, { AOM_CDF4(5437, 14665, 21187)},
+ { AOM_CDF4(26304, 32492, 32698)}, { AOM_CDF4(20409, 31380, 32385)},
+ { AOM_CDF4(13682, 27222, 30632)}, { AOM_CDF4(8974, 21236, 26685)},
+ { AOM_CDF4(4234, 11665, 16934)}, { AOM_CDF4(26273, 32357, 32711)},
+ { AOM_CDF4(20672, 31242, 32441)}, { AOM_CDF4(14172, 27254, 30902)},
+ { AOM_CDF4(9870, 21898, 27275)}, { AOM_CDF4(5164, 13506, 19270)},
+ { AOM_CDF4(26725, 32459, 32728)}, { AOM_CDF4(20991, 31442, 32527)},
+ { AOM_CDF4(13071, 26434, 30811)}, { AOM_CDF4(8184, 20090, 26742)},
+ { AOM_CDF4(4803, 13255, 19895)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(7555, 14942, 18501)}, { AOM_CDF4(24410, 31178, 32287)},
+ { AOM_CDF4(14394, 26738, 30253)}, { AOM_CDF4(8413, 19554, 25195)},
+ { AOM_CDF4(4766, 12924, 18785)}, { AOM_CDF4(2029, 5806, 9207)},
+ { AOM_CDF4(26776, 32364, 32663)}, { AOM_CDF4(18732, 29967, 31931)},
+ { AOM_CDF4(11005, 23786, 28852)}, { AOM_CDF4(6466, 16909, 23510)},
+ { AOM_CDF4(3044, 8638, 13419)}, { AOM_CDF4(29208, 32582, 32704)},
+ { AOM_CDF4(20068, 30857, 32208)}, { AOM_CDF4(12003, 25085, 29595)},
+ { AOM_CDF4(6947, 17750, 24189)}, { AOM_CDF4(3245, 9103, 14007)},
+ { AOM_CDF4(27359, 32465, 32669)}, { AOM_CDF4(19421, 30614, 32174)},
+ { AOM_CDF4(11915, 25010, 29579)}, { AOM_CDF4(6950, 17676, 24074)},
+ { AOM_CDF4(3007, 8473, 13096)}, { AOM_CDF4(29002, 32676, 32735)},
+ { AOM_CDF4(22102, 31849, 32576)}, { AOM_CDF4(14408, 28009, 31405)},
+ { AOM_CDF4(9027, 21679, 27931)}, { AOM_CDF4(4694, 12678, 18748)},
+ { AOM_CDF4(28216, 32528, 32682)}, { AOM_CDF4(20849, 31264, 32318)},
+ { AOM_CDF4(12756, 25815, 29751)}, { AOM_CDF4(7565, 18801, 24923)},
+ { AOM_CDF4(3509, 9533, 14477)}, { AOM_CDF4(30133, 32687, 32739)},
+ { AOM_CDF4(23063, 31910, 32515)}, { AOM_CDF4(14588, 28051, 31132)},
+ { AOM_CDF4(9085, 21649, 27457)}, { AOM_CDF4(4261, 11654, 17264)},
+ { AOM_CDF4(29518, 32691, 32748)}, { AOM_CDF4(22451, 31959, 32613)},
+ { AOM_CDF4(14864, 28722, 31700)}, { AOM_CDF4(9695, 22964, 28716)},
+ { AOM_CDF4(4932, 13358, 19502)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(6465, 16958, 21688)}, { AOM_CDF4(25199, 31514, 32360)},
+ { AOM_CDF4(14774, 27149, 30607)}, { AOM_CDF4(9257, 21438, 26972)},
+ { AOM_CDF4(5723, 15183, 21882)}, { AOM_CDF4(3150, 8879, 13731)},
+ { AOM_CDF4(26989, 32262, 32682)}, { AOM_CDF4(17396, 29937, 32085)},
+ { AOM_CDF4(11387, 24901, 29784)}, { AOM_CDF4(7289, 18821, 25548)},
+ { AOM_CDF4(3734, 10577, 16086)}, { AOM_CDF4(29728, 32501, 32695)},
+ { AOM_CDF4(17431, 29701, 31903)}, { AOM_CDF4(9921, 22826, 28300)},
+ { AOM_CDF4(5896, 15434, 22068)}, { AOM_CDF4(3430, 9646, 14757)},
+ { AOM_CDF4(28614, 32511, 32705)}, { AOM_CDF4(19364, 30638, 32263)},
+ { AOM_CDF4(13129, 26254, 30402)}, { AOM_CDF4(8754, 20484, 26440)},
+ { AOM_CDF4(4378, 11607, 17110)}, { AOM_CDF4(30292, 32671, 32744)},
+ { AOM_CDF4(21780, 31603, 32501)}, { AOM_CDF4(14314, 27829, 31291)},
+ { AOM_CDF4(9611, 22327, 28263)}, { AOM_CDF4(4890, 13087, 19065)},
+ { AOM_CDF4(25862, 32567, 32733)}, { AOM_CDF4(20794, 32050, 32567)},
+ { AOM_CDF4(17243, 30625, 32254)}, { AOM_CDF4(13283, 27628, 31474)},
+ { AOM_CDF4(9669, 22532, 28918)}, { AOM_CDF4(27435, 32697, 32748)},
+ { AOM_CDF4(24922, 32390, 32714)}, { AOM_CDF4(21449, 31504, 32536)},
+ { AOM_CDF4(16392, 29729, 31832)}, { AOM_CDF4(11692, 24884, 29076)},
+ { AOM_CDF4(24193, 32290, 32735)}, { AOM_CDF4(18909, 31104, 32563)},
+ { AOM_CDF4(12236, 26841, 31403)}, { AOM_CDF4(8171, 21840, 29082)},
+ { AOM_CDF4(7224, 17280, 25275)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(3078, 6839, 9890)}, { AOM_CDF4(13837, 20450, 24479)},
+ { AOM_CDF4(5914, 14222, 19328)}, { AOM_CDF4(3866, 10267, 14762)},
+ { AOM_CDF4(2612, 7208, 11042)}, { AOM_CDF4(1067, 2991, 4776)},
+ { AOM_CDF4(25817, 31646, 32529)}, { AOM_CDF4(13708, 26338, 30385)},
+ { AOM_CDF4(7328, 18585, 24870)}, { AOM_CDF4(4691, 13080, 19276)},
+ { AOM_CDF4(1825, 5253, 8352)}, { AOM_CDF4(29386, 32315, 32624)},
+ { AOM_CDF4(17160, 29001, 31360)}, { AOM_CDF4(9602, 21862, 27396)},
+ { AOM_CDF4(5915, 15772, 22148)}, { AOM_CDF4(2786, 7779, 12047)},
+ { AOM_CDF4(29246, 32450, 32663)}, { AOM_CDF4(18696, 29929, 31818)},
+ { AOM_CDF4(10510, 23369, 28560)}, { AOM_CDF4(6229, 16499, 23125)},
+ { AOM_CDF4(2608, 7448, 11705)}, { AOM_CDF4(30753, 32710, 32748)},
+ { AOM_CDF4(21638, 31487, 32503)}, { AOM_CDF4(12937, 26854, 30870)},
+ { AOM_CDF4(8182, 20596, 26970)}, { AOM_CDF4(3637, 10269, 15497)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(5244, 12150, 16906)}, { AOM_CDF4(20486, 26858, 29701)},
+ { AOM_CDF4(7756, 18317, 23735)}, { AOM_CDF4(3452, 9256, 13146)},
+ { AOM_CDF4(2020, 5206, 8229)}, { AOM_CDF4(1801, 4993, 7903)},
+ { AOM_CDF4(27051, 31858, 32531)}, { AOM_CDF4(15988, 27531, 30619)},
+ { AOM_CDF4(9188, 21484, 26719)}, { AOM_CDF4(6273, 17186, 23800)},
+ { AOM_CDF4(3108, 9355, 14764)}, { AOM_CDF4(31076, 32520, 32680)},
+ { AOM_CDF4(18119, 30037, 31850)}, { AOM_CDF4(10244, 22969, 27472)},
+ { AOM_CDF4(4692, 14077, 19273)}, { AOM_CDF4(3694, 11677, 17556)},
+ { AOM_CDF4(30060, 32581, 32720)}, { AOM_CDF4(21011, 30775, 32120)},
+ { AOM_CDF4(11931, 24820, 29289)}, { AOM_CDF4(7119, 17662, 24356)},
+ { AOM_CDF4(3833, 10706, 16304)}, { AOM_CDF4(31954, 32731, 32748)},
+ { AOM_CDF4(23913, 31724, 32489)}, { AOM_CDF4(15520, 28060, 31286)},
+ { AOM_CDF4(11517, 23008, 28571)}, { AOM_CDF4(6193, 14508, 20629)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(1035, 2807, 4156)}, { AOM_CDF4(13162, 18138, 20939)},
+ { AOM_CDF4(2696, 6633, 8755)}, { AOM_CDF4(1373, 4161, 6853)},
+ { AOM_CDF4(1099, 2746, 4716)}, { AOM_CDF4(340, 1021, 1599)},
+ { AOM_CDF4(22826, 30419, 32135)}, { AOM_CDF4(10395, 21762, 26942)},
+ { AOM_CDF4(4726, 12407, 17361)}, { AOM_CDF4(2447, 7080, 10593)},
+ { AOM_CDF4(1227, 3717, 6011)}, { AOM_CDF4(28156, 31424, 31934)},
+ { AOM_CDF4(16915, 27754, 30373)}, { AOM_CDF4(9148, 20990, 26431)},
+ { AOM_CDF4(5950, 15515, 21148)}, { AOM_CDF4(2492, 7327, 11526)},
+ { AOM_CDF4(30602, 32477, 32670)}, { AOM_CDF4(20026, 29955, 31568)},
+ { AOM_CDF4(11220, 23628, 28105)}, { AOM_CDF4(6652, 17019, 22973)},
+ { AOM_CDF4(3064, 8536, 13043)}, { AOM_CDF4(31769, 32724, 32748)},
+ { AOM_CDF4(22230, 30887, 32373)}, { AOM_CDF4(12234, 25079, 29731)},
+ { AOM_CDF4(7326, 18816, 25353)}, { AOM_CDF4(3933, 10907, 16616)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(8896, 16227, 20630)}, { AOM_CDF4(23629, 31782, 32527)},
+ { AOM_CDF4(15173, 27755, 31321)}, { AOM_CDF4(10158, 21233, 27382)},
+ { AOM_CDF4(6420, 14857, 21558)}, { AOM_CDF4(3269, 8155, 12646)},
+ { AOM_CDF4(24835, 32009, 32496)}, { AOM_CDF4(16509, 28421, 31579)},
+ { AOM_CDF4(10957, 21514, 27418)}, { AOM_CDF4(7881, 15930, 22096)},
+ { AOM_CDF4(5388, 10960, 15918)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(20745, 30773, 32093)},
+ { AOM_CDF4(15200, 27221, 30861)}, { AOM_CDF4(13032, 20873, 25667)},
+ { AOM_CDF4(12285, 18663, 23494)}, { AOM_CDF4(11563, 17481, 21489)},
+ { AOM_CDF4(26260, 31982, 32320)}, { AOM_CDF4(15397, 28083, 31100)},
+ { AOM_CDF4(9742, 19217, 24824)}, { AOM_CDF4(3261, 9629, 15362)},
+ { AOM_CDF4(1480, 4322, 7499)}, { AOM_CDF4(27599, 32256, 32460)},
+ { AOM_CDF4(16857, 27659, 30774)}, { AOM_CDF4(9551, 18290, 23748)},
+ { AOM_CDF4(3052, 8933, 14103)}, { AOM_CDF4(2021, 5910, 9787)},
+ { AOM_CDF4(29005, 32015, 32392)}, { AOM_CDF4(17677, 27694, 30863)},
+ { AOM_CDF4(9204, 17356, 23219)}, { AOM_CDF4(2403, 7516, 12814)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(10808, 22056, 26896)}, { AOM_CDF4(25739, 32313, 32676)},
+ { AOM_CDF4(17288, 30203, 32221)}, { AOM_CDF4(11359, 24878, 29896)},
+ { AOM_CDF4(6949, 17767, 24893)}, { AOM_CDF4(4287, 11796, 18071)},
+ { AOM_CDF4(27880, 32521, 32705)}, { AOM_CDF4(19038, 31004, 32414)},
+ { AOM_CDF4(12564, 26345, 30768)}, { AOM_CDF4(8269, 19947, 26779)},
+ { AOM_CDF4(5674, 14657, 21674)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(25742, 32319, 32671)},
+ { AOM_CDF4(19557, 31164, 32454)}, { AOM_CDF4(13381, 26381, 30755)},
+ { AOM_CDF4(10101, 21466, 26722)}, { AOM_CDF4(9209, 19650, 26825)},
+ { AOM_CDF4(27107, 31917, 32432)}, { AOM_CDF4(18056, 28893, 31203)},
+ { AOM_CDF4(10200, 21434, 26764)}, { AOM_CDF4(4660, 12913, 19502)},
+ { AOM_CDF4(2368, 6930, 12504)}, { AOM_CDF4(26960, 32158, 32613)},
+ { AOM_CDF4(18628, 30005, 32031)}, { AOM_CDF4(10233, 22442, 28232)},
+ { AOM_CDF4(5471, 14630, 21516)}, { AOM_CDF4(3235, 10767, 17109)},
+ { AOM_CDF4(27696, 32440, 32692)}, { AOM_CDF4(20032, 31167, 32438)},
+ { AOM_CDF4(8700, 21341, 28442)}, { AOM_CDF4(5662, 14831, 21795)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(9704, 17294, 21132)}, { AOM_CDF4(26762, 32278, 32633)},
+ { AOM_CDF4(18382, 29620, 31819)}, { AOM_CDF4(10891, 23475, 28723)},
+ { AOM_CDF4(6358, 16583, 23309)}, { AOM_CDF4(3248, 9118, 14141)},
+ { AOM_CDF4(27204, 32573, 32699)}, { AOM_CDF4(19818, 30824, 32329)},
+ { AOM_CDF4(11772, 25120, 30041)}, { AOM_CDF4(6995, 18033, 25039)},
+ { AOM_CDF4(3752, 10442, 16098)}, { AOM_CDF4(27222, 32256, 32559)},
+ { AOM_CDF4(15356, 28399, 31475)}, { AOM_CDF4(8821, 20635, 27057)},
+ { AOM_CDF4(5511, 14404, 21239)}, { AOM_CDF4(2935, 8222, 13051)},
+ { AOM_CDF4(24875, 32120, 32529)}, { AOM_CDF4(15233, 28265, 31445)},
+ { AOM_CDF4(8605, 20570, 26932)}, { AOM_CDF4(5431, 14413, 21196)},
+ { AOM_CDF4(2994, 8341, 13223)}, { AOM_CDF4(28201, 32604, 32700)},
+ { AOM_CDF4(21041, 31446, 32456)}, { AOM_CDF4(13221, 26213, 30475)},
+ { AOM_CDF4(8255, 19385, 26037)}, { AOM_CDF4(4930, 12585, 18830)},
+ { AOM_CDF4(28768, 32448, 32627)}, { AOM_CDF4(19705, 30561, 32021)},
+ { AOM_CDF4(11572, 23589, 28220)}, { AOM_CDF4(5532, 15034, 21446)},
+ { AOM_CDF4(2460, 7150, 11456)}, { AOM_CDF4(29874, 32619, 32699)},
+ { AOM_CDF4(21621, 31071, 32201)}, { AOM_CDF4(12511, 24747, 28992)},
+ { AOM_CDF4(6281, 16395, 22748)}, { AOM_CDF4(3246, 9278, 14497)},
+ { AOM_CDF4(29715, 32625, 32712)}, { AOM_CDF4(20958, 31011, 32283)},
+ { AOM_CDF4(11233, 23671, 28806)}, { AOM_CDF4(6012, 16128, 22868)},
+ { AOM_CDF4(3427, 9851, 15414)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(11016, 22111, 26794)}, { AOM_CDF4(25946, 32357, 32677)},
+ { AOM_CDF4(17890, 30452, 32252)}, { AOM_CDF4(11678, 25142, 29816)},
+ { AOM_CDF4(6720, 17534, 24584)}, { AOM_CDF4(4230, 11665, 17820)},
+ { AOM_CDF4(28400, 32623, 32747)}, { AOM_CDF4(21164, 31668, 32575)},
+ { AOM_CDF4(13572, 27388, 31182)}, { AOM_CDF4(8234, 20750, 27358)},
+ { AOM_CDF4(5065, 14055, 20897)}, { AOM_CDF4(28981, 32547, 32705)},
+ { AOM_CDF4(18681, 30543, 32239)}, { AOM_CDF4(10919, 24075, 29286)},
+ { AOM_CDF4(6431, 17199, 24077)}, { AOM_CDF4(3819, 10464, 16618)},
+ { AOM_CDF4(26870, 32467, 32693)}, { AOM_CDF4(19041, 30831, 32347)},
+ { AOM_CDF4(11794, 25211, 30016)}, { AOM_CDF4(6888, 18019, 24970)},
+ { AOM_CDF4(4370, 12363, 18992)}, { AOM_CDF4(29578, 32670, 32744)},
+ { AOM_CDF4(23159, 32007, 32613)}, { AOM_CDF4(15315, 28669, 31676)},
+ { AOM_CDF4(9298, 22607, 28782)}, { AOM_CDF4(6144, 15913, 22968)},
+ { AOM_CDF4(28110, 32499, 32669)}, { AOM_CDF4(21574, 30937, 32015)},
+ { AOM_CDF4(12759, 24818, 28727)}, { AOM_CDF4(6545, 16761, 23042)},
+ { AOM_CDF4(3649, 10597, 16833)}, { AOM_CDF4(28163, 32552, 32728)},
+ { AOM_CDF4(22101, 31469, 32464)}, { AOM_CDF4(13160, 25472, 30143)},
+ { AOM_CDF4(7303, 18684, 25468)}, { AOM_CDF4(5241, 13975, 20955)},
+ { AOM_CDF4(28400, 32631, 32744)}, { AOM_CDF4(22104, 31793, 32603)},
+ { AOM_CDF4(13557, 26571, 30846)}, { AOM_CDF4(7749, 19861, 26675)},
+ { AOM_CDF4(4873, 14030, 21234)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(9800, 17635, 21073)}, { AOM_CDF4(26153, 31885, 32527)},
+ { AOM_CDF4(15038, 27852, 31006)}, { AOM_CDF4(8718, 20564, 26486)},
+ { AOM_CDF4(5128, 14076, 20514)}, { AOM_CDF4(2636, 7566, 11925)},
+ { AOM_CDF4(27551, 32504, 32701)}, { AOM_CDF4(18310, 30054, 32100)},
+ { AOM_CDF4(10211, 23420, 29082)}, { AOM_CDF4(6222, 16876, 23916)},
+ { AOM_CDF4(3462, 9954, 15498)}, { AOM_CDF4(29991, 32633, 32721)},
+ { AOM_CDF4(19883, 30751, 32201)}, { AOM_CDF4(11141, 24184, 29285)},
+ { AOM_CDF4(6420, 16940, 23774)}, { AOM_CDF4(3392, 9753, 15118)},
+ { AOM_CDF4(28465, 32616, 32712)}, { AOM_CDF4(19850, 30702, 32244)},
+ { AOM_CDF4(10983, 24024, 29223)}, { AOM_CDF4(6294, 16770, 23582)},
+ { AOM_CDF4(3244, 9283, 14509)}, { AOM_CDF4(30023, 32717, 32748)},
+ { AOM_CDF4(22940, 32032, 32626)}, { AOM_CDF4(14282, 27928, 31473)},
+ { AOM_CDF4(8562, 21327, 27914)}, { AOM_CDF4(4846, 13393, 19919)},
+ { AOM_CDF4(29981, 32590, 32695)}, { AOM_CDF4(20465, 30963, 32166)},
+ { AOM_CDF4(11479, 23579, 28195)}, { AOM_CDF4(5916, 15648, 22073)},
+ { AOM_CDF4(3031, 8605, 13398)}, { AOM_CDF4(31146, 32691, 32739)},
+ { AOM_CDF4(23106, 31724, 32444)}, { AOM_CDF4(13783, 26738, 30439)},
+ { AOM_CDF4(7852, 19468, 25807)}, { AOM_CDF4(3860, 11124, 16853)},
+ { AOM_CDF4(31014, 32724, 32748)}, { AOM_CDF4(23629, 32109, 32628)},
+ { AOM_CDF4(14747, 28115, 31403)}, { AOM_CDF4(8545, 21242, 27478)},
+ { AOM_CDF4(4574, 12781, 19067)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(9185, 19694, 24688)}, { AOM_CDF4(26081, 31985, 32621)},
+ { AOM_CDF4(16015, 29000, 31787)}, { AOM_CDF4(10542, 23690, 29206)},
+ { AOM_CDF4(6732, 17945, 24677)}, { AOM_CDF4(3916, 11039, 16722)},
+ { AOM_CDF4(28224, 32566, 32744)}, { AOM_CDF4(19100, 31138, 32485)},
+ { AOM_CDF4(12528, 26620, 30879)}, { AOM_CDF4(7741, 20277, 26885)},
+ { AOM_CDF4(4566, 12845, 18990)}, { AOM_CDF4(29933, 32593, 32718)},
+ { AOM_CDF4(17670, 30333, 32155)}, { AOM_CDF4(10385, 23600, 28909)},
+ { AOM_CDF4(6243, 16236, 22407)}, { AOM_CDF4(3976, 10389, 16017)},
+ { AOM_CDF4(28377, 32561, 32738)}, { AOM_CDF4(19366, 31175, 32482)},
+ { AOM_CDF4(13327, 27175, 31094)}, { AOM_CDF4(8258, 20769, 27143)},
+ { AOM_CDF4(4703, 13198, 19527)}, { AOM_CDF4(31086, 32706, 32748)},
+ { AOM_CDF4(22853, 31902, 32583)}, { AOM_CDF4(14759, 28186, 31419)},
+ { AOM_CDF4(9284, 22382, 28348)}, { AOM_CDF4(5585, 15192, 21868)},
+ { AOM_CDF4(28291, 32652, 32746)}, { AOM_CDF4(19849, 32107, 32571)},
+ { AOM_CDF4(14834, 26818, 29214)}, { AOM_CDF4(10306, 22594, 28672)},
+ { AOM_CDF4(6615, 17384, 23384)}, { AOM_CDF4(28947, 32604, 32745)},
+ { AOM_CDF4(25625, 32289, 32646)}, { AOM_CDF4(18758, 28672, 31403)},
+ { AOM_CDF4(10017, 23430, 28523)}, { AOM_CDF4(6862, 15269, 22131)},
+ { AOM_CDF4(23933, 32509, 32739)}, { AOM_CDF4(19927, 31495, 32631)},
+ { AOM_CDF4(11903, 26023, 30621)}, { AOM_CDF4(7026, 20094, 27252)},
+ { AOM_CDF4(5998, 18106, 24437)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(4456, 11274, 15533)}, { AOM_CDF4(21219, 29079, 31616)},
+ { AOM_CDF4(11173, 23774, 28567)}, { AOM_CDF4(7282, 18293, 24263)},
+ { AOM_CDF4(4890, 13286, 19115)}, { AOM_CDF4(1890, 5508, 8659)},
+ { AOM_CDF4(26651, 32136, 32647)}, { AOM_CDF4(14630, 28254, 31455)},
+ { AOM_CDF4(8716, 21287, 27395)}, { AOM_CDF4(5615, 15331, 22008)},
+ { AOM_CDF4(2675, 7700, 12150)}, { AOM_CDF4(29954, 32526, 32690)},
+ { AOM_CDF4(16126, 28982, 31633)}, { AOM_CDF4(9030, 21361, 27352)},
+ { AOM_CDF4(5411, 14793, 21271)}, { AOM_CDF4(2943, 8422, 13163)},
+ { AOM_CDF4(29539, 32601, 32730)}, { AOM_CDF4(18125, 30385, 32201)},
+ { AOM_CDF4(10422, 24090, 29468)}, { AOM_CDF4(6468, 17487, 24438)},
+ { AOM_CDF4(2970, 8653, 13531)}, { AOM_CDF4(30912, 32715, 32748)},
+ { AOM_CDF4(20666, 31373, 32497)}, { AOM_CDF4(12509, 26640, 30917)},
+ { AOM_CDF4(8058, 20629, 27290)}, { AOM_CDF4(4231, 12006, 18052)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(10202, 20633, 25484)}, { AOM_CDF4(27336, 31445, 32352)},
+ { AOM_CDF4(12420, 24384, 28552)}, { AOM_CDF4(7648, 18115, 23856)},
+ { AOM_CDF4(5662, 14341, 19902)}, { AOM_CDF4(3611, 10328, 15390)},
+ { AOM_CDF4(30945, 32616, 32736)}, { AOM_CDF4(18682, 30505, 32253)},
+ { AOM_CDF4(11513, 25336, 30203)}, { AOM_CDF4(7449, 19452, 26148)},
+ { AOM_CDF4(4482, 13051, 18886)}, { AOM_CDF4(32022, 32690, 32747)},
+ { AOM_CDF4(18578, 30501, 32146)}, { AOM_CDF4(11249, 23368, 28631)},
+ { AOM_CDF4(5645, 16958, 22158)}, { AOM_CDF4(5009, 11444, 16637)},
+ { AOM_CDF4(31357, 32710, 32748)}, { AOM_CDF4(21552, 31494, 32504)},
+ { AOM_CDF4(13891, 27677, 31340)}, { AOM_CDF4(9051, 22098, 28172)},
+ { AOM_CDF4(5190, 13377, 19486)}, { AOM_CDF4(32364, 32740, 32748)},
+ { AOM_CDF4(24839, 31907, 32551)}, { AOM_CDF4(17160, 28779, 31696)},
+ { AOM_CDF4(12452, 24137, 29602)}, { AOM_CDF4(6165, 15389, 22477)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(2575, 7281, 11077)}, { AOM_CDF4(14002, 20866, 25402)},
+ { AOM_CDF4(6343, 15056, 19658)}, { AOM_CDF4(4474, 11858, 17041)},
+ { AOM_CDF4(2865, 8299, 12534)}, { AOM_CDF4(1344, 3949, 6391)},
+ { AOM_CDF4(24720, 31239, 32459)}, { AOM_CDF4(12585, 25356, 29968)},
+ { AOM_CDF4(7181, 18246, 24444)}, { AOM_CDF4(5025, 13667, 19885)},
+ { AOM_CDF4(2521, 7304, 11605)}, { AOM_CDF4(29908, 32252, 32584)},
+ { AOM_CDF4(17421, 29156, 31575)}, { AOM_CDF4(9889, 22188, 27782)},
+ { AOM_CDF4(5878, 15647, 22123)}, { AOM_CDF4(2814, 8665, 13323)},
+ { AOM_CDF4(30183, 32568, 32713)}, { AOM_CDF4(18528, 30195, 32049)},
+ { AOM_CDF4(10982, 24606, 29657)}, { AOM_CDF4(6957, 18165, 25231)},
+ { AOM_CDF4(3508, 10118, 15468)}, { AOM_CDF4(31761, 32736, 32748)},
+ { AOM_CDF4(21041, 31328, 32546)}, { AOM_CDF4(12568, 26732, 31166)},
+ { AOM_CDF4(8052, 20720, 27733)}, { AOM_CDF4(4336, 12192, 18396)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(7062, 16472, 22319)}, { AOM_CDF4(24538, 32261, 32674)},
+ { AOM_CDF4(13675, 28041, 31779)}, { AOM_CDF4(8590, 20674, 27631)},
+ { AOM_CDF4(5685, 14675, 22013)}, { AOM_CDF4(3655, 9898, 15731)},
+ { AOM_CDF4(26493, 32418, 32658)}, { AOM_CDF4(16376, 29342, 32090)},
+ { AOM_CDF4(10594, 22649, 28970)}, { AOM_CDF4(8176, 17170, 24303)},
+ { AOM_CDF4(5605, 12694, 19139)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(23888, 31902, 32542)},
+ { AOM_CDF4(18612, 29687, 31987)}, { AOM_CDF4(16245, 24852, 29249)},
+ { AOM_CDF4(15765, 22608, 27559)}, { AOM_CDF4(19895, 24699, 27510)},
+ { AOM_CDF4(28401, 32212, 32457)}, { AOM_CDF4(15274, 27825, 30980)},
+ { AOM_CDF4(9364, 18128, 24332)}, { AOM_CDF4(2283, 8193, 15082)},
+ { AOM_CDF4(1228, 3972, 7881)}, { AOM_CDF4(29455, 32469, 32620)},
+ { AOM_CDF4(17981, 28245, 31388)}, { AOM_CDF4(10921, 20098, 26240)},
+ { AOM_CDF4(3743, 11829, 18657)}, { AOM_CDF4(2374, 9593, 15715)},
+ { AOM_CDF4(31068, 32466, 32635)}, { AOM_CDF4(20321, 29572, 31971)},
+ { AOM_CDF4(10771, 20255, 27119)}, { AOM_CDF4(2795, 10410, 17361)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(9320, 22102, 27840)}, { AOM_CDF4(27057, 32464, 32724)},
+ { AOM_CDF4(16331, 30268, 32309)}, { AOM_CDF4(10319, 23935, 29720)},
+ { AOM_CDF4(6189, 16448, 24106)}, { AOM_CDF4(3589, 10884, 18808)},
+ { AOM_CDF4(29026, 32624, 32748)}, { AOM_CDF4(19226, 31507, 32587)},
+ { AOM_CDF4(12692, 26921, 31203)}, { AOM_CDF4(7049, 19532, 27635)},
+ { AOM_CDF4(7727, 15669, 23252)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(28056, 32625, 32748)},
+ { AOM_CDF4(22383, 32075, 32669)}, { AOM_CDF4(15417, 27098, 31749)},
+ { AOM_CDF4(18127, 26493, 27190)}, { AOM_CDF4(5461, 16384, 21845)},
+ { AOM_CDF4(27982, 32091, 32584)}, { AOM_CDF4(19045, 29868, 31972)},
+ { AOM_CDF4(10397, 22266, 27932)}, { AOM_CDF4(5990, 13697, 21500)},
+ { AOM_CDF4(1792, 6912, 15104)}, { AOM_CDF4(28198, 32501, 32718)},
+ { AOM_CDF4(21534, 31521, 32569)}, { AOM_CDF4(11109, 25217, 30017)},
+ { AOM_CDF4(5671, 15124, 26151)}, { AOM_CDF4(4681, 14043, 18725)},
+ { AOM_CDF4(28688, 32580, 32741)}, { AOM_CDF4(22576, 32079, 32661)},
+ { AOM_CDF4(10627, 22141, 28340)}, { AOM_CDF4(9362, 14043, 28087)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(7754, 16948, 22142)}, { AOM_CDF4(25670, 32330, 32691)},
+ { AOM_CDF4(15663, 29225, 31994)}, { AOM_CDF4(9878, 23288, 29158)},
+ { AOM_CDF4(6419, 17088, 24336)}, { AOM_CDF4(3859, 11003, 17039)},
+ { AOM_CDF4(27562, 32595, 32725)}, { AOM_CDF4(17575, 30588, 32399)},
+ { AOM_CDF4(10819, 24838, 30309)}, { AOM_CDF4(7124, 18686, 25916)},
+ { AOM_CDF4(4479, 12688, 19340)}, { AOM_CDF4(28385, 32476, 32673)},
+ { AOM_CDF4(15306, 29005, 31938)}, { AOM_CDF4(8937, 21615, 28322)},
+ { AOM_CDF4(5982, 15603, 22786)}, { AOM_CDF4(3620, 10267, 16136)},
+ { AOM_CDF4(27280, 32464, 32667)}, { AOM_CDF4(15607, 29160, 32004)},
+ { AOM_CDF4(9091, 22135, 28740)}, { AOM_CDF4(6232, 16632, 24020)},
+ { AOM_CDF4(4047, 11377, 17672)}, { AOM_CDF4(29220, 32630, 32718)},
+ { AOM_CDF4(19650, 31220, 32462)}, { AOM_CDF4(13050, 26312, 30827)},
+ { AOM_CDF4(9228, 20870, 27468)}, { AOM_CDF4(6146, 15149, 21971)},
+ { AOM_CDF4(30169, 32481, 32623)}, { AOM_CDF4(17212, 29311, 31554)},
+ { AOM_CDF4(9911, 21311, 26882)}, { AOM_CDF4(4487, 13314, 20372)},
+ { AOM_CDF4(2570, 7772, 12889)}, { AOM_CDF4(30924, 32613, 32708)},
+ { AOM_CDF4(19490, 30206, 32107)}, { AOM_CDF4(11232, 23998, 29276)},
+ { AOM_CDF4(6769, 17955, 25035)}, { AOM_CDF4(4398, 12623, 19214)},
+ { AOM_CDF4(30609, 32627, 32722)}, { AOM_CDF4(19370, 30582, 32287)},
+ { AOM_CDF4(10457, 23619, 29409)}, { AOM_CDF4(6443, 17637, 24834)},
+ { AOM_CDF4(4645, 13236, 20106)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8626, 20271, 26216)}, { AOM_CDF4(26707, 32406, 32711)},
+ { AOM_CDF4(16999, 30329, 32286)}, { AOM_CDF4(11445, 25123, 30286)},
+ { AOM_CDF4(6411, 18828, 25601)}, { AOM_CDF4(6801, 12458, 20248)},
+ { AOM_CDF4(29918, 32682, 32748)}, { AOM_CDF4(20649, 31739, 32618)},
+ { AOM_CDF4(12879, 27773, 31581)}, { AOM_CDF4(7896, 21751, 28244)},
+ { AOM_CDF4(5260, 14870, 23698)}, { AOM_CDF4(29252, 32593, 32731)},
+ { AOM_CDF4(17072, 30460, 32294)}, { AOM_CDF4(10653, 24143, 29365)},
+ { AOM_CDF4(6536, 17490, 23983)}, { AOM_CDF4(4929, 13170, 20085)},
+ { AOM_CDF4(28137, 32518, 32715)}, { AOM_CDF4(18171, 30784, 32407)},
+ { AOM_CDF4(11437, 25436, 30459)}, { AOM_CDF4(7252, 18534, 26176)},
+ { AOM_CDF4(4126, 13353, 20978)}, { AOM_CDF4(31162, 32726, 32748)},
+ { AOM_CDF4(23017, 32222, 32701)}, { AOM_CDF4(15629, 29233, 32046)},
+ { AOM_CDF4(9387, 22621, 29480)}, { AOM_CDF4(6922, 17616, 25010)},
+ { AOM_CDF4(28838, 32265, 32614)}, { AOM_CDF4(19701, 30206, 31920)},
+ { AOM_CDF4(11214, 22410, 27933)}, { AOM_CDF4(5320, 14177, 23034)},
+ { AOM_CDF4(5049, 12881, 17827)}, { AOM_CDF4(27484, 32471, 32734)},
+ { AOM_CDF4(21076, 31526, 32561)}, { AOM_CDF4(12707, 26303, 31211)},
+ { AOM_CDF4(8169, 21722, 28219)}, { AOM_CDF4(6045, 19406, 27042)},
+ { AOM_CDF4(27753, 32572, 32745)}, { AOM_CDF4(20832, 31878, 32653)},
+ { AOM_CDF4(13250, 27356, 31674)}, { AOM_CDF4(7718, 21508, 29858)},
+ { AOM_CDF4(7209, 18350, 25559)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(7876, 16901, 21741)}, { AOM_CDF4(24001, 31898, 32625)},
+ { AOM_CDF4(14529, 27959, 31451)}, { AOM_CDF4(8273, 20818, 27258)},
+ { AOM_CDF4(5278, 14673, 21510)}, { AOM_CDF4(2983, 8843, 14039)},
+ { AOM_CDF4(28016, 32574, 32732)}, { AOM_CDF4(17471, 30306, 32301)},
+ { AOM_CDF4(10224, 24063, 29728)}, { AOM_CDF4(6602, 17954, 25052)},
+ { AOM_CDF4(4002, 11585, 17759)}, { AOM_CDF4(30190, 32634, 32739)},
+ { AOM_CDF4(17497, 30282, 32270)}, { AOM_CDF4(10229, 23729, 29538)},
+ { AOM_CDF4(6344, 17211, 24440)}, { AOM_CDF4(3849, 11189, 17108)},
+ { AOM_CDF4(28570, 32583, 32726)}, { AOM_CDF4(17521, 30161, 32238)},
+ { AOM_CDF4(10153, 23565, 29378)}, { AOM_CDF4(6455, 17341, 24443)},
+ { AOM_CDF4(3907, 11042, 17024)}, { AOM_CDF4(30689, 32715, 32748)},
+ { AOM_CDF4(21546, 31840, 32610)}, { AOM_CDF4(13547, 27581, 31459)},
+ { AOM_CDF4(8912, 21757, 28309)}, { AOM_CDF4(5548, 15080, 22046)},
+ { AOM_CDF4(30783, 32540, 32685)}, { AOM_CDF4(17540, 29528, 31668)},
+ { AOM_CDF4(10160, 21468, 26783)}, { AOM_CDF4(4724, 13393, 20054)},
+ { AOM_CDF4(2702, 8174, 13102)}, { AOM_CDF4(31648, 32686, 32742)},
+ { AOM_CDF4(20954, 31094, 32337)}, { AOM_CDF4(12420, 25698, 30179)},
+ { AOM_CDF4(7304, 19320, 26248)}, { AOM_CDF4(4366, 12261, 18864)},
+ { AOM_CDF4(31581, 32723, 32748)}, { AOM_CDF4(21373, 31586, 32525)},
+ { AOM_CDF4(12744, 26625, 30885)}, { AOM_CDF4(7431, 20322, 26950)},
+ { AOM_CDF4(4692, 13323, 20111)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(7833, 18369, 24095)}, { AOM_CDF4(26650, 32273, 32702)},
+ { AOM_CDF4(16371, 29961, 32191)}, { AOM_CDF4(11055, 24082, 29629)},
+ { AOM_CDF4(6892, 18644, 25400)}, { AOM_CDF4(5006, 13057, 19240)},
+ { AOM_CDF4(29834, 32666, 32748)}, { AOM_CDF4(19577, 31335, 32570)},
+ { AOM_CDF4(12253, 26509, 31122)}, { AOM_CDF4(7991, 20772, 27711)},
+ { AOM_CDF4(5677, 15910, 23059)}, { AOM_CDF4(30109, 32532, 32720)},
+ { AOM_CDF4(16747, 30166, 32252)}, { AOM_CDF4(10134, 23542, 29184)},
+ { AOM_CDF4(5791, 16176, 23556)}, { AOM_CDF4(4362, 10414, 17284)},
+ { AOM_CDF4(29492, 32626, 32748)}, { AOM_CDF4(19894, 31402, 32525)},
+ { AOM_CDF4(12942, 27071, 30869)}, { AOM_CDF4(8346, 21216, 27405)},
+ { AOM_CDF4(6572, 17087, 23859)}, { AOM_CDF4(32035, 32735, 32748)},
+ { AOM_CDF4(22957, 31838, 32618)}, { AOM_CDF4(14724, 28572, 31772)},
+ { AOM_CDF4(10364, 23999, 29553)}, { AOM_CDF4(7004, 18433, 25655)},
+ { AOM_CDF4(27528, 32277, 32681)}, { AOM_CDF4(16959, 31171, 32096)},
+ { AOM_CDF4(10486, 23593, 27962)}, { AOM_CDF4(8192, 16384, 23211)},
+ { AOM_CDF4(8937, 17873, 20852)}, { AOM_CDF4(27715, 32002, 32615)},
+ { AOM_CDF4(15073, 29491, 31676)}, { AOM_CDF4(11264, 24576, 28672)},
+ { AOM_CDF4(2341, 18725, 23406)}, { AOM_CDF4(7282, 18204, 25486)},
+ { AOM_CDF4(28547, 32213, 32657)}, { AOM_CDF4(20788, 29773, 32239)},
+ { AOM_CDF4(6780, 21469, 30508)}, { AOM_CDF4(5958, 14895, 23831)},
+ { AOM_CDF4(16384, 21845, 27307)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(5992, 14304, 19765)}, { AOM_CDF4(22612, 31238, 32456)},
+ { AOM_CDF4(13456, 27162, 31087)}, { AOM_CDF4(8001, 20062, 26504)},
+ { AOM_CDF4(5168, 14105, 20764)}, { AOM_CDF4(2632, 7771, 12385)},
+ { AOM_CDF4(27034, 32344, 32709)}, { AOM_CDF4(15850, 29415, 31997)},
+ { AOM_CDF4(9494, 22776, 28841)}, { AOM_CDF4(6151, 16830, 23969)},
+ { AOM_CDF4(3461, 10039, 15722)}, { AOM_CDF4(30134, 32569, 32731)},
+ { AOM_CDF4(15638, 29422, 31945)}, { AOM_CDF4(9150, 21865, 28218)},
+ { AOM_CDF4(5647, 15719, 22676)}, { AOM_CDF4(3402, 9772, 15477)},
+ { AOM_CDF4(28530, 32586, 32735)}, { AOM_CDF4(17139, 30298, 32292)},
+ { AOM_CDF4(10200, 24039, 29685)}, { AOM_CDF4(6419, 17674, 24786)},
+ { AOM_CDF4(3544, 10225, 15824)}, { AOM_CDF4(31333, 32726, 32748)},
+ { AOM_CDF4(20618, 31487, 32544)}, { AOM_CDF4(12901, 27217, 31232)},
+ { AOM_CDF4(8624, 21734, 28171)}, { AOM_CDF4(5104, 14191, 20748)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(11206, 21090, 26561)}, { AOM_CDF4(28759, 32279, 32671)},
+ { AOM_CDF4(14171, 27952, 31569)}, { AOM_CDF4(9743, 22907, 29141)},
+ { AOM_CDF4(6871, 17886, 24868)}, { AOM_CDF4(4960, 13152, 19315)},
+ { AOM_CDF4(31077, 32661, 32748)}, { AOM_CDF4(19400, 31195, 32515)},
+ { AOM_CDF4(12752, 26858, 31040)}, { AOM_CDF4(8370, 22098, 28591)},
+ { AOM_CDF4(5457, 15373, 22298)}, { AOM_CDF4(31697, 32706, 32748)},
+ { AOM_CDF4(17860, 30657, 32333)}, { AOM_CDF4(12510, 24812, 29261)},
+ { AOM_CDF4(6180, 19124, 24722)}, { AOM_CDF4(5041, 13548, 17959)},
+ { AOM_CDF4(31552, 32716, 32748)}, { AOM_CDF4(21908, 31769, 32623)},
+ { AOM_CDF4(14470, 28201, 31565)}, { AOM_CDF4(9493, 22982, 28608)},
+ { AOM_CDF4(6858, 17240, 24137)}, { AOM_CDF4(32543, 32752, 32756)},
+ { AOM_CDF4(24286, 32097, 32666)}, { AOM_CDF4(15958, 29217, 32024)},
+ { AOM_CDF4(10207, 24234, 29958)}, { AOM_CDF4(6929, 18305, 25652)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(4137, 10847, 15682)}, { AOM_CDF4(17824, 27001, 30058)},
+ { AOM_CDF4(10204, 22796, 28291)}, { AOM_CDF4(6076, 15935, 22125)},
+ { AOM_CDF4(3852, 10937, 16816)}, { AOM_CDF4(2252, 6324, 10131)},
+ { AOM_CDF4(25840, 32016, 32662)}, { AOM_CDF4(15109, 28268, 31531)},
+ { AOM_CDF4(9385, 22231, 28340)}, { AOM_CDF4(6082, 16672, 23479)},
+ { AOM_CDF4(3318, 9427, 14681)}, { AOM_CDF4(30594, 32574, 32718)},
+ { AOM_CDF4(16836, 29552, 31859)}, { AOM_CDF4(9556, 22542, 28356)},
+ { AOM_CDF4(6305, 16725, 23540)}, { AOM_CDF4(3376, 9895, 15184)},
+ { AOM_CDF4(29383, 32617, 32745)}, { AOM_CDF4(18891, 30809, 32401)},
+ { AOM_CDF4(11688, 25942, 30687)}, { AOM_CDF4(7468, 19469, 26651)},
+ { AOM_CDF4(3909, 11358, 17012)}, { AOM_CDF4(31564, 32736, 32748)},
+ { AOM_CDF4(20906, 31611, 32600)}, { AOM_CDF4(13191, 27621, 31537)},
+ { AOM_CDF4(8768, 22029, 28676)}, { AOM_CDF4(5079, 14109, 20906)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ }
+};
+
+static const u16 av1_default_coeff_base_eob_multi_cdfs[TOKEN_CDF_Q_CTXS][TX_SIZES]
+ [PLANE_TYPES][SIG_COEF_CONTEXTS_EOB][CDF_SIZE(NUM_BASE_LEVELS + 1)] = {
+ {
+ {
+ {
+ { AOM_CDF3(17837, 29055)},
+ { AOM_CDF3(29600, 31446)},
+ { AOM_CDF3(30844, 31878)},
+ { AOM_CDF3(24926, 28948)}
+ },
+ {
+ { AOM_CDF3(21365, 30026)},
+ { AOM_CDF3(30512, 32423)},
+ { AOM_CDF3(31658, 32621)},
+ { AOM_CDF3(29630, 31881)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(5717, 26477)},
+ { AOM_CDF3(30491, 31703)},
+ { AOM_CDF3(31550, 32158)},
+ { AOM_CDF3(29648, 31491)}
+ },
+ {
+ { AOM_CDF3(12608, 27820)},
+ { AOM_CDF3(30680, 32225)},
+ { AOM_CDF3(30809, 32335)},
+ { AOM_CDF3(31299, 32423)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(1786, 12612)},
+ { AOM_CDF3(30663, 31625)},
+ { AOM_CDF3(32339, 32468)},
+ { AOM_CDF3(31148, 31833)}
+ },
+ {
+ { AOM_CDF3(18857, 23865)},
+ { AOM_CDF3(31428, 32428)},
+ { AOM_CDF3(31744, 32373)},
+ { AOM_CDF3(31775, 32526)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(1787, 2532)},
+ { AOM_CDF3(30832, 31662)},
+ { AOM_CDF3(31824, 32682)},
+ { AOM_CDF3(32133, 32569)}
+ },
+ {
+ { AOM_CDF3(13751, 22235)},
+ { AOM_CDF3(32089, 32409)},
+ { AOM_CDF3(27084, 27920)},
+ { AOM_CDF3(29291, 32594)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(1725, 3449)},
+ { AOM_CDF3(31102, 31935)},
+ { AOM_CDF3(32457, 32613)},
+ { AOM_CDF3(32412, 32649)}
+ },
+ {
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF3(17560, 29888)},
+ { AOM_CDF3(29671, 31549)},
+ { AOM_CDF3(31007, 32056)},
+ { AOM_CDF3(27286, 30006)}
+ },
+ {
+ { AOM_CDF3(26594, 31212)},
+ { AOM_CDF3(31208, 32582)},
+ { AOM_CDF3(31835, 32637)},
+ { AOM_CDF3(30595, 32206)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(15239, 29932)},
+ { AOM_CDF3(31315, 32095)},
+ { AOM_CDF3(32130, 32434)},
+ { AOM_CDF3(30864, 31996)}
+ },
+ {
+ { AOM_CDF3(26279, 30968)},
+ { AOM_CDF3(31142, 32495)},
+ { AOM_CDF3(31713, 32540)},
+ { AOM_CDF3(31929, 32594)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(2644, 25198)},
+ { AOM_CDF3(32038, 32451)},
+ { AOM_CDF3(32639, 32695)},
+ { AOM_CDF3(32166, 32518)}
+ },
+ {
+ { AOM_CDF3(17187, 27668)},
+ { AOM_CDF3(31714, 32550)},
+ { AOM_CDF3(32283, 32678)},
+ { AOM_CDF3(31930, 32563)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(1044, 2257)},
+ { AOM_CDF3(30755, 31923)},
+ { AOM_CDF3(32208, 32693)},
+ { AOM_CDF3(32244, 32615)}
+ },
+ {
+ { AOM_CDF3(21317, 26207)},
+ { AOM_CDF3(29133, 30868)},
+ { AOM_CDF3(29311, 31231)},
+ { AOM_CDF3(29657, 31087)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(478, 1834)},
+ { AOM_CDF3(31005, 31987)},
+ { AOM_CDF3(32317, 32724)},
+ { AOM_CDF3(30865, 32648)}
+ },
+ {
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF3(20092, 30774)},
+ { AOM_CDF3(30695, 32020)},
+ { AOM_CDF3(31131, 32103)},
+ { AOM_CDF3(28666, 30870)}
+ },
+ {
+ { AOM_CDF3(27258, 31095)},
+ { AOM_CDF3(31804, 32623)},
+ { AOM_CDF3(31763, 32528)},
+ { AOM_CDF3(31438, 32506)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(18049, 30489)},
+ { AOM_CDF3(31706, 32286)},
+ { AOM_CDF3(32163, 32473)},
+ { AOM_CDF3(31550, 32184)}
+ },
+ {
+ { AOM_CDF3(27116, 30842)},
+ { AOM_CDF3(31971, 32598)},
+ { AOM_CDF3(32088, 32576)},
+ { AOM_CDF3(32067, 32664)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(12854, 29093)},
+ { AOM_CDF3(32272, 32558)},
+ { AOM_CDF3(32667, 32729)},
+ { AOM_CDF3(32306, 32585)}
+ },
+ {
+ { AOM_CDF3(25476, 30366)},
+ { AOM_CDF3(32169, 32687)},
+ { AOM_CDF3(32479, 32689)},
+ { AOM_CDF3(31673, 32634)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(2809, 19301)},
+ { AOM_CDF3(32205, 32622)},
+ { AOM_CDF3(32338, 32730)},
+ { AOM_CDF3(31786, 32616)}
+ },
+ {
+ { AOM_CDF3(22737, 29105)},
+ { AOM_CDF3(30810, 32362)},
+ { AOM_CDF3(30014, 32627)},
+ { AOM_CDF3(30528, 32574)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(935, 3382)},
+ { AOM_CDF3(30789, 31909)},
+ { AOM_CDF3(32466, 32756)},
+ { AOM_CDF3(30860, 32513)}
+ },
+ {
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF3(22497, 31198)},
+ { AOM_CDF3(31715, 32495)},
+ { AOM_CDF3(31606, 32337)},
+ { AOM_CDF3(30388, 31990)}
+ },
+ {
+ { AOM_CDF3(27877, 31584)},
+ { AOM_CDF3(32170, 32728)},
+ { AOM_CDF3(32155, 32688)},
+ { AOM_CDF3(32219, 32702)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(21457, 31043)},
+ { AOM_CDF3(31951, 32483)},
+ { AOM_CDF3(32153, 32562)},
+ { AOM_CDF3(31473, 32215)}
+ },
+ {
+ { AOM_CDF3(27558, 31151)},
+ { AOM_CDF3(32020, 32640)},
+ { AOM_CDF3(32097, 32575)},
+ { AOM_CDF3(32242, 32719)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(19980, 30591)},
+ { AOM_CDF3(32219, 32597)},
+ { AOM_CDF3(32581, 32706)},
+ { AOM_CDF3(31803, 32287)}
+ },
+ {
+ { AOM_CDF3(26473, 30507)},
+ { AOM_CDF3(32431, 32723)},
+ { AOM_CDF3(32196, 32611)},
+ { AOM_CDF3(31588, 32528)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(24647, 30463)},
+ { AOM_CDF3(32412, 32695)},
+ { AOM_CDF3(32468, 32720)},
+ { AOM_CDF3(31269, 32523)}
+ },
+ {
+ { AOM_CDF3(28482, 31505)},
+ { AOM_CDF3(32152, 32701)},
+ { AOM_CDF3(31732, 32598)},
+ { AOM_CDF3(31767, 32712)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(12358, 24977)},
+ { AOM_CDF3(31331, 32385)},
+ { AOM_CDF3(32634, 32756)},
+ { AOM_CDF3(30411, 32548)}
+ },
+ {
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)}
+ }
+ }
+ }
+};
+
+static const u16 default_joint_cdf[] = { ICDF(4096), ICDF(11264), ICDF(19328)};
+static const u16 default_clsss_cdf[][10] = {
+ // Vertical component
+ {
+ ICDF(28672), ICDF(30976), ICDF(31858), ICDF(32320), ICDF(32551),
+ ICDF(32656), ICDF(32740), ICDF(32757), ICDF(32762), ICDF(32767)
+ },
+ // Horizontal component
+ {
+ ICDF(28672), ICDF(30976), ICDF(31858), ICDF(32320), ICDF(32551),
+ ICDF(32656), ICDF(32740), ICDF(32757), ICDF(32762), ICDF(32767)
+ }
+};
+
+static const u16 default_clsss0_fp_cdf[][2][3] = {
+ // Vertical component
+ {
+ { ICDF(16384), ICDF(24576), ICDF(26624)},
+ { ICDF(12288), ICDF(21248), ICDF(24128)}
+ },
+ // Horizontal component
+ {
+ { ICDF(16384), ICDF(24576), ICDF(26624)},
+ { ICDF(12288), ICDF(21248), ICDF(24128)}
+ }
+};
+
+static const u16 default_fp_cdf[][3] = {
+ // Vertical component
+ {
+ ICDF(8192), ICDF(17408), ICDF(21248)
+ },
+ // Horizontal component
+ {
+ ICDF(8192), ICDF(17408), ICDF(21248)
+ }
+};
+
+static const u16 default_sign_cdf[] = { ICDF(128 * 128), ICDF(128 * 128)};
+static const u16 default_class0_hp_cdf[] = { ICDF(160 * 128), ICDF(160 * 128)};
+static const u16 default_hp_cdf[] = { ICDF(128 * 128), ICDF(128 * 128)};
+static const u16 default_class0_cdf[] = { ICDF(216 * 128), ICDF(216 * 128)};
+static const u16 default_bits_cdf[][10] = {
+ {
+ ICDF(128 * 136), ICDF(128 * 140), ICDF(128 * 148), ICDF(128 * 160),
+ ICDF(128 * 176), ICDF(128 * 192), ICDF(128 * 224), ICDF(128 * 234),
+ ICDF(128 * 234), ICDF(128 * 240)
+ },
+ {
+ ICDF(128 * 136), ICDF(128 * 140), ICDF(128 * 148), ICDF(128 * 160),
+ ICDF(128 * 176), ICDF(128 * 192), ICDF(128 * 224), ICDF(128 * 234),
+ ICDF(128 * 234), ICDF(128 * 240)
+ }
+};
+
+static int rockchip_av1_get_q_ctx(int q)
+{
+ if (q <= 20)
+ return 0;
+ if (q <= 60)
+ return 1;
+ if (q <= 120)
+ return 2;
+ return 3;
+}
+
+void rockchip_av1_default_coeff_probs(u32 base_qindex, void *ptr)
+{
+ struct av1cdfs *cdfs = (struct av1cdfs *)ptr;
+ const int index = rockchip_av1_get_q_ctx(base_qindex);
+
+ memcpy(cdfs->txb_skip_cdf, av1_default_txb_skip_cdfs[index],
+ sizeof(av1_default_txb_skip_cdfs[0]));
+ memcpy(cdfs->eob_extra_cdf, av1_default_eob_extra_cdfs[index],
+ sizeof(av1_default_eob_extra_cdfs[0]));
+ memcpy(cdfs->dc_sign_cdf, av1_default_dc_sign_cdfs[index],
+ sizeof(av1_default_dc_sign_cdfs[0]));
+ memcpy(cdfs->coeff_br_cdf, av1_default_coeff_lps_multi_cdfs[index],
+ sizeof(av1_default_coeff_lps_multi_cdfs[0]));
+ memcpy(cdfs->coeff_base_cdf, av1_default_coeff_base_multi_cdfs[index],
+ sizeof(av1_default_coeff_base_multi_cdfs[0]));
+ memcpy(cdfs->coeff_base_eob_cdf,
+ av1_default_coeff_base_eob_multi_cdfs[index],
+ sizeof(av1_default_coeff_base_eob_multi_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf16, av1_default_eob_multi16_cdfs[index],
+ sizeof(av1_default_eob_multi16_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf32, av1_default_eob_multi32_cdfs[index],
+ sizeof(av1_default_eob_multi32_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf64, av1_default_eob_multi64_cdfs[index],
+ sizeof(av1_default_eob_multi64_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf128, av1_default_eob_multi128_cdfs[index],
+ sizeof(av1_default_eob_multi128_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf256, av1_default_eob_multi256_cdfs[index],
+ sizeof(av1_default_eob_multi256_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf512, av1_default_eob_multi512_cdfs[index],
+ sizeof(av1_default_eob_multi512_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf1024, av1_default_eob_multi1024_cdfs[index],
+ sizeof(av1_default_eob_multi1024_cdfs[0]));
+}
+
+void rockchip_av1_set_default_cdfs(struct av1cdfs *cdfs,
+ struct mvcdfs *cdfs_ndvc)
+{
+ memcpy(cdfs->partition_cdf, default_partition_cdf,
+ sizeof(cdfs->partition_cdf));
+
+ memcpy(cdfs->tx_type_intra0_cdf, default_intra_ext_tx0_cdf,
+ sizeof(cdfs->tx_type_intra0_cdf));
+ memcpy(cdfs->tx_type_intra1_cdf, default_intra_ext_tx1_cdf,
+ sizeof(cdfs->tx_type_intra1_cdf));
+ memcpy(cdfs->tx_type_inter_cdf, default_inter_ext_tx_cdf,
+ sizeof(cdfs->tx_type_inter_cdf));
+
+ memcpy(cdfs->vartx_part_cdf, default_txfm_partition_cdf,
+ sizeof(cdfs->vartx_part_cdf));
+ memcpy(cdfs->mbskip_cdf, default_skip_cdfs, sizeof(cdfs->mbskip_cdf));
+ memcpy(cdfs->delta_q_cdf, default_delta_q_cdf,
+ sizeof(cdfs->delta_q_cdf));
+ memcpy(cdfs->delta_lf_multi_cdf, default_delta_lf_multi_cdf,
+ sizeof(cdfs->delta_lf_multi_cdf));
+ memcpy(cdfs->delta_lf_cdf, default_delta_lf_cdf,
+ sizeof(cdfs->delta_lf_cdf));
+
+ memcpy(cdfs->segment_pred_cdf, default_segment_pred_cdf,
+ sizeof(cdfs->segment_pred_cdf));
+
+ memcpy(cdfs->spatial_pred_seg_tree_cdf,
+ default_spatial_pred_seg_tree_cdf,
+ sizeof(cdfs->spatial_pred_seg_tree_cdf));
+
+ memcpy(cdfs->skip_mode_cdf, default_skip_mode_cdfs,
+ sizeof(cdfs->skip_mode_cdf));
+
+ memcpy(cdfs->tx_size_cdf, default_tx_size_cdf,
+ sizeof(cdfs->tx_size_cdf));
+
+ memcpy(cdfs->kf_ymode_cdf, default_kf_y_mode_cdf,
+ sizeof(cdfs->kf_ymode_cdf));
+ memcpy(cdfs->uv_mode_cdf, default_uv_mode_cdf,
+ sizeof(cdfs->uv_mode_cdf));
+ memcpy(cdfs->if_ymode_cdf, default_if_y_mode_cdf,
+ sizeof(cdfs->if_ymode_cdf));
+
+ memcpy(cdfs->intra_inter_cdf, default_intra_inter_cdf,
+ sizeof(cdfs->intra_inter_cdf));
+
+ memcpy(cdfs->comp_ref_cdf, default_comp_ref_cdf,
+ sizeof(cdfs->comp_ref_cdf));
+ memcpy(cdfs->comp_bwdref_cdf, default_comp_bwdref_cdf,
+ sizeof(cdfs->comp_bwdref_cdf));
+
+ memcpy(cdfs->comp_inter_cdf, default_comp_inter_cdf,
+ sizeof(cdfs->comp_inter_cdf));
+
+ memcpy(cdfs->single_ref_cdf, default_single_ref_cdf,
+ sizeof(cdfs->single_ref_cdf));
+ memcpy(cdfs->comp_ref_type_cdf, default_comp_ref_type_cdf,
+ sizeof(cdfs->comp_ref_type_cdf));
+ memcpy(cdfs->uni_comp_ref_cdf, default_uni_comp_ref_cdf,
+ sizeof(cdfs->uni_comp_ref_cdf));
+
+ memcpy(cdfs->newmv_cdf, default_newmv_cdf, sizeof(cdfs->newmv_cdf));
+ memcpy(cdfs->zeromv_cdf, default_zeromv_cdf, sizeof(cdfs->zeromv_cdf));
+ memcpy(cdfs->refmv_cdf, default_refmv_cdf, sizeof(cdfs->refmv_cdf));
+ memcpy(cdfs->drl_cdf, default_drl_cdf, sizeof(cdfs->drl_cdf));
+
+ memcpy(cdfs->interp_filter_cdf, default_switchable_interp_cdf,
+ sizeof(cdfs->interp_filter_cdf));
+
+ // Regular MV cdfs
+ memcpy(cdfs->mv_cdf.joint_cdf, default_joint_cdf,
+ sizeof(cdfs->mv_cdf.joint_cdf));
+ memcpy(cdfs->mv_cdf.sign_cdf, default_sign_cdf,
+ sizeof(cdfs->mv_cdf.sign_cdf));
+ memcpy(cdfs->mv_cdf.clsss_cdf, default_clsss_cdf,
+ sizeof(cdfs->mv_cdf.clsss_cdf));
+ memcpy(cdfs->mv_cdf.clsss0_fp_cdf, default_clsss0_fp_cdf,
+ sizeof(cdfs->mv_cdf.clsss0_fp_cdf));
+ memcpy(cdfs->mv_cdf.fp_cdf, default_fp_cdf,
+ sizeof(cdfs->mv_cdf.fp_cdf));
+ memcpy(cdfs->mv_cdf.class0_hp_cdf, default_class0_hp_cdf,
+ sizeof(cdfs->mv_cdf.class0_hp_cdf));
+ memcpy(cdfs->mv_cdf.hp_cdf, default_hp_cdf,
+ sizeof(cdfs->mv_cdf.hp_cdf));
+ memcpy(cdfs->mv_cdf.class0_cdf, default_class0_cdf,
+ sizeof(cdfs->mv_cdf.class0_cdf));
+ memcpy(cdfs->mv_cdf.bits_cdf, default_bits_cdf,
+ sizeof(cdfs->mv_cdf.bits_cdf));
+
+ // Intrabc cdfs
+ memcpy(cdfs_ndvc->joint_cdf, default_joint_cdf,
+ sizeof(cdfs_ndvc->joint_cdf));
+ memcpy(cdfs_ndvc->sign_cdf, default_sign_cdf,
+ sizeof(cdfs_ndvc->sign_cdf));
+ memcpy(cdfs_ndvc->clsss_cdf, default_clsss_cdf,
+ sizeof(cdfs_ndvc->clsss_cdf));
+ memcpy(cdfs_ndvc->clsss0_fp_cdf, default_clsss0_fp_cdf,
+ sizeof(cdfs_ndvc->clsss0_fp_cdf));
+ memcpy(cdfs_ndvc->fp_cdf, default_fp_cdf, sizeof(cdfs_ndvc->fp_cdf));
+ memcpy(cdfs_ndvc->class0_hp_cdf, default_class0_hp_cdf,
+ sizeof(cdfs_ndvc->class0_hp_cdf));
+ memcpy(cdfs_ndvc->hp_cdf, default_hp_cdf, sizeof(cdfs_ndvc->hp_cdf));
+ memcpy(cdfs_ndvc->class0_cdf, default_class0_cdf,
+ sizeof(cdfs_ndvc->class0_cdf));
+ memcpy(cdfs_ndvc->bits_cdf, default_bits_cdf,
+ sizeof(cdfs_ndvc->bits_cdf));
+
+ memcpy(cdfs->obmc_cdf, default_obmc_cdf, sizeof(cdfs->obmc_cdf));
+ memcpy(cdfs->motion_mode_cdf, default_motion_mode_cdf,
+ sizeof(cdfs->motion_mode_cdf));
+
+ memcpy(cdfs->inter_compound_mode_cdf, default_inter_compound_mode_cdf,
+ sizeof(cdfs->inter_compound_mode_cdf));
+ memcpy(cdfs->compound_type_cdf, default_compound_type_cdf,
+ sizeof(cdfs->compound_type_cdf));
+ memcpy(cdfs->interintra_cdf, default_interintra_cdf,
+ sizeof(cdfs->interintra_cdf));
+ memcpy(cdfs->interintra_mode_cdf, default_interintra_mode_cdf,
+ sizeof(cdfs->interintra_mode_cdf));
+ memcpy(cdfs->wedge_interintra_cdf, default_wedge_interintra_cdf,
+ sizeof(cdfs->wedge_interintra_cdf));
+ memcpy(cdfs->wedge_idx_cdf, default_wedge_idx_cdf,
+ sizeof(cdfs->wedge_idx_cdf));
+
+ memcpy(cdfs->palette_y_mode_cdf, default_palette_y_mode_cdf,
+ sizeof(cdfs->palette_y_mode_cdf));
+ memcpy(cdfs->palette_uv_mode_cdf, default_palette_uv_mode_cdf,
+ sizeof(cdfs->palette_uv_mode_cdf));
+ memcpy(cdfs->palette_y_size_cdf, default_palette_y_size_cdf,
+ sizeof(cdfs->palette_y_size_cdf));
+ memcpy(cdfs->palette_uv_size_cdf, default_palette_uv_size_cdf,
+ sizeof(cdfs->palette_uv_size_cdf));
+ memcpy(cdfs->palette_y_color_index_cdf,
+ default_palette_y_color_index_cdf,
+ sizeof(cdfs->palette_y_color_index_cdf));
+ memcpy(cdfs->palette_uv_color_index_cdf,
+ default_palette_uv_color_index_cdf,
+ sizeof(cdfs->palette_uv_color_index_cdf));
+
+ memcpy(cdfs->cfl_sign_cdf, default_cfl_sign_cdf,
+ sizeof(cdfs->cfl_sign_cdf));
+ memcpy(cdfs->cfl_alpha_cdf, default_cfl_alpha_cdf,
+ sizeof(cdfs->cfl_alpha_cdf));
+
+ memcpy(cdfs->intrabc_cdf, default_intrabc_cdf,
+ sizeof(cdfs->intrabc_cdf));
+ memcpy(cdfs->angle_delta_cdf, default_angle_delta_cdf,
+ sizeof(cdfs->angle_delta_cdf));
+ memcpy(cdfs->filter_intra_mode_cdf, default_filter_intra_mode_cdf,
+ sizeof(cdfs->filter_intra_mode_cdf));
+ memcpy(cdfs->filter_intra_cdf, default_filter_intra_cdfs,
+ sizeof(cdfs->filter_intra_cdf));
+ memcpy(cdfs->comp_group_idx_cdf, default_comp_group_idx_cdfs,
+ sizeof(cdfs->comp_group_idx_cdf));
+ memcpy(cdfs->compound_idx_cdf, default_compound_idx_cdfs,
+ sizeof(cdfs->compound_idx_cdf));
+}
+
+void rockchip_av1_get_cdfs(struct hantro_ctx *ctx, u32 ref_idx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ av1_dec->cdfs = &av1_dec->cdfs_last[ref_idx];
+ av1_dec->cdfs_ndvc = &av1_dec->cdfs_last_ndvc[ref_idx];
+}
+
+void rockchip_av1_store_cdfs(struct hantro_ctx *ctx,
+ u32 refresh_frame_flags)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ int i;
+
+ for (i = 0; i < NUM_REF_FRAMES; i++) {
+ if (refresh_frame_flags & (1 << i)) {
+ if (&av1_dec->cdfs_last[i] != av1_dec->cdfs) {
+ av1_dec->cdfs_last[i] = *av1_dec->cdfs;
+ av1_dec->cdfs_last_ndvc[i] =
+ *av1_dec->cdfs_ndvc;
+ }
+ }
+ }
+}
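
For context on the two helpers above: rockchip_av1_get_cdfs() points the decode at the CDF set saved for a given reference slot, and rockchip_av1_store_cdfs() copies the adapted CDFs back into every slot named by refresh_frame_flags, which is how AV1's CDF persistence across frames is realized. The sketch below shows how the helpers fit around one decode run; it is illustrative only, and assumes the V4L2 stateless AV1 control fields (primary_ref_frame, ref_frame_idx, quantization.base_q_idx, refresh_frame_flags) and the av1_dec context fields (cdfs, cdfs_ndvc, default_cdfs, default_cdfs_ndvc) introduced elsewhere in this series:

/* Illustrative sketch only, not part of the patch. */
static void example_cdf_round_trip(struct hantro_ctx *ctx,
				   const struct v4l2_ctrl_av1_frame *frame)
{
	struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;

	if (frame->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
		/* No primary reference: decode from the spec default CDFs. */
		av1_dec->cdfs = &av1_dec->default_cdfs;
		av1_dec->cdfs_ndvc = &av1_dec->default_cdfs_ndvc;
		rockchip_av1_set_default_cdfs(av1_dec->cdfs, av1_dec->cdfs_ndvc);
		rockchip_av1_default_coeff_probs(frame->quantization.base_q_idx,
						 av1_dec->cdfs);
	} else {
		/* Continue from the CDFs saved for the primary reference slot. */
		rockchip_av1_get_cdfs(ctx,
				      frame->ref_frame_idx[frame->primary_ref_frame]);
	}

	/* ... hardware decode runs and adapts the CDFs pointed to by av1_dec->cdfs ... */

	/* Propagate the adapted CDFs to every slot this frame refreshes. */
	rockchip_av1_store_cdfs(ctx, frame->refresh_frame_flags);
}
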
diff --git a/drivers/media/platform/verisilicon/rockchip_av1_entropymode.h b/drivers/media/platform/verisilicon/rockchip_av1_entropymode.h
new file mode 100644
index 000000000000..bbf8424c7d2c
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_av1_entropymode.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ROCKCHIP_AV1_ENTROPYMODE_H_
+#define _ROCKCHIP_AV1_ENTROPYMODE_H_
+
+#include <linux/types.h>
+
+struct hantro_ctx;
+
+#define AV1_INTER_MODE_CONTEXTS 15
+#define AV1_INTRA_MODES 13
+#define AV1_REF_CONTEXTS 3
+#define AV1_SWITCHABLE_FILTERS 3 /* number of switchable filters */
+#define AV1_TX_SIZE_CONTEXTS 3
+#define BLOCK_SIZE_GROUPS 4
+#define BR_CDF_SIZE 4
+#define BWD_REFS 3
+#define CFL_ALLOWED_TYPES 2
+#define CFL_ALPHA_CONTEXTS 6
+#define CFL_ALPHABET_SIZE 16
+#define CFL_JOINT_SIGNS 8
+#define CDF_SIZE(x) ((x) - 1)
+#define COMP_GROUP_IDX_CONTEXTS 7
+#define COMP_INDEX_CONTEXTS 6
+#define COMP_INTER_CONTEXTS 5
+#define COMP_REF_TYPE_CONTEXTS 5
+#define COMPOUND_TYPES 3
+#define DC_SIGN_CONTEXTS 3
+#define DELTA_LF_PROBS 3
+#define DELTA_Q_PROBS 3
+#define DIRECTIONAL_MODES 8
+#define DRL_MODE_CONTEXTS 3
+#define EOB_COEF_CONTEXTS 9
+#define EXT_TX_SIZES 3
+#define EXT_TX_TYPES 16
+#define EXTTX_SIZES 4
+#define FRAME_LF_COUNT 4
+#define FWD_REFS 4
+#define GLOBALMV_MODE_CONTEXTS 2
+#define ICDF(x) (32768U - (x))
+#define INTER_COMPOUND_MODES 8
+#define INTERINTRA_MODES 4
+#define INTRA_INTER_CONTEXTS 4
+#define KF_MODE_CONTEXTS 5
+#define LEVEL_CONTEXTS 21
+#define MAX_ANGLE_DELTA 3
+#define MAX_MB_SEGMENTS 8
+#define MAX_SEGMENTS 8
+#define MAX_TX_CATS 4
+#define MAX_TX_DEPTH 2
+#define MBSKIP_CONTEXTS 3
+#define MOTION_MODES 3
+#define MOTION_MODE_CONTEXTS 10
+#define NEWMV_MODE_CONTEXTS 6
+#define NUM_BASE_LEVELS 2
+#define NUM_REF_FRAMES 8
+#define PALETTE_BLOCK_SIZES 7
+#define PALETTE_IDX_CONTEXTS 18
+#define PALETTE_SIZES 7
+#define PALETTE_UV_MODE_CONTEXTS 2
+#define PALETTE_Y_MODE_CONTEXTS 3
+#define PARTITION_PLOFFSET 4
+#define NUM_PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET)
+#define PLANE_TYPES 2
+#define PREDICTION_PROBS 3
+#define REF_CONTEXTS 5
+#define REFMV_MODE_CONTEXTS 9
+#define SEG_TEMPORAL_PRED_CTXS 3
+#define SIG_COEF_CONTEXTS 42
+#define SIG_COEF_CONTEXTS_EOB 4
+#define SINGLE_REFS 7
+#define SKIP_CONTEXTS 3
+#define SKIP_MODE_CONTEXTS 3
+#define SPATIAL_PREDICTION_PROBS 3
+#define SWITCHABLE_FILTER_CONTEXTS ((AV1_SWITCHABLE_FILTERS + 1) * 4)
+#define TOKEN_CDF_Q_CTXS 4
+#define TX_SIZES 5
+#define TX_SIZE_CONTEXTS 2
+#define TX_TYPES 4
+#define TXB_SKIP_CONTEXTS 13
+#define TXFM_PARTITION_CONTEXTS 22
+#define UNI_COMP_REF_CONTEXTS 3
+#define UNIDIR_COMP_REFS 4
+#define UV_INTRA_MODES 14
+#define VARTX_PART_CONTEXTS 22
+#define ZEROMV_MODE_CONTEXTS 2
+
+enum blocksizetype {
+ BLOCK_SIZE_AB4X4,
+ BLOCK_SIZE_SB4X8,
+ BLOCK_SIZE_SB8X4,
+ BLOCK_SIZE_SB8X8,
+ BLOCK_SIZE_SB8X16,
+ BLOCK_SIZE_SB16X8,
+ BLOCK_SIZE_MB16X16,
+ BLOCK_SIZE_SB16X32,
+ BLOCK_SIZE_SB32X16,
+ BLOCK_SIZE_SB32X32,
+ BLOCK_SIZE_SB32X64,
+ BLOCK_SIZE_SB64X32,
+ BLOCK_SIZE_SB64X64,
+ BLOCK_SIZE_SB64X128,
+ BLOCK_SIZE_SB128X64,
+ BLOCK_SIZE_SB128X128,
+ BLOCK_SIZE_SB4X16,
+ BLOCK_SIZE_SB16X4,
+ BLOCK_SIZE_SB8X32,
+ BLOCK_SIZE_SB32X8,
+ BLOCK_SIZE_SB16X64,
+ BLOCK_SIZE_SB64X16,
+ BLOCK_SIZE_TYPES,
+ BLOCK_SIZES_ALL = BLOCK_SIZE_TYPES
+};
+
+enum filterintramodetype {
+ FILTER_DC_PRED,
+ FILTER_V_PRED,
+ FILTER_H_PRED,
+ FILTER_D153_PRED,
+ FILTER_PAETH_PRED,
+ FILTER_INTRA_MODES,
+ FILTER_INTRA_UNUSED = 7
+};
+
+enum frametype {
+ KEY_FRAME = 0,
+ INTER_FRAME = 1,
+ NUM_FRAME_TYPES,
+};
+
+enum txsize {
+ TX_4X4 = 0,
+ TX_8X8 = 1,
+ TX_16X16 = 2,
+ TX_32X32 = 3,
+ TX_SIZE_MAX_SB,
+};
+
+enum { SIMPLE_TRANSLATION, OBMC_CAUSAL, MOTION_MODE_COUNT };
+
+enum mb_prediction_mode {
+ DC_PRED, /* average of above and left pixels */
+ V_PRED, /* vertical prediction */
+ H_PRED, /* horizontal prediction */
+ D45_PRED, /* Directional 45 deg prediction [anti-clockwise from 0 deg hor] */
+ D135_PRED, /* Directional 135 deg prediction [anti-clockwise from 0 deg hor] */
+ D117_PRED, /* Directional 112 deg prediction [anti-clockwise from 0 deg hor] */
+ D153_PRED, /* Directional 157 deg prediction [anti-clockwise from 0 deg hor] */
+ D27_PRED, /* Directional 22 deg prediction [anti-clockwise from 0 deg hor] */
+ D63_PRED, /* Directional 67 deg prediction [anti-clockwise from 0 deg hor] */
+ SMOOTH_PRED,
+ TM_PRED_AV1 = SMOOTH_PRED,
+ SMOOTH_V_PRED, // Vertical interpolation
+ SMOOTH_H_PRED, // Horizontal interpolation
+ TM_PRED, /* Truemotion prediction */
+ PAETH_PRED = TM_PRED,
+ NEARESTMV,
+ NEARMV,
+ ZEROMV,
+ NEWMV,
+ NEAREST_NEARESTMV,
+ NEAR_NEARMV,
+ NEAREST_NEWMV,
+ NEW_NEARESTMV,
+ NEAR_NEWMV,
+ NEW_NEARMV,
+ ZERO_ZEROMV,
+ NEW_NEWMV,
+ SPLITMV,
+ MB_MODE_COUNT
+};
+
+enum partitiontype {
+ PARTITION_NONE,
+ PARTITION_HORZ,
+ PARTITION_VERT,
+ PARTITION_SPLIT,
+ PARTITION_TYPES
+};
+
+struct mvcdfs {
+ u16 joint_cdf[3];
+ u16 sign_cdf[2];
+ u16 clsss_cdf[2][10];
+ u16 clsss0_fp_cdf[2][2][3];
+ u16 fp_cdf[2][3];
+ u16 class0_hp_cdf[2];
+ u16 hp_cdf[2];
+ u16 class0_cdf[2];
+ u16 bits_cdf[2][10];
+};
+
+struct av1cdfs {
+ u16 partition_cdf[13][16];
+ u16 kf_ymode_cdf[KF_MODE_CONTEXTS][KF_MODE_CONTEXTS][AV1_INTRA_MODES - 1];
+ u16 segment_pred_cdf[PREDICTION_PROBS];
+ u16 spatial_pred_seg_tree_cdf[SPATIAL_PREDICTION_PROBS][MAX_MB_SEGMENTS - 1];
+ u16 mbskip_cdf[MBSKIP_CONTEXTS];
+ u16 delta_q_cdf[DELTA_Q_PROBS];
+ u16 delta_lf_multi_cdf[FRAME_LF_COUNT][DELTA_LF_PROBS];
+ u16 delta_lf_cdf[DELTA_LF_PROBS];
+ u16 skip_mode_cdf[SKIP_MODE_CONTEXTS];
+ u16 vartx_part_cdf[VARTX_PART_CONTEXTS][1];
+ u16 tx_size_cdf[MAX_TX_CATS][AV1_TX_SIZE_CONTEXTS][MAX_TX_DEPTH];
+ u16 if_ymode_cdf[BLOCK_SIZE_GROUPS][AV1_INTRA_MODES - 1];
+ u16 uv_mode_cdf[2][AV1_INTRA_MODES][AV1_INTRA_MODES - 1 + 1];
+ u16 intra_inter_cdf[INTRA_INTER_CONTEXTS];
+ u16 comp_inter_cdf[COMP_INTER_CONTEXTS];
+ u16 single_ref_cdf[AV1_REF_CONTEXTS][SINGLE_REFS - 1];
+ u16 comp_ref_type_cdf[COMP_REF_TYPE_CONTEXTS][1];
+ u16 uni_comp_ref_cdf[UNI_COMP_REF_CONTEXTS][UNIDIR_COMP_REFS - 1][1];
+ u16 comp_ref_cdf[AV1_REF_CONTEXTS][FWD_REFS - 1];
+ u16 comp_bwdref_cdf[AV1_REF_CONTEXTS][BWD_REFS - 1];
+ u16 newmv_cdf[NEWMV_MODE_CONTEXTS];
+ u16 zeromv_cdf[ZEROMV_MODE_CONTEXTS];
+ u16 refmv_cdf[REFMV_MODE_CONTEXTS];
+ u16 drl_cdf[DRL_MODE_CONTEXTS];
+ u16 interp_filter_cdf[SWITCHABLE_FILTER_CONTEXTS][AV1_SWITCHABLE_FILTERS - 1];
+ struct mvcdfs mv_cdf;
+ u16 obmc_cdf[BLOCK_SIZE_TYPES];
+ u16 motion_mode_cdf[BLOCK_SIZE_TYPES][2];
+ u16 inter_compound_mode_cdf[AV1_INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES - 1];
+ u16 compound_type_cdf[BLOCK_SIZE_TYPES][CDF_SIZE(COMPOUND_TYPES - 1)];
+ u16 interintra_cdf[BLOCK_SIZE_GROUPS];
+ u16 interintra_mode_cdf[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1];
+ u16 wedge_interintra_cdf[BLOCK_SIZE_TYPES];
+ u16 wedge_idx_cdf[BLOCK_SIZE_TYPES][CDF_SIZE(16)];
+ u16 palette_y_mode_cdf[PALETTE_BLOCK_SIZES][PALETTE_Y_MODE_CONTEXTS][1];
+ u16 palette_uv_mode_cdf[PALETTE_UV_MODE_CONTEXTS][1];
+ u16 palette_y_size_cdf[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+ u16 palette_uv_size_cdf[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+ u16 cfl_sign_cdf[CFL_JOINT_SIGNS - 1];
+ u16 cfl_alpha_cdf[CFL_ALPHA_CONTEXTS][CFL_ALPHABET_SIZE - 1];
+ u16 intrabc_cdf[1];
+ u16 angle_delta_cdf[DIRECTIONAL_MODES][6];
+ u16 filter_intra_mode_cdf[FILTER_INTRA_MODES - 1];
+ u16 filter_intra_cdf[BLOCK_SIZES_ALL];
+ u16 comp_group_idx_cdf[COMP_GROUP_IDX_CONTEXTS][CDF_SIZE(2)];
+ u16 compound_idx_cdf[COMP_INDEX_CONTEXTS][CDF_SIZE(2)];
+ u16 dummy0[14];
+ // Palette index contexts; sizes 1/7, 2/6, 3/5 packed together
+ u16 palette_y_color_index_cdf[PALETTE_IDX_CONTEXTS][8];
+ u16 palette_uv_color_index_cdf[PALETTE_IDX_CONTEXTS][8];
+ u16 tx_type_intra0_cdf[EXTTX_SIZES][AV1_INTRA_MODES][8];
+ u16 tx_type_intra1_cdf[EXTTX_SIZES][AV1_INTRA_MODES][4];
+ u16 tx_type_inter_cdf[2][EXTTX_SIZES][EXT_TX_TYPES];
+ u16 txb_skip_cdf[TX_SIZES][TXB_SKIP_CONTEXTS][CDF_SIZE(2)];
+ u16 eob_extra_cdf[TX_SIZES][PLANE_TYPES][EOB_COEF_CONTEXTS][CDF_SIZE(2)];
+ u16 dummy1[5];
+ u16 eob_flag_cdf16[PLANE_TYPES][2][4];
+ u16 eob_flag_cdf32[PLANE_TYPES][2][8];
+ u16 eob_flag_cdf64[PLANE_TYPES][2][8];
+ u16 eob_flag_cdf128[PLANE_TYPES][2][8];
+ u16 eob_flag_cdf256[PLANE_TYPES][2][8];
+ u16 eob_flag_cdf512[PLANE_TYPES][2][16];
+ u16 eob_flag_cdf1024[PLANE_TYPES][2][16];
+ u16 coeff_base_eob_cdf[TX_SIZES][PLANE_TYPES][SIG_COEF_CONTEXTS_EOB][CDF_SIZE(3)];
+ u16 coeff_base_cdf[TX_SIZES][PLANE_TYPES][SIG_COEF_CONTEXTS][CDF_SIZE(4) + 1];
+ u16 dc_sign_cdf[PLANE_TYPES][DC_SIGN_CONTEXTS][CDF_SIZE(2)];
+ u16 dummy2[2];
+ u16 coeff_br_cdf[TX_SIZES][PLANE_TYPES][LEVEL_CONTEXTS][CDF_SIZE(BR_CDF_SIZE) + 1];
+ u16 dummy3[16];
+};
+
+void rockchip_av1_store_cdfs(struct hantro_ctx *ctx,
+ u32 refresh_frame_flags);
+void rockchip_av1_get_cdfs(struct hantro_ctx *ctx, u32 ref_idx);
+void rockchip_av1_set_default_cdfs(struct av1cdfs *cdfs,
+ struct mvcdfs *cdfs_ndvc);
+void rockchip_av1_default_coeff_probs(u32 base_qindex, void *ptr);
+
+#endif /* _ROCKCHIP_AV1_ENTROPYMODE_H_ */
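
One convention in the header above is easy to miss: ICDF(x) stores 32768 minus the cumulative value, so every table holds "inverse" 15-bit CDF entries rather than the raw cumulative probabilities. A hypothetical helper converting a conventional cumulative table into this layout, given purely as an illustration (the name and its use are not part of the patch):

/*
 * Illustrative only: cdf[] holds cumulative probabilities scaled to 32768
 * (last symbol implicit); the hardware tables store 32768 - cdf[i].
 */
static void example_cdf_to_icdf(const u16 *cdf, u16 *icdf, unsigned int nsyms)
{
	unsigned int i;

	for (i = 0; i < nsyms - 1; i++)
		icdf[i] = 32768U - cdf[i];
}
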
diff --git a/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c
new file mode 100644
index 000000000000..f2ae84f0b436
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0
+
+#include "rockchip_av1_filmgrain.h"
+
+static const s32 gaussian_sequence[2048] = {
+ 56, 568, -180, 172, 124, -84, 172, -64, -900, 24, 820,
+ 224, 1248, 996, 272, -8, -916, -388, -732, -104, -188, 800,
+ 112, -652, -320, -376, 140, -252, 492, -168, 44, -788, 588,
+ -584, 500, -228, 12, 680, 272, -476, 972, -100, 652, 368,
+ 432, -196, -720, -192, 1000, -332, 652, -136, -552, -604, -4,
+ 192, -220, -136, 1000, -52, 372, -96, -624, 124, -24, 396,
+ 540, -12, -104, 640, 464, 244, -208, -84, 368, -528, -740,
+ 248, -968, -848, 608, 376, -60, -292, -40, -156, 252, -292,
+ 248, 224, -280, 400, -244, 244, -60, 76, -80, 212, 532,
+ 340, 128, -36, 824, -352, -60, -264, -96, -612, 416, -704,
+ 220, -204, 640, -160, 1220, -408, 900, 336, 20, -336, -96,
+ -792, 304, 48, -28, -1232, -1172, -448, 104, -292, -520, 244,
+ 60, -948, 0, -708, 268, 108, 356, -548, 488, -344, -136,
+ 488, -196, -224, 656, -236, -1128, 60, 4, 140, 276, -676,
+ -376, 168, -108, 464, 8, 564, 64, 240, 308, -300, -400,
+ -456, -136, 56, 120, -408, -116, 436, 504, -232, 328, 844,
+ -164, -84, 784, -168, 232, -224, 348, -376, 128, 568, 96,
+ -1244, -288, 276, 848, 832, -360, 656, 464, -384, -332, -356,
+ 728, -388, 160, -192, 468, 296, 224, 140, -776, -100, 280,
+ 4, 196, 44, -36, -648, 932, 16, 1428, 28, 528, 808,
+ 772, 20, 268, 88, -332, -284, 124, -384, -448, 208, -228,
+ -1044, -328, 660, 380, -148, -300, 588, 240, 540, 28, 136,
+ -88, -436, 256, 296, -1000, 1400, 0, -48, 1056, -136, 264,
+ -528, -1108, 632, -484, -592, -344, 796, 124, -668, -768, 388,
+ 1296, -232, -188, -200, -288, -4, 308, 100, -168, 256, -500,
+ 204, -508, 648, -136, 372, -272, -120, -1004, -552, -548, -384,
+ 548, -296, 428, -108, -8, -912, -324, -224, -88, -112, -220,
+ -100, 996, -796, 548, 360, -216, 180, 428, -200, -212, 148,
+ 96, 148, 284, 216, -412, -320, 120, -300, -384, -604, -572,
+ -332, -8, -180, -176, 696, 116, -88, 628, 76, 44, -516,
+ 240, -208, -40, 100, -592, 344, -308, -452, -228, 20, 916,
+ -1752, -136, -340, -804, 140, 40, 512, 340, 248, 184, -492,
+ 896, -156, 932, -628, 328, -688, -448, -616, -752, -100, 560,
+ -1020, 180, -800, -64, 76, 576, 1068, 396, 660, 552, -108,
+ -28, 320, -628, 312, -92, -92, -472, 268, 16, 560, 516,
+ -672, -52, 492, -100, 260, 384, 284, 292, 304, -148, 88,
+ -152, 1012, 1064, -228, 164, -376, -684, 592, -392, 156, 196,
+ -524, -64, -884, 160, -176, 636, 648, 404, -396, -436, 864,
+ 424, -728, 988, -604, 904, -592, 296, -224, 536, -176, -920,
+ 436, -48, 1176, -884, 416, -776, -824, -884, 524, -548, -564,
+ -68, -164, -96, 692, 364, -692, -1012, -68, 260, -480, 876,
+ -1116, 452, -332, -352, 892, -1088, 1220, -676, 12, -292, 244,
+ 496, 372, -32, 280, 200, 112, -440, -96, 24, -644, -184,
+ 56, -432, 224, -980, 272, -260, 144, -436, 420, 356, 364,
+ -528, 76, 172, -744, -368, 404, -752, -416, 684, -688, 72,
+ 540, 416, 92, 444, 480, -72, -1416, 164, -1172, -68, 24,
+ 424, 264, 1040, 128, -912, -524, -356, 64, 876, -12, 4,
+ -88, 532, 272, -524, 320, 276, -508, 940, 24, -400, -120,
+ 756, 60, 236, -412, 100, 376, -484, 400, -100, -740, -108,
+ -260, 328, -268, 224, -200, -416, 184, -604, -564, -20, 296,
+ 60, 892, -888, 60, 164, 68, -760, 216, -296, 904, -336,
+ -28, 404, -356, -568, -208, -1480, -512, 296, 328, -360, -164,
+ -1560, -776, 1156, -428, 164, -504, -112, 120, -216, -148, -264,
+ 308, 32, 64, -72, 72, 116, 176, -64, -272, 460, -536,
+ -784, -280, 348, 108, -752, -132, 524, -540, -776, 116, -296,
+ -1196, -288, -560, 1040, -472, 116, -848, -1116, 116, 636, 696,
+ 284, -176, 1016, 204, -864, -648, -248, 356, 972, -584, -204,
+ 264, 880, 528, -24, -184, 116, 448, -144, 828, 524, 212,
+ -212, 52, 12, 200, 268, -488, -404, -880, 824, -672, -40,
+ 908, -248, 500, 716, -576, 492, -576, 16, 720, -108, 384,
+ 124, 344, 280, 576, -500, 252, 104, -308, 196, -188, -8,
+ 1268, 296, 1032, -1196, 436, 316, 372, -432, -200, -660, 704,
+ -224, 596, -132, 268, 32, -452, 884, 104, -1008, 424, -1348,
+ -280, 4, -1168, 368, 476, 696, 300, -8, 24, 180, -592,
+ -196, 388, 304, 500, 724, -160, 244, -84, 272, -256, -420,
+ 320, 208, -144, -156, 156, 364, 452, 28, 540, 316, 220,
+ -644, -248, 464, 72, 360, 32, -388, 496, -680, -48, 208,
+ -116, -408, 60, -604, -392, 548, -840, 784, -460, 656, -544,
+ -388, -264, 908, -800, -628, -612, -568, 572, -220, 164, 288,
+ -16, -308, 308, -112, -636, -760, 280, -668, 432, 364, 240,
+ -196, 604, 340, 384, 196, 592, -44, -500, 432, -580, -132,
+ 636, -76, 392, 4, -412, 540, 508, 328, -356, -36, 16,
+ -220, -64, -248, -60, 24, -192, 368, 1040, 92, -24, -1044,
+ -32, 40, 104, 148, 192, -136, -520, 56, -816, -224, 732,
+ 392, 356, 212, -80, -424, -1008, -324, 588, -1496, 576, 460,
+ -816, -848, 56, -580, -92, -1372, -112, -496, 200, 364, 52,
+ -140, 48, -48, -60, 84, 72, 40, 132, -356, -268, -104,
+ -284, -404, 732, -520, 164, -304, -540, 120, 328, -76, -460,
+ 756, 388, 588, 236, -436, -72, -176, -404, -316, -148, 716,
+ -604, 404, -72, -88, -888, -68, 944, 88, -220, -344, 960,
+ 472, 460, -232, 704, 120, 832, -228, 692, -508, 132, -476,
+ 844, -748, -364, -44, 1116, -1104, -1056, 76, 428, 552, -692,
+ 60, 356, 96, -384, -188, -612, -576, 736, 508, 892, 352,
+ -1132, 504, -24, -352, 324, 332, -600, -312, 292, 508, -144,
+ -8, 484, 48, 284, -260, -240, 256, -100, -292, -204, -44,
+ 472, -204, 908, -188, -1000, -256, 92, 1164, -392, 564, 356,
+ 652, -28, -884, 256, 484, -192, 760, -176, 376, -524, -452,
+ -436, 860, -736, 212, 124, 504, -476, 468, 76, -472, 552,
+ -692, -944, -620, 740, -240, 400, 132, 20, 192, -196, 264,
+ -668, -1012, -60, 296, -316, -828, 76, -156, 284, -768, -448,
+ -832, 148, 248, 652, 616, 1236, 288, -328, -400, -124, 588,
+ 220, 520, -696, 1032, 768, -740, -92, -272, 296, 448, -464,
+ 412, -200, 392, 440, -200, 264, -152, -260, 320, 1032, 216,
+ 320, -8, -64, 156, -1016, 1084, 1172, 536, 484, -432, 132,
+ 372, -52, -256, 84, 116, -352, 48, 116, 304, -384, 412,
+ 924, -300, 528, 628, 180, 648, 44, -980, -220, 1320, 48,
+ 332, 748, 524, -268, -720, 540, -276, 564, -344, -208, -196,
+ 436, 896, 88, -392, 132, 80, -964, -288, 568, 56, -48,
+ -456, 888, 8, 552, -156, -292, 948, 288, 128, -716, -292,
+ 1192, -152, 876, 352, -600, -260, -812, -468, -28, -120, -32,
+ -44, 1284, 496, 192, 464, 312, -76, -516, -380, -456, -1012,
+ -48, 308, -156, 36, 492, -156, -808, 188, 1652, 68, -120,
+ -116, 316, 160, -140, 352, 808, -416, 592, 316, -480, 56,
+ 528, -204, -568, 372, -232, 752, -344, 744, -4, 324, -416,
+ -600, 768, 268, -248, -88, -132, -420, -432, 80, -288, 404,
+ -316, -1216, -588, 520, -108, 92, -320, 368, -480, -216, -92,
+ 1688, -300, 180, 1020, -176, 820, -68, -228, -260, 436, -904,
+ 20, 40, -508, 440, -736, 312, 332, 204, 760, -372, 728,
+ 96, -20, -632, -520, -560, 336, 1076, -64, -532, 776, 584,
+ 192, 396, -728, -520, 276, -188, 80, -52, -612, -252, -48,
+ 648, 212, -688, 228, -52, -260, 428, -412, -272, -404, 180,
+ 816, -796, 48, 152, 484, -88, -216, 988, 696, 188, -528,
+ 648, -116, -180, 316, 476, 12, -564, 96, 476, -252, -364,
+ -376, -392, 556, -256, -576, 260, -352, 120, -16, -136, -260,
+ -492, 72, 556, 660, 580, 616, 772, 436, 424, -32, -324,
+ -1268, 416, -324, -80, 920, 160, 228, 724, 32, -516, 64,
+ 384, 68, -128, 136, 240, 248, -204, -68, 252, -932, -120,
+ -480, -628, -84, 192, 852, -404, -288, -132, 204, 100, 168,
+ -68, -196, -868, 460, 1080, 380, -80, 244, 0, 484, -888,
+ 64, 184, 352, 600, 460, 164, 604, -196, 320, -64, 588,
+ -184, 228, 12, 372, 48, -848, -344, 224, 208, -200, 484,
+ 128, -20, 272, -468, -840, 384, 256, -720, -520, -464, -580,
+ 112, -120, 644, -356, -208, -608, -528, 704, 560, -424, 392,
+ 828, 40, 84, 200, -152, 0, -144, 584, 280, -120, 80,
+ -556, -972, -196, -472, 724, 80, 168, -32, 88, 160, -688,
+ 0, 160, 356, 372, -776, 740, -128, 676, -248, -480, 4,
+ -364, 96, 544, 232, -1032, 956, 236, 356, 20, -40, 300,
+ 24, -676, -596, 132, 1120, -104, 532, -1096, 568, 648, 444,
+ 508, 380, 188, -376, -604, 1488, 424, 24, 756, -220, -192,
+ 716, 120, 920, 688, 168, 44, -460, 568, 284, 1144, 1160,
+ 600, 424, 888, 656, -356, -320, 220, 316, -176, -724, -188,
+ -816, -628, -348, -228, -380, 1012, -452, -660, 736, 928, 404,
+ -696, -72, -268, -892, 128, 184, -344, -780, 360, 336, 400,
+ 344, 428, 548, -112, 136, -228, -216, -820, -516, 340, 92,
+ -136, 116, -300, 376, -244, 100, -316, -520, -284, -12, 824,
+ 164, -548, -180, -128, 116, -924, -828, 268, -368, -580, 620,
+ 192, 160, 0, -1676, 1068, 424, -56, -360, 468, -156, 720,
+ 288, -528, 556, -364, 548, -148, 504, 316, 152, -648, -620,
+ -684, -24, -376, -384, -108, -920, -1032, 768, 180, -264, -508,
+ -1268, -260, -60, 300, -240, 988, 724, -376, -576, -212, -736,
+ 556, 192, 1092, -620, -880, 376, -56, -4, -216, -32, 836,
+ 268, 396, 1332, 864, -600, 100, 56, -412, -92, 356, 180,
+ 884, -468, -436, 292, -388, -804, -704, -840, 368, -348, 140,
+ -724, 1536, 940, 372, 112, -372, 436, -480, 1136, 296, -32,
+ -228, 132, -48, -220, 868, -1016, -60, -1044, -464, 328, 916,
+ 244, 12, -736, -296, 360, 468, -376, -108, -92, 788, 368,
+ -56, 544, 400, -672, -420, 728, 16, 320, 44, -284, -380,
+ -796, 488, 132, 204, -596, -372, 88, -152, -908, -636, -572,
+ -624, -116, -692, -200, -56, 276, -88, 484, -324, 948, 864,
+ 1000, -456, -184, -276, 292, -296, 156, 676, 320, 160, 908,
+ -84, -1236, -288, -116, 260, -372, -644, 732, -756, -96, 84,
+ 344, -520, 348, -688, 240, -84, 216, -1044, -136, -676, -396,
+ -1500, 960, -40, 176, 168, 1516, 420, -504, -344, -364, -360,
+ 1216, -940, -380, -212, 252, -660, -708, 484, -444, -152, 928,
+ -120, 1112, 476, -260, 560, -148, -344, 108, -196, 228, -288,
+ 504, 560, -328, -88, 288, -1008, 460, -228, 468, -836, -196,
+ 76, 388, 232, 412, -1168, -716, -644, 756, -172, -356, -504,
+ 116, 432, 528, 48, 476, -168, -608, 448, 160, -532, -272,
+ 28, -676, -12, 828, 980, 456, 520, 104, -104, 256, -344,
+ -4, -28, -368, -52, -524, -572, -556, -200, 768, 1124, -208,
+ -512, 176, 232, 248, -148, -888, 604, -600, -304, 804, -156,
+ -212, 488, -192, -804, -256, 368, -360, -916, -328, 228, -240,
+ -448, -472, 856, -556, -364, 572, -12, -156, -368, -340, 432,
+ 252, -752, -152, 288, 268, -580, -848, -592, 108, -76, 244,
+ 312, -716, 592, -80, 436, 360, 4, -248, 160, 516, 584,
+ 732, 44, -468, -280, -292, -156, -588, 28, 308, 912, 24,
+ 124, 156, 180, -252, 944, -924, -772, -520, -428, -624, 300,
+ -212, -1144, 32, -724, 800, -1128, -212, -1288, -848, 180, -416,
+ 440, 192, -576, -792, -76, -1080, 80, -532, -352, -132, 380,
+ -820, 148, 1112, 128, 164, 456, 700, -924, 144, -668, -384,
+ 648, -832, 508, 552, -52, -100, -656, 208, -568, 748, -88,
+ 680, 232, 300, 192, -408, -1012, -152, -252, -268, 272, -876,
+ -664, -648, -332, -136, 16, 12, 1152, -28, 332, -536, 320,
+ -672, -460, -316, 532, -260, 228, -40, 1052, -816, 180, 88,
+ -496, -556, -672, -368, 428, 92, 356, 404, -408, 252, 196,
+ -176, -556, 792, 268, 32, 372, 40, 96, -332, 328, 120,
+ 372, -900, -40, 472, -264, -592, 952, 128, 656, 112, 664,
+ -232, 420, 4, -344, -464, 556, 244, -416, -32, 252, 0,
+ -412, 188, -696, 508, -476, 324, -1096, 656, -312, 560, 264,
+ -136, 304, 160, -64, -580, 248, 336, -720, 560, -348, -288,
+ -276, -196, -500, 852, -544, -236, -1128, -992, -776, 116, 56,
+ 52, 860, 884, 212, -12, 168, 1020, 512, -552, 924, -148,
+ 716, 188, 164, -340, -520, -184, 880, -152, -680, -208, -1156,
+ -300, -528, -472, 364, 100, -744, -1056, -32, 540, 280, 144,
+ -676, -32, -232, -280, -224, 96, 568, -76, 172, 148, 148,
+ 104, 32, -296, -32, 788, -80, 32, -16, 280, 288, 944,
+ 428, -484
+};
+
+static inline s32 clamp(s32 value, s32 low, s32 high)
+{
+ return value < low ? low : (value > high ? high : value);
+}
+
+static inline s32 round_power_of_two(const s32 val, s32 n)
+{
+ const s32 a = (s32)1 << (n - 1);
+
+ return (val + a) >> n;
+}
+
+static void rockchip_av1_init_random_generator(u8 luma_num, u16 seed,
+ u16 *random_register)
+{
+ u16 random_reg = seed;
+
+ random_reg ^= ((luma_num * 37 + 178) & 255) << 8;
+ random_reg ^= ((luma_num * 173 + 105) & 255);
+ *random_register = random_reg;
+}
+
+static inline void rockchip_av1_update_random_register(u16 *random_register)
+{
+ u16 bit;
+ u16 random_reg = *random_register;
+
+ bit = ((random_reg >> 0) ^ (random_reg >> 1) ^ (random_reg >> 3) ^
+ (random_reg >> 12)) & 1;
+ *random_register = (random_reg >> 1) | (bit << 15);
+}
+
+static inline s32 rockchip_av1_get_random_number(u16 random_register)
+{
+ return (random_register >> 5) & ((1 << 11) - 1);
+}
+
+void rockchip_av1_generate_luma_grain_block(s32 (*luma_grain_block)[73][82],
+ s32 bitdepth,
+ u8 num_y_points,
+ s32 grain_scale_shift,
+ s32 ar_coeff_lag,
+ s32 (*ar_coeffs_y)[24],
+ s32 ar_coeff_shift,
+ s32 grain_min,
+ s32 grain_max,
+ u16 random_seed)
+{
+ s32 gauss_sec_shift = 12 - bitdepth + grain_scale_shift;
+ u16 grain_random_register = random_seed;
+ s32 i, j;
+
+ for (i = 0; i < 73; i++) {
+ for (j = 0; j < 82; j++) {
+ if (num_y_points > 0) {
+ rockchip_av1_update_random_register
+ (&grain_random_register);
+ (*luma_grain_block)[i][j] =
+ round_power_of_two(gaussian_sequence
+ [rockchip_av1_get_random_number
+ (grain_random_register)],
+ gauss_sec_shift);
+ } else {
+ (*luma_grain_block)[i][j] = 0;
+ }
+ }
+ }
+
+ for (i = 3; i < 73; i++)
+ for (j = 3; j < 82 - 3; j++) {
+ s32 pos = 0;
+ s32 wsum = 0;
+ s32 deltarow, deltacol;
+
+ for (deltarow = -ar_coeff_lag; deltarow <= 0;
+ deltarow++) {
+ for (deltacol = -ar_coeff_lag;
+ deltacol <= ar_coeff_lag; deltacol++) {
+ if (deltarow == 0 && deltacol == 0)
+ break;
+ wsum = wsum + (*ar_coeffs_y)[pos] *
+ (*luma_grain_block)[i + deltarow][j + deltacol];
+ ++pos;
+ }
+ }
+ (*luma_grain_block)[i][j] =
+ clamp((*luma_grain_block)[i][j] +
+ round_power_of_two(wsum, ar_coeff_shift),
+ grain_min, grain_max);
+ }
+}
+
+// Calculate chroma grain noise once per frame
+void rockchip_av1_generate_chroma_grain_block(s32 (*luma_grain_block)[73][82],
+ s32 (*cb_grain_block)[38][44],
+ s32 (*cr_grain_block)[38][44],
+ s32 bitdepth,
+ u8 num_y_points,
+ u8 num_cb_points,
+ u8 num_cr_points,
+ s32 grain_scale_shift,
+ s32 ar_coeff_lag,
+ s32 (*ar_coeffs_cb)[25],
+ s32 (*ar_coeffs_cr)[25],
+ s32 ar_coeff_shift,
+ s32 grain_min,
+ s32 grain_max,
+ u8 chroma_scaling_from_luma,
+ u16 random_seed)
+{
+ s32 gauss_sec_shift = 12 - bitdepth + grain_scale_shift;
+ u16 grain_random_register = 0;
+ s32 i, j;
+
+ rockchip_av1_init_random_generator(7, random_seed,
+ &grain_random_register);
+ for (i = 0; i < 38; i++) {
+ for (j = 0; j < 44; j++) {
+ if (num_cb_points || chroma_scaling_from_luma) {
+ rockchip_av1_update_random_register
+ (&grain_random_register);
+ (*cb_grain_block)[i][j] =
+ round_power_of_two(gaussian_sequence
+ [rockchip_av1_get_random_number
+ (grain_random_register)],
+ gauss_sec_shift);
+ } else {
+ (*cb_grain_block)[i][j] = 0;
+ }
+ }
+ }
+
+ rockchip_av1_init_random_generator(11, random_seed,
+ &grain_random_register);
+ for (i = 0; i < 38; i++) {
+ for (j = 0; j < 44; j++) {
+ if (num_cr_points || chroma_scaling_from_luma) {
+ rockchip_av1_update_random_register
+ (&grain_random_register);
+ (*cr_grain_block)[i][j] =
+ round_power_of_two(gaussian_sequence
+ [rockchip_av1_get_random_number
+ (grain_random_register)],
+ gauss_sec_shift);
+ } else {
+ (*cr_grain_block)[i][j] = 0;
+ }
+ }
+ }
+
+ for (i = 3; i < 38; i++) {
+ for (j = 3; j < 44 - 3; j++) {
+ s32 wsum_cb = 0;
+ s32 wsum_cr = 0;
+ s32 pos = 0;
+ s32 deltarow, deltacol;
+
+ for (deltarow = -ar_coeff_lag; deltarow <= 0;
+ deltarow++) {
+ for (deltacol = -ar_coeff_lag;
+ deltacol <= ar_coeff_lag; deltacol++) {
+ if (deltarow == 0 && deltacol == 0)
+ break;
+ wsum_cb = wsum_cb + (*ar_coeffs_cb)[pos] *
+ (*cb_grain_block)[i + deltarow][j + deltacol];
+ wsum_cr =
+ wsum_cr +
+ (*ar_coeffs_cr)[pos] *
+ (*cr_grain_block)[i + deltarow][j + deltacol];
+ ++pos;
+ }
+ }
+
+ if (num_y_points > 0) {
+ s32 av_luma = 0;
+ s32 luma_coord_y = (i << 1) - 3;
+ s32 luma_coord_x = (j << 1) - 3;
+
+ av_luma +=
+ (*luma_grain_block)[luma_coord_y][luma_coord_x];
+ av_luma +=
+ (*luma_grain_block)[luma_coord_y][luma_coord_x + 1];
+ av_luma +=
+ (*luma_grain_block)[luma_coord_y + 1][luma_coord_x];
+ av_luma +=
+ (*luma_grain_block)[(luma_coord_y + 1)][luma_coord_x + 1];
+ av_luma = round_power_of_two(av_luma, 2);
+
+ wsum_cb = wsum_cb + (*ar_coeffs_cb)[pos] * av_luma;
+ wsum_cr = wsum_cr + (*ar_coeffs_cr)[pos] * av_luma;
+ }
+
+ if (num_cb_points || chroma_scaling_from_luma) {
+ (*cb_grain_block)[i][j] =
+ clamp((*cb_grain_block)[i][j] +
+ round_power_of_two(wsum_cb, ar_coeff_shift),
+ grain_min, grain_max);
+ }
+ if (num_cr_points || chroma_scaling_from_luma) {
+ (*cr_grain_block)[i][j] =
+ clamp((*cr_grain_block)[i][j] +
+ round_power_of_two(wsum_cr, ar_coeff_shift),
+ grain_min, grain_max);
+ }
+ }
+ }
+}
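
The scaling applied to each Gaussian sample above is gauss_sec_shift = 12 - bitdepth + grain_scale_shift, so for a 10-bit stream with grain_scale_shift of 0 every sample taken from gaussian_sequence[] is rounded-shifted right by 2 before the auto-regressive filter runs. A small self-contained check of that arithmetic, illustrative only (the helper mirrors round_power_of_two() above):

static inline int example_round_power_of_two(int val, int n)
{
	return (val + (1 << (n - 1))) >> n;
}

static void example_gauss_scaling(void)
{
	int shift = 12 - 10 + 0;	/* 10-bit content, no extra grain scaling */

	/* gaussian_sequence[0] = 56   -> (56 + 2) >> 2   = 14 */
	/* gaussian_sequence[2] = -180 -> (-180 + 2) >> 2 = -45 */
	int scaled_first = example_round_power_of_two(56, shift);
	int scaled_third = example_round_power_of_two(-180, shift);

	(void)scaled_first;
	(void)scaled_third;
}
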
diff --git a/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.h b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.h
new file mode 100644
index 000000000000..31a8b7920c31
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ROCKCHIP_AV1_FILMGRAIN_H_
+#define _ROCKCHIP_AV1_FILMGRAIN_H_
+
+#include <linux/types.h>
+
+void rockchip_av1_generate_luma_grain_block(s32 (*luma_grain_block)[73][82],
+ s32 bitdepth,
+ u8 num_y_points,
+ s32 grain_scale_shift,
+ s32 ar_coeff_lag,
+ s32 (*ar_coeffs_y)[24],
+ s32 ar_coeff_shift,
+ s32 grain_min,
+ s32 grain_max,
+ u16 random_seed);
+
+void rockchip_av1_generate_chroma_grain_block(s32 (*luma_grain_block)[73][82],
+ s32 (*cb_grain_block)[38][44],
+ s32 (*cr_grain_block)[38][44],
+ s32 bitdepth,
+ u8 num_y_points,
+ u8 num_cb_points,
+ u8 num_cr_points,
+ s32 grain_scale_shift,
+ s32 ar_coeff_lag,
+ s32 (*ar_coeffs_cb)[25],
+ s32 (*ar_coeffs_cr)[25],
+ s32 ar_coeff_shift,
+ s32 grain_min,
+ s32 grain_max,
+ u8 chroma_scaling_from_luma,
+ u16 random_seed);
+
+#endif
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
new file mode 100644
index 000000000000..cc4483857489
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
@@ -0,0 +1,2232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Collabora
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@collabora.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "rockchip_vpu981_regs.h"
+
+#define AV1_DEC_MODE 17
+#define GM_GLOBAL_MODELS_PER_FRAME 7
+#define GLOBAL_MODEL_TOTAL_SIZE (6 * 4 + 4 * 2)
+#define GLOBAL_MODEL_SIZE ALIGN(GM_GLOBAL_MODELS_PER_FRAME * GLOBAL_MODEL_TOTAL_SIZE, 2048)
+#define AV1_MAX_TILES 128
+#define AV1_TILE_INFO_SIZE (AV1_MAX_TILES * 16)
+#define AV1DEC_MAX_PIC_BUFFERS 24
+#define AV1_REF_SCALE_SHIFT 14
+#define AV1_INVALID_IDX -1
+#define MAX_FRAME_DISTANCE 31
+#define AV1_PRIMARY_REF_NONE 7
+#define AV1_TILE_SIZE ALIGN(32 * 128, 4096)
+/*
+ * These 3 values aren't defined in enum v4l2_av1_segment_feature because
+ * they are not part of the specification
+ */
+#define V4L2_AV1_SEG_LVL_ALT_LF_Y_H 2
+#define V4L2_AV1_SEG_LVL_ALT_LF_U 3
+#define V4L2_AV1_SEG_LVL_ALT_LF_V 4
+
+#define SUPERRES_SCALE_BITS 3
+#define SCALE_NUMERATOR 8
+#define SUPERRES_SCALE_DENOMINATOR_MIN (SCALE_NUMERATOR + 1)
+
+#define RS_SUBPEL_BITS 6
+#define RS_SUBPEL_MASK ((1 << RS_SUBPEL_BITS) - 1)
+#define RS_SCALE_SUBPEL_BITS 14
+#define RS_SCALE_SUBPEL_MASK ((1 << RS_SCALE_SUBPEL_BITS) - 1)
+#define RS_SCALE_EXTRA_BITS (RS_SCALE_SUBPEL_BITS - RS_SUBPEL_BITS)
+#define RS_SCALE_EXTRA_OFF (1 << (RS_SCALE_EXTRA_BITS - 1))
+
+#define IS_INTRA(type) ((type == V4L2_AV1_KEY_FRAME) || (type == V4L2_AV1_INTRA_ONLY_FRAME))
+
+#define LST_BUF_IDX (V4L2_AV1_REF_LAST_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define LST2_BUF_IDX (V4L2_AV1_REF_LAST2_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define LST3_BUF_IDX (V4L2_AV1_REF_LAST3_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define GLD_BUF_IDX (V4L2_AV1_REF_GOLDEN_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define BWD_BUF_IDX (V4L2_AV1_REF_BWDREF_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define ALT2_BUF_IDX (V4L2_AV1_REF_ALTREF2_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define ALT_BUF_IDX (V4L2_AV1_REF_ALTREF_FRAME - V4L2_AV1_REF_LAST_FRAME)
+
+#define DIV_LUT_PREC_BITS 14
+#define DIV_LUT_BITS 8
+#define DIV_LUT_NUM BIT(DIV_LUT_BITS)
+#define WARP_PARAM_REDUCE_BITS 6
+#define WARPEDMODEL_PREC_BITS 16
+
+#define AV1_DIV_ROUND_UP_POW2(value, n) \
+({ \
+ typeof(n) _n = n; \
+ typeof(value) _value = value; \
+ (_value + (BIT(_n) >> 1)) >> _n; \
+})
+
+#define AV1_DIV_ROUND_UP_POW2_SIGNED(value, n) \
+({ \
+ typeof(n) _n_ = n; \
+ typeof(value) _value_ = value; \
+ (((_value_) < 0) ? -AV1_DIV_ROUND_UP_POW2(-(_value_), (_n_)) \
+ : AV1_DIV_ROUND_UP_POW2((_value_), (_n_))); \
+})
+
+struct rockchip_av1_film_grain {
+ u8 scaling_lut_y[256];
+ u8 scaling_lut_cb[256];
+ u8 scaling_lut_cr[256];
+ s16 cropped_luma_grain_block[4096];
+ s16 cropped_chroma_grain_block[1024 * 2];
+};
+
+static const short div_lut[DIV_LUT_NUM + 1] = {
+ 16384, 16320, 16257, 16194, 16132, 16070, 16009, 15948, 15888, 15828, 15768,
+ 15709, 15650, 15592, 15534, 15477, 15420, 15364, 15308, 15252, 15197, 15142,
+ 15087, 15033, 14980, 14926, 14873, 14821, 14769, 14717, 14665, 14614, 14564,
+ 14513, 14463, 14413, 14364, 14315, 14266, 14218, 14170, 14122, 14075, 14028,
+ 13981, 13935, 13888, 13843, 13797, 13752, 13707, 13662, 13618, 13574, 13530,
+ 13487, 13443, 13400, 13358, 13315, 13273, 13231, 13190, 13148, 13107, 13066,
+ 13026, 12985, 12945, 12906, 12866, 12827, 12788, 12749, 12710, 12672, 12633,
+ 12596, 12558, 12520, 12483, 12446, 12409, 12373, 12336, 12300, 12264, 12228,
+ 12193, 12157, 12122, 12087, 12053, 12018, 11984, 11950, 11916, 11882, 11848,
+ 11815, 11782, 11749, 11716, 11683, 11651, 11619, 11586, 11555, 11523, 11491,
+ 11460, 11429, 11398, 11367, 11336, 11305, 11275, 11245, 11215, 11185, 11155,
+ 11125, 11096, 11067, 11038, 11009, 10980, 10951, 10923, 10894, 10866, 10838,
+ 10810, 10782, 10755, 10727, 10700, 10673, 10645, 10618, 10592, 10565, 10538,
+ 10512, 10486, 10460, 10434, 10408, 10382, 10356, 10331, 10305, 10280, 10255,
+ 10230, 10205, 10180, 10156, 10131, 10107, 10082, 10058, 10034, 10010, 9986,
+ 9963, 9939, 9916, 9892, 9869, 9846, 9823, 9800, 9777, 9754, 9732,
+ 9709, 9687, 9664, 9642, 9620, 9598, 9576, 9554, 9533, 9511, 9489,
+ 9468, 9447, 9425, 9404, 9383, 9362, 9341, 9321, 9300, 9279, 9259,
+ 9239, 9218, 9198, 9178, 9158, 9138, 9118, 9098, 9079, 9059, 9039,
+ 9020, 9001, 8981, 8962, 8943, 8924, 8905, 8886, 8867, 8849, 8830,
+ 8812, 8793, 8775, 8756, 8738, 8720, 8702, 8684, 8666, 8648, 8630,
+ 8613, 8595, 8577, 8560, 8542, 8525, 8508, 8490, 8473, 8456, 8439,
+ 8422, 8405, 8389, 8372, 8355, 8339, 8322, 8306, 8289, 8273, 8257,
+ 8240, 8224, 8208, 8192,
+};
+
+static int rockchip_vpu981_get_frame_index(struct hantro_ctx *ctx, int ref)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ u64 timestamp;
+ int i, idx = frame->ref_frame_idx[ref];
+
+ if (idx >= V4L2_AV1_TOTAL_REFS_PER_FRAME || idx < 0)
+ return AV1_INVALID_IDX;
+
+ timestamp = frame->reference_frame_ts[idx];
+ for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++) {
+ if (!av1_dec->frame_refs[i].used)
+ continue;
+ if (av1_dec->frame_refs[i].timestamp == timestamp)
+ return i;
+ }
+
+ return AV1_INVALID_IDX;
+}
+
+static int rockchip_vpu981_get_order_hint(struct hantro_ctx *ctx, int ref)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ int idx = rockchip_vpu981_get_frame_index(ctx, ref);
+
+ if (idx != AV1_INVALID_IDX)
+ return av1_dec->frame_refs[idx].order_hint;
+
+ return 0;
+}
+
+static int rockchip_vpu981_av1_dec_frame_ref(struct hantro_ctx *ctx,
+ u64 timestamp)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ int i;
+
+ for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++) {
+ int j;
+
+ if (av1_dec->frame_refs[i].used)
+ continue;
+
+ av1_dec->frame_refs[i].width = frame->frame_width_minus_1 + 1;
+ av1_dec->frame_refs[i].height = frame->frame_height_minus_1 + 1;
+ av1_dec->frame_refs[i].mi_cols = DIV_ROUND_UP(frame->frame_width_minus_1 + 1, 8);
+ av1_dec->frame_refs[i].mi_rows = DIV_ROUND_UP(frame->frame_height_minus_1 + 1, 8);
+ av1_dec->frame_refs[i].timestamp = timestamp;
+ av1_dec->frame_refs[i].frame_type = frame->frame_type;
+ av1_dec->frame_refs[i].order_hint = frame->order_hint;
+ if (!av1_dec->frame_refs[i].vb2_ref)
+ av1_dec->frame_refs[i].vb2_ref = hantro_get_dst_buf(ctx);
+
+ for (j = 0; j < V4L2_AV1_TOTAL_REFS_PER_FRAME; j++)
+ av1_dec->frame_refs[i].order_hints[j] = frame->order_hints[j];
+ av1_dec->frame_refs[i].used = true;
+ av1_dec->current_frame_index = i;
+
+ return i;
+ }
+
+ return AV1_INVALID_IDX;
+}
+
+static void rockchip_vpu981_av1_dec_frame_unref(struct hantro_ctx *ctx, int idx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ if (idx >= 0)
+ av1_dec->frame_refs[idx].used = false;
+}
+
+static void rockchip_vpu981_av1_dec_clean_refs(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+
+ int ref, idx;
+
+ for (idx = 0; idx < AV1_MAX_FRAME_BUF_COUNT; idx++) {
+ u64 timestamp = av1_dec->frame_refs[idx].timestamp;
+ bool used = false;
+
+ if (!av1_dec->frame_refs[idx].used)
+ continue;
+
+ for (ref = 0; ref < V4L2_AV1_TOTAL_REFS_PER_FRAME; ref++) {
+ if (ctrls->frame->reference_frame_ts[ref] == timestamp)
+ used = true;
+ }
+
+ if (!used)
+ rockchip_vpu981_av1_dec_frame_unref(ctx, idx);
+ }
+}
+
+static size_t rockchip_vpu981_av1_dec_luma_size(struct hantro_ctx *ctx)
+{
+ return ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8;
+}
+
+static size_t rockchip_vpu981_av1_dec_chroma_size(struct hantro_ctx *ctx)
+{
+ size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
+
+ return ALIGN((cr_offset * 3) / 2, 64);
+}
+
+static void rockchip_vpu981_av1_dec_tiles_free(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ if (av1_dec->db_data_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->db_data_col.size,
+ av1_dec->db_data_col.cpu,
+ av1_dec->db_data_col.dma);
+ av1_dec->db_data_col.cpu = NULL;
+
+ if (av1_dec->db_ctrl_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->db_ctrl_col.size,
+ av1_dec->db_ctrl_col.cpu,
+ av1_dec->db_ctrl_col.dma);
+ av1_dec->db_ctrl_col.cpu = NULL;
+
+ if (av1_dec->cdef_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->cdef_col.size,
+ av1_dec->cdef_col.cpu, av1_dec->cdef_col.dma);
+ av1_dec->cdef_col.cpu = NULL;
+
+ if (av1_dec->sr_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->sr_col.size,
+ av1_dec->sr_col.cpu, av1_dec->sr_col.dma);
+ av1_dec->sr_col.cpu = NULL;
+
+ if (av1_dec->lr_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->lr_col.size,
+ av1_dec->lr_col.cpu, av1_dec->lr_col.dma);
+ av1_dec->lr_col.cpu = NULL;
+}
+
+static int rockchip_vpu981_av1_dec_tiles_reallocate(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ unsigned int num_tile_cols = 1 << ctrls->tile_group_entry->tile_col;
+ unsigned int height = ALIGN(ctrls->frame->frame_height_minus_1 + 1, 64);
+ unsigned int height_in_sb = height / 64;
+ unsigned int stripe_num = ((height + 8) + 63) / 64;
+ size_t size;
+
+ if (av1_dec->db_data_col.size >=
+ ALIGN(height * 12 * ctx->bit_depth / 8, 128) * num_tile_cols)
+ return 0;
+
+ rockchip_vpu981_av1_dec_tiles_free(ctx);
+
+ size = ALIGN(height * 12 * ctx->bit_depth / 8, 128) * num_tile_cols;
+ av1_dec->db_data_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->db_data_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->db_data_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->db_data_col.size = size;
+
+ size = ALIGN(height * 2 * 16 / 4, 128) * num_tile_cols;
+ av1_dec->db_ctrl_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->db_ctrl_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->db_ctrl_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->db_ctrl_col.size = size;
+
+ size = ALIGN(height_in_sb * 44 * ctx->bit_depth * 16 / 8, 128) * num_tile_cols;
+ av1_dec->cdef_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->cdef_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->cdef_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->cdef_col.size = size;
+
+ size = ALIGN(height_in_sb * (3040 + 1280), 128) * num_tile_cols;
+ av1_dec->sr_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->sr_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->sr_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->sr_col.size = size;
+
+ size = ALIGN(stripe_num * 1536 * ctx->bit_depth / 8, 128) * num_tile_cols;
+ av1_dec->lr_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->lr_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->lr_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->lr_col.size = size;
+
+ av1_dec->num_tile_cols_allocated = num_tile_cols;
+ return 0;
+
+buffer_allocation_error:
+ rockchip_vpu981_av1_dec_tiles_free(ctx);
+ return -ENOMEM;
+}
+
+void rockchip_vpu981_av1_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ if (av1_dec->global_model.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->global_model.size,
+ av1_dec->global_model.cpu,
+ av1_dec->global_model.dma);
+ av1_dec->global_model.cpu = NULL;
+
+ if (av1_dec->tile_info.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->tile_info.size,
+ av1_dec->tile_info.cpu,
+ av1_dec->tile_info.dma);
+ av1_dec->tile_info.cpu = NULL;
+
+ if (av1_dec->film_grain.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->film_grain.size,
+ av1_dec->film_grain.cpu,
+ av1_dec->film_grain.dma);
+ av1_dec->film_grain.cpu = NULL;
+
+ if (av1_dec->prob_tbl.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->prob_tbl.size,
+ av1_dec->prob_tbl.cpu, av1_dec->prob_tbl.dma);
+ av1_dec->prob_tbl.cpu = NULL;
+
+ if (av1_dec->prob_tbl_out.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->prob_tbl_out.size,
+ av1_dec->prob_tbl_out.cpu,
+ av1_dec->prob_tbl_out.dma);
+ av1_dec->prob_tbl_out.cpu = NULL;
+
+ if (av1_dec->tile_buf.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->tile_buf.size,
+ av1_dec->tile_buf.cpu, av1_dec->tile_buf.dma);
+ av1_dec->tile_buf.cpu = NULL;
+
+ rockchip_vpu981_av1_dec_tiles_free(ctx);
+}
+
+int rockchip_vpu981_av1_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ memset(av1_dec, 0, sizeof(*av1_dec));
+
+ av1_dec->global_model.cpu = dma_alloc_coherent(vpu->dev, GLOBAL_MODEL_SIZE,
+ &av1_dec->global_model.dma,
+ GFP_KERNEL);
+ if (!av1_dec->global_model.cpu)
+ return -ENOMEM;
+ av1_dec->global_model.size = GLOBAL_MODEL_SIZE;
+
+ av1_dec->tile_info.cpu = dma_alloc_coherent(vpu->dev, AV1_TILE_INFO_SIZE,
+ &av1_dec->tile_info.dma,
+ GFP_KERNEL);
+ if (!av1_dec->tile_info.cpu)
+ return -ENOMEM;
+ av1_dec->tile_info.size = AV1_TILE_INFO_SIZE;
+
+ av1_dec->film_grain.cpu = dma_alloc_coherent(vpu->dev,
+ ALIGN(sizeof(struct rockchip_av1_film_grain), 2048),
+ &av1_dec->film_grain.dma,
+ GFP_KERNEL);
+ if (!av1_dec->film_grain.cpu)
+ return -ENOMEM;
+ av1_dec->film_grain.size = ALIGN(sizeof(struct rockchip_av1_film_grain), 2048);
+
+ av1_dec->prob_tbl.cpu = dma_alloc_coherent(vpu->dev,
+ ALIGN(sizeof(struct av1cdfs), 2048),
+ &av1_dec->prob_tbl.dma,
+ GFP_KERNEL);
+ if (!av1_dec->prob_tbl.cpu)
+ return -ENOMEM;
+ av1_dec->prob_tbl.size = ALIGN(sizeof(struct av1cdfs), 2048);
+
+ av1_dec->prob_tbl_out.cpu = dma_alloc_coherent(vpu->dev,
+ ALIGN(sizeof(struct av1cdfs), 2048),
+ &av1_dec->prob_tbl_out.dma,
+ GFP_KERNEL);
+ if (!av1_dec->prob_tbl_out.cpu)
+ return -ENOMEM;
+ av1_dec->prob_tbl_out.size = ALIGN(sizeof(struct av1cdfs), 2048);
+ av1_dec->cdfs = &av1_dec->default_cdfs;
+ av1_dec->cdfs_ndvc = &av1_dec->default_cdfs_ndvc;
+
+ rockchip_av1_set_default_cdfs(av1_dec->cdfs, av1_dec->cdfs_ndvc);
+
+ av1_dec->tile_buf.cpu = dma_alloc_coherent(vpu->dev,
+ AV1_TILE_SIZE,
+ &av1_dec->tile_buf.dma,
+ GFP_KERNEL);
+ if (!av1_dec->tile_buf.cpu)
+ return -ENOMEM;
+ av1_dec->tile_buf.size = AV1_TILE_SIZE;
+
+ return 0;
+}
+
+static int rockchip_vpu981_av1_dec_prepare_run(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+
+ ctrls->sequence = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_SEQUENCE);
+ if (WARN_ON(!ctrls->sequence))
+ return -EINVAL;
+
+ ctrls->tile_group_entry =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY);
+ if (WARN_ON(!ctrls->tile_group_entry))
+ return -EINVAL;
+
+ ctrls->frame = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_FRAME);
+ if (WARN_ON(!ctrls->frame))
+ return -EINVAL;
+
+ ctrls->film_grain =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_FILM_GRAIN);
+
+ return rockchip_vpu981_av1_dec_tiles_reallocate(ctx);
+}
+
+static inline int rockchip_vpu981_av1_dec_get_msb(u32 n)
+{
+ if (n == 0)
+ return 0;
+ return 31 ^ __builtin_clz(n);
+}
+
+static short rockchip_vpu981_av1_dec_resolve_divisor_32(u32 d, short *shift)
+{
+ int f;
+ u64 e;
+
+ *shift = rockchip_vpu981_av1_dec_get_msb(d);
+ /* e is obtained from D after resetting the most significant 1 bit. */
+ e = d - ((u32)1 << *shift);
+ /* Get the most significant DIV_LUT_BITS (8) bits of e into f */
+ if (*shift > DIV_LUT_BITS)
+ f = AV1_DIV_ROUND_UP_POW2(e, *shift - DIV_LUT_BITS);
+ else
+ f = e << (DIV_LUT_BITS - *shift);
+ if (f > DIV_LUT_NUM)
+ return -1;
+ *shift += DIV_LUT_PREC_BITS;
+ /* Use f as lookup into the precomputed table of multipliers */
+ return div_lut[f];
+}
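
In other words, div_lut[f] holds the rounded value of (1 << 14) * 256 / (256 + f), so a divisor d with most-significant bit s and top fractional bits f is approximated as x / d ~= (x * div_lut[f]) >> (s + 14). A quick worked check, illustrative only and not part of the patch:

/*
 * d = 3: msb = 1, e = 3 - 2 = 1, f = 1 << (8 - 1) = 128,
 * div_lut[128] = 10923 (~32768 / 3), final shift = 1 + DIV_LUT_PREC_BITS = 15.
 */
static inline u32 example_div_by_3(u32 x)
{
	return (x * 10923U) >> 15;	/* example_div_by_3(300) == 100 */
}
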
+
+static void
+rockchip_vpu981_av1_dec_get_shear_params(const u32 *params, s64 *alpha,
+ s64 *beta, s64 *gamma, s64 *delta)
+{
+ const int *mat = params;
+ short shift;
+ short y;
+ long long gv, dv;
+
+ if (mat[2] <= 0)
+ return;
+
+ *alpha = clamp_val(mat[2] - (1 << WARPEDMODEL_PREC_BITS), S16_MIN, S16_MAX);
+ *beta = clamp_val(mat[3], S16_MIN, S16_MAX);
+
+ y = rockchip_vpu981_av1_dec_resolve_divisor_32(abs(mat[2]), &shift) * (mat[2] < 0 ? -1 : 1);
+
+ gv = ((long long)mat[4] * (1 << WARPEDMODEL_PREC_BITS)) * y;
+
+ *gamma = clamp_val((int)AV1_DIV_ROUND_UP_POW2_SIGNED(gv, shift), S16_MIN, S16_MAX);
+
+ dv = ((long long)mat[3] * mat[4]) * y;
+ *delta = clamp_val(mat[5] -
+ (int)AV1_DIV_ROUND_UP_POW2_SIGNED(dv, shift) - (1 << WARPEDMODEL_PREC_BITS),
+ S16_MIN, S16_MAX);
+
+ *alpha = AV1_DIV_ROUND_UP_POW2_SIGNED(*alpha, WARP_PARAM_REDUCE_BITS)
+ * (1 << WARP_PARAM_REDUCE_BITS);
+ *beta = AV1_DIV_ROUND_UP_POW2_SIGNED(*beta, WARP_PARAM_REDUCE_BITS)
+ * (1 << WARP_PARAM_REDUCE_BITS);
+ *gamma = AV1_DIV_ROUND_UP_POW2_SIGNED(*gamma, WARP_PARAM_REDUCE_BITS)
+ * (1 << WARP_PARAM_REDUCE_BITS);
+ *delta = AV1_DIV_ROUND_UP_POW2_SIGNED(*delta, WARP_PARAM_REDUCE_BITS)
+ * (1 << WARP_PARAM_REDUCE_BITS);
+}
+
+static void rockchip_vpu981_av1_dec_set_global_model(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_global_motion *gm = &frame->global_motion;
+ u8 *dst = av1_dec->global_model.cpu;
+ struct hantro_dev *vpu = ctx->dev;
+ int ref_frame, i;
+
+ memset(dst, 0, GLOBAL_MODEL_SIZE);
+ for (ref_frame = 0; ref_frame < V4L2_AV1_REFS_PER_FRAME; ++ref_frame) {
+ s64 alpha = 0, beta = 0, gamma = 0, delta = 0;
+
+ for (i = 0; i < 6; ++i) {
+ if (i == 2)
+ *(s32 *)dst =
+ gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][3];
+ else if (i == 3)
+ *(s32 *)dst =
+ gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][2];
+ else
+ *(s32 *)dst =
+ gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][i];
+ dst += 4;
+ }
+
+ if (gm->type[V4L2_AV1_REF_LAST_FRAME + ref_frame] <= V4L2_AV1_WARP_MODEL_AFFINE)
+ rockchip_vpu981_av1_dec_get_shear_params(&gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][0],
+ &alpha, &beta, &gamma, &delta);
+
+ *(s16 *)dst = alpha;
+ dst += 2;
+ *(s16 *)dst = beta;
+ dst += 2;
+ *(s16 *)dst = gamma;
+ dst += 2;
+ *(s16 *)dst = delta;
+ dst += 2;
+ }
+
+ hantro_write_addr(vpu, AV1_GLOBAL_MODEL, av1_dec->global_model.dma);
+}
+
+static int rockchip_vpu981_av1_tile_log2(int target)
+{
+ int k;
+
+ /*
+ * returns the smallest value for k such that 1 << k is greater
+ * than or equal to target
+ */
+ for (k = 0; (1 << k) < target; k++);
+
+ return k;
+}
+
+static void rockchip_vpu981_av1_dec_set_tile_info(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_av1_tile_info *tile_info = &ctrls->frame->tile_info;
+ const struct v4l2_ctrl_av1_tile_group_entry *group_entry =
+ ctrls->tile_group_entry;
+ int context_update_y =
+ tile_info->context_update_tile_id / tile_info->tile_cols;
+ int context_update_x =
+ tile_info->context_update_tile_id % tile_info->tile_cols;
+ int context_update_tile_id =
+ context_update_x * tile_info->tile_rows + context_update_y;
+ u8 *dst = av1_dec->tile_info.cpu;
+ struct hantro_dev *vpu = ctx->dev;
+ int tile0, tile1;
+
+ memset(dst, 0, av1_dec->tile_info.size);
+
+ for (tile0 = 0; tile0 < tile_info->tile_cols; tile0++) {
+ for (tile1 = 0; tile1 < tile_info->tile_rows; tile1++) {
+ int tile_id = tile1 * tile_info->tile_cols + tile0;
+ u32 start, end;
+ u32 y0 =
+ tile_info->height_in_sbs_minus_1[tile1] + 1;
+ u32 x0 = tile_info->width_in_sbs_minus_1[tile0] + 1;
+
+ /* tile size in SB units (width,height) */
+ *dst++ = x0;
+ *dst++ = 0;
+ *dst++ = 0;
+ *dst++ = 0;
+ *dst++ = y0;
+ *dst++ = 0;
+ *dst++ = 0;
+ *dst++ = 0;
+
+ /* tile start position */
+ start = group_entry[tile_id].tile_offset - group_entry[0].tile_offset;
+ *dst++ = start & 255;
+ *dst++ = (start >> 8) & 255;
+ *dst++ = (start >> 16) & 255;
+ *dst++ = (start >> 24) & 255;
+
+ /* number of bytes in tile data */
+ end = start + group_entry[tile_id].tile_size;
+ *dst++ = end & 255;
+ *dst++ = (end >> 8) & 255;
+ *dst++ = (end >> 16) & 255;
+ *dst++ = (end >> 24) & 255;
+ }
+ }
+
+ hantro_reg_write(vpu, &av1_multicore_expect_context_update, !!(context_update_x == 0));
+ hantro_reg_write(vpu, &av1_tile_enable,
+ !!((tile_info->tile_cols > 1) || (tile_info->tile_rows > 1)));
+ hantro_reg_write(vpu, &av1_num_tile_cols_8k, tile_info->tile_cols);
+ hantro_reg_write(vpu, &av1_num_tile_rows_8k, tile_info->tile_rows);
+ hantro_reg_write(vpu, &av1_context_update_tile_id, context_update_tile_id);
+ hantro_reg_write(vpu, &av1_tile_transpose, 1);
+ if (rockchip_vpu981_av1_tile_log2(tile_info->tile_cols) ||
+ rockchip_vpu981_av1_tile_log2(tile_info->tile_rows))
+ hantro_reg_write(vpu, &av1_dec_tile_size_mag, tile_info->tile_size_bytes - 1);
+ else
+ hantro_reg_write(vpu, &av1_dec_tile_size_mag, 3);
+
+ hantro_write_addr(vpu, AV1_TILE_BASE, av1_dec->tile_info.dma);
+}
+
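+/*
+ * Signed distance between two order hints, wrapping at half the order
+ * hint range (the AV1 get_relative_dist() operation). For example, with
+ * 7 order hint bits, hints a = 3 and b = 125 give +6: frame 3 comes six
+ * frames after frame 125 once the 128-frame wrap is accounted for.
+ */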
+static int rockchip_vpu981_av1_dec_get_dist(struct hantro_ctx *ctx,
+ int a, int b)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ int bits = ctrls->sequence->order_hint_bits - 1;
+ int diff, m;
+
+ if (!ctrls->sequence->order_hint_bits)
+ return 0;
+
+ diff = a - b;
+ m = 1 << bits;
+ diff = (diff & (m - 1)) - (diff & m);
+
+ return diff;
+}
+
+static void rockchip_vpu981_av1_dec_set_frame_sign_bias(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_ctrl_av1_sequence *sequence = ctrls->sequence;
+ int i;
+
+ if (!sequence->order_hint_bits || IS_INTRA(frame->frame_type)) {
+ for (i = 0; i < V4L2_AV1_TOTAL_REFS_PER_FRAME; i++)
+ av1_dec->ref_frame_sign_bias[i] = 0;
+
+ return;
+ }
+ /*
+ * A reference gets sign bias 1 when it follows the current frame
+ * in output order.
+ */
+ for (i = 0; i < V4L2_AV1_TOTAL_REFS_PER_FRAME - 1; i++) {
+ if (rockchip_vpu981_get_frame_index(ctx, i) >= 0) {
+ int rel_off =
+ rockchip_vpu981_av1_dec_get_dist(ctx,
+ rockchip_vpu981_get_order_hint(ctx, i),
+ frame->order_hint);
+ av1_dec->ref_frame_sign_bias[i + 1] = (rel_off <= 0) ? 0 : 1;
+ }
+ }
+}
+
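+/*
+ * Program the size and scaling ratios for reference slot @ref, point the
+ * hardware at its luma/chroma/MV buffers, and report whether the
+ * reference is scaled relative to the current frame.
+ */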
+static bool
+rockchip_vpu981_av1_dec_set_ref(struct hantro_ctx *ctx, int ref, int idx,
+ int width, int height)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_decoded_buffer *dst;
+ dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
+ size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
+ size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
+ int cur_width = frame->frame_width_minus_1 + 1;
+ int cur_height = frame->frame_height_minus_1 + 1;
+ int scale_width =
+ ((width << AV1_REF_SCALE_SHIFT) + cur_width / 2) / cur_width;
+ int scale_height =
+ ((height << AV1_REF_SCALE_SHIFT) + cur_height / 2) / cur_height;
+
+ switch (ref) {
+ case 0:
+ hantro_reg_write(vpu, &av1_ref0_height, height);
+ hantro_reg_write(vpu, &av1_ref0_width, width);
+ hantro_reg_write(vpu, &av1_ref0_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref0_hor_scale, scale_height);
+ break;
+ case 1:
+ hantro_reg_write(vpu, &av1_ref1_height, height);
+ hantro_reg_write(vpu, &av1_ref1_width, width);
+ hantro_reg_write(vpu, &av1_ref1_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref1_hor_scale, scale_height);
+ break;
+ case 2:
+ hantro_reg_write(vpu, &av1_ref2_height, height);
+ hantro_reg_write(vpu, &av1_ref2_width, width);
+ hantro_reg_write(vpu, &av1_ref2_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref2_hor_scale, scale_height);
+ break;
+ case 3:
+ hantro_reg_write(vpu, &av1_ref3_height, height);
+ hantro_reg_write(vpu, &av1_ref3_width, width);
+ hantro_reg_write(vpu, &av1_ref3_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref3_hor_scale, scale_height);
+ break;
+ case 4:
+ hantro_reg_write(vpu, &av1_ref4_height, height);
+ hantro_reg_write(vpu, &av1_ref4_width, width);
+ hantro_reg_write(vpu, &av1_ref4_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref4_hor_scale, scale_height);
+ break;
+ case 5:
+ hantro_reg_write(vpu, &av1_ref5_height, height);
+ hantro_reg_write(vpu, &av1_ref5_width, width);
+ hantro_reg_write(vpu, &av1_ref5_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref5_hor_scale, scale_height);
+ break;
+ case 6:
+ hantro_reg_write(vpu, &av1_ref6_height, height);
+ hantro_reg_write(vpu, &av1_ref6_width, width);
+ hantro_reg_write(vpu, &av1_ref6_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref6_hor_scale, scale_height);
+ break;
+ default:
+ pr_warn("AV1 invalid reference frame index\n");
+ }
+
+ dst = vb2_to_hantro_decoded_buf(&av1_dec->frame_refs[idx].vb2_ref->vb2_buf);
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ hantro_write_addr(vpu, AV1_REFERENCE_Y(ref), luma_addr);
+ hantro_write_addr(vpu, AV1_REFERENCE_CB(ref), chroma_addr);
+ hantro_write_addr(vpu, AV1_REFERENCE_MV(ref), mv_addr);
+
+ return (scale_width != (1 << AV1_REF_SCALE_SHIFT)) ||
+ (scale_height != (1 << AV1_REF_SCALE_SHIFT));
+}
+
+static void rockchip_vpu981_av1_dec_set_sign_bias(struct hantro_ctx *ctx,
+ int ref, int val)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ switch (ref) {
+ case 0:
+ hantro_reg_write(vpu, &av1_ref0_sign_bias, val);
+ break;
+ case 1:
+ hantro_reg_write(vpu, &av1_ref1_sign_bias, val);
+ break;
+ case 2:
+ hantro_reg_write(vpu, &av1_ref2_sign_bias, val);
+ break;
+ case 3:
+ hantro_reg_write(vpu, &av1_ref3_sign_bias, val);
+ break;
+ case 4:
+ hantro_reg_write(vpu, &av1_ref4_sign_bias, val);
+ break;
+ case 5:
+ hantro_reg_write(vpu, &av1_ref5_sign_bias, val);
+ break;
+ case 6:
+ hantro_reg_write(vpu, &av1_ref6_sign_bias, val);
+ break;
+ default:
+ pr_warn("AV1 invalid sign bias index\n");
+ break;
+ }
+}
+
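+/*
+ * Translate the per-segment feature data into the per-segment quantizer,
+ * loop filter delta, reference, skip and global MV registers. When
+ * segmentation is enabled and the frame has a valid primary reference,
+ * the hardware is also pointed at that reference's motion vector buffer
+ * for temporal segment prediction.
+ */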
+static void rockchip_vpu981_av1_dec_set_segmentation(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_segmentation *seg = &frame->segmentation;
+ u32 segval[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX] = { 0 };
+ struct hantro_dev *vpu = ctx->dev;
+ u8 segsign = 0, preskip_segid = 0, last_active_seg = 0, i, j;
+
+ if (!!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_ENABLED) &&
+ frame->primary_ref_frame < V4L2_AV1_REFS_PER_FRAME) {
+ int idx = rockchip_vpu981_get_frame_index(ctx, frame->primary_ref_frame);
+
+ if (idx >= 0) {
+ dma_addr_t luma_addr, mv_addr = 0;
+ struct hantro_decoded_buffer *seg;
+ size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
+
+ seg = vb2_to_hantro_decoded_buf(&av1_dec->frame_refs[idx].vb2_ref->vb2_buf);
+ luma_addr = hantro_get_dec_buf_addr(ctx, &seg->base.vb.vb2_buf);
+ mv_addr = luma_addr + mv_offset;
+
+ hantro_write_addr(vpu, AV1_SEGMENTATION, mv_addr);
+ hantro_reg_write(vpu, &av1_use_temporal3_mvs, 1);
+ }
+ }
+
+ hantro_reg_write(vpu, &av1_segment_temp_upd_e,
+ !!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE));
+ hantro_reg_write(vpu, &av1_segment_upd_e,
+ !!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP));
+ hantro_reg_write(vpu, &av1_segment_e,
+ !!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_ENABLED));
+
+ hantro_reg_write(vpu, &av1_error_resilient,
+ !!(frame->flags & V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE));
+
+ if (IS_INTRA(frame->frame_type) ||
+ !!(frame->flags & V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE)) {
+ hantro_reg_write(vpu, &av1_use_temporal3_mvs, 0);
+ }
+
+ if (seg->flags & V4L2_AV1_SEGMENTATION_FLAG_ENABLED) {
+ int s;
+
+ for (s = 0; s < V4L2_AV1_MAX_SEGMENTS; s++) {
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_Q)) {
+ segval[s][V4L2_AV1_SEG_LVL_ALT_Q] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_Q]),
+ 0, 255);
+ segsign |=
+ (seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_Q] < 0) << s;
+ }
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_Y_V))
+ segval[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_V] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]),
+ -63, 63);
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_Y_H))
+ segval[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_H] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]),
+ -63, 63);
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_U))
+ segval[s][V4L2_AV1_SEG_LVL_ALT_LF_U] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_U]),
+ -63, 63);
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_V))
+ segval[s][V4L2_AV1_SEG_LVL_ALT_LF_V] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_V]),
+ -63, 63);
+
+ if (frame->frame_type && seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_REF_FRAME))
+ segval[s][V4L2_AV1_SEG_LVL_REF_FRAME]++;
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_REF_SKIP))
+ segval[s][V4L2_AV1_SEG_LVL_REF_SKIP] = 1;
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_REF_GLOBALMV))
+ segval[s][V4L2_AV1_SEG_LVL_REF_GLOBALMV] = 1;
+ }
+ }
+
+ for (i = 0; i < V4L2_AV1_MAX_SEGMENTS; i++) {
+ for (j = 0; j < V4L2_AV1_SEG_LVL_MAX; j++) {
+ if (seg->feature_enabled[i]
+ & V4L2_AV1_SEGMENT_FEATURE_ENABLED(j)) {
+ preskip_segid |= (j >= V4L2_AV1_SEG_LVL_REF_FRAME);
+ last_active_seg = max(i, last_active_seg);
+ }
+ }
+ }
+
+ hantro_reg_write(vpu, &av1_last_active_seg, last_active_seg);
+ hantro_reg_write(vpu, &av1_preskip_segid, preskip_segid);
+
+ hantro_reg_write(vpu, &av1_seg_quant_sign, segsign);
+
+ /* Write QP, filter level, ref frame and skip for every segment */
+ hantro_reg_write(vpu, &av1_quant_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+}
+
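+/*
+ * A frame is treated as lossless only if every segment ends up with a
+ * zero effective base quantizer index and none of the DC/AC delta-q
+ * values are in use.
+ */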
+static bool rockchip_vpu981_av1_dec_is_lossless(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_segmentation *segmentation = &frame->segmentation;
+ const struct v4l2_av1_quantization *quantization = &frame->quantization;
+ int i;
+
+ for (i = 0; i < V4L2_AV1_MAX_SEGMENTS; i++) {
+ int qindex = quantization->base_q_idx;
+
+ if (segmentation->feature_enabled[i] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_Q)) {
+ qindex += segmentation->feature_data[i][V4L2_AV1_SEG_LVL_ALT_Q];
+ }
+ qindex = clamp(qindex, 0, 255);
+
+ if (qindex ||
+ quantization->delta_q_y_dc ||
+ quantization->delta_q_u_dc ||
+ quantization->delta_q_u_ac ||
+ quantization->delta_q_v_dc ||
+ quantization->delta_q_v_ac)
+ return false;
+ }
+ return true;
+}
+
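+/*
+ * Program loop filter levels and sharpness. Reference and mode deltas
+ * are only applied when delta updates are enabled and the frame is
+ * neither lossless nor using intra block copy.
+ */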
+static void rockchip_vpu981_av1_dec_set_loopfilter(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_loop_filter *loop_filter = &frame->loop_filter;
+ bool filtering_dis = (loop_filter->level[0] == 0) && (loop_filter->level[1] == 0);
+ struct hantro_dev *vpu = ctx->dev;
+
+ hantro_reg_write(vpu, &av1_filtering_dis, filtering_dis);
+ hantro_reg_write(vpu, &av1_filt_level_base_gt32, loop_filter->level[0] > 32);
+ hantro_reg_write(vpu, &av1_filt_sharpness, loop_filter->sharpness);
+
+ hantro_reg_write(vpu, &av1_filt_level0, loop_filter->level[0]);
+ hantro_reg_write(vpu, &av1_filt_level1, loop_filter->level[1]);
+ hantro_reg_write(vpu, &av1_filt_level2, loop_filter->level[2]);
+ hantro_reg_write(vpu, &av1_filt_level3, loop_filter->level[3]);
+
+ if (loop_filter->flags & V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED &&
+ !rockchip_vpu981_av1_dec_is_lossless(ctx) &&
+ !(frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC)) {
+ hantro_reg_write(vpu, &av1_filt_ref_adj_0,
+ loop_filter->ref_deltas[0]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_1,
+ loop_filter->ref_deltas[1]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_2,
+ loop_filter->ref_deltas[2]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_3,
+ loop_filter->ref_deltas[3]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_4,
+ loop_filter->ref_deltas[4]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_5,
+ loop_filter->ref_deltas[5]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_6,
+ loop_filter->ref_deltas[6]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_7,
+ loop_filter->ref_deltas[7]);
+ hantro_reg_write(vpu, &av1_filt_mb_adj_0,
+ loop_filter->mode_deltas[0]);
+ hantro_reg_write(vpu, &av1_filt_mb_adj_1,
+ loop_filter->mode_deltas[1]);
+ } else {
+ hantro_reg_write(vpu, &av1_filt_ref_adj_0, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_1, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_2, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_3, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_4, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_5, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_6, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_7, 0);
+ hantro_reg_write(vpu, &av1_filt_mb_adj_0, 0);
+ hantro_reg_write(vpu, &av1_filt_mb_adj_1, 0);
+ }
+
+ hantro_write_addr(vpu, AV1_DB_DATA_COL, av1_dec->db_data_col.dma);
+ hantro_write_addr(vpu, AV1_DB_CTRL_COL, av1_dec->db_ctrl_col.dma);
+}
+
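+/*
+ * After decoding, unless frame-end CDF update is disabled, copy the CDFs
+ * updated by the hardware into the frame context slots refreshed by this
+ * frame. For intra frames the regular MV CDFs are kept and the updated
+ * ones are stored aside as the intra block copy (NDVC) MV CDFs.
+ */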
+static void rockchip_vpu981_av1_dec_update_prob(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ bool frame_is_intra = IS_INTRA(frame->frame_type);
+ struct av1cdfs *out_cdfs = (struct av1cdfs *)av1_dec->prob_tbl_out.cpu;
+ int i;
+
+ if (frame->flags & V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF)
+ return;
+
+ for (i = 0; i < NUM_REF_FRAMES; i++) {
+ if (frame->refresh_frame_flags & BIT(i)) {
+ struct mvcdfs stored_mv_cdf;
+
+ rockchip_av1_get_cdfs(ctx, i);
+ stored_mv_cdf = av1_dec->cdfs->mv_cdf;
+ *av1_dec->cdfs = *out_cdfs;
+ if (frame_is_intra) {
+ av1_dec->cdfs->mv_cdf = stored_mv_cdf;
+ *av1_dec->cdfs_ndvc = out_cdfs->mv_cdf;
+ }
+ rockchip_av1_store_cdfs(ctx,
+ frame->refresh_frame_flags);
+ break;
+ }
+ }
+}
+
+void rockchip_vpu981_av1_dec_done(struct hantro_ctx *ctx)
+{
+ rockchip_vpu981_av1_dec_update_prob(ctx);
+}
+
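+/*
+ * Select the CDFs used to decode this frame: the defaults (with
+ * coefficient probabilities reset from base_q_idx) when there is no
+ * usable primary reference, otherwise the CDFs stored for the primary
+ * reference. The chosen CDFs are copied into the probability table and,
+ * for intra frames, the MV area is overwritten with the intra block copy
+ * MV CDFs.
+ */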
+static void rockchip_vpu981_av1_dec_set_prob(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_quantization *quantization = &frame->quantization;
+ struct hantro_dev *vpu = ctx->dev;
+ bool error_resilient_mode =
+ !!(frame->flags & V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE);
+ bool frame_is_intra = IS_INTRA(frame->frame_type);
+
+ if (error_resilient_mode || frame_is_intra ||
+ frame->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
+ av1_dec->cdfs = &av1_dec->default_cdfs;
+ av1_dec->cdfs_ndvc = &av1_dec->default_cdfs_ndvc;
+ rockchip_av1_default_coeff_probs(quantization->base_q_idx,
+ av1_dec->cdfs);
+ } else {
+ rockchip_av1_get_cdfs(ctx, frame->ref_frame_idx[frame->primary_ref_frame]);
+ }
+ rockchip_av1_store_cdfs(ctx, frame->refresh_frame_flags);
+
+ memcpy(av1_dec->prob_tbl.cpu, av1_dec->cdfs, sizeof(struct av1cdfs));
+
+ if (frame_is_intra) {
+ int mv_offset = offsetof(struct av1cdfs, mv_cdf);
+ /* Overwrite MV context area with intrabc MV context */
+ memcpy(av1_dec->prob_tbl.cpu + mv_offset, av1_dec->cdfs_ndvc,
+ sizeof(struct mvcdfs));
+ }
+
+ hantro_write_addr(vpu, AV1_PROP_TABLE_OUT, av1_dec->prob_tbl_out.dma);
+ hantro_write_addr(vpu, AV1_PROP_TABLE, av1_dec->prob_tbl.dma);
+}
+
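+/*
+ * Build a 256-entry film grain scaling LUT by piecewise-linear
+ * interpolation between the (value, scaling) control points, matching
+ * the AV1 film grain scaling function.
+ */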
+static void
+rockchip_vpu981_av1_dec_init_scaling_function(const u8 *values, const u8 *scaling,
+ u8 num_points, u8 *scaling_lut)
+{
+ int i, point;
+
+ if (num_points == 0) {
+ memset(scaling_lut, 0, 256);
+ return;
+ }
+
+ for (point = 0; point < num_points - 1; point++) {
+ int x;
+ s32 delta_y = scaling[point + 1] - scaling[point];
+ s32 delta_x = values[point + 1] - values[point];
+ s64 delta =
+ delta_x ? delta_y * ((65536 + (delta_x >> 1)) /
+ delta_x) : 0;
+
+ for (x = 0; x < delta_x; x++) {
+ scaling_lut[values[point] + x] =
+ scaling[point] +
+ (s32)((x * delta + 32768) >> 16);
+ }
+ }
+
+ for (i = values[num_points - 1]; i < 256; i++)
+ scaling_lut[i] = scaling[num_points - 1];
+}
+
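+/*
+ * Program film grain synthesis: the scaling LUTs and chroma multipliers
+ * go into the film grain buffer and registers, while the luma and chroma
+ * grain blocks are generated in software from the AR coefficients and
+ * cropped into the 64x64 luma and interleaved 32x32 chroma templates the
+ * hardware consumes.
+ */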
+static void rockchip_vpu981_av1_dec_set_fgs(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_film_grain *film_grain = ctrls->film_grain;
+ struct rockchip_av1_film_grain *fgmem = av1_dec->film_grain.cpu;
+ struct hantro_dev *vpu = ctx->dev;
+ bool scaling_from_luma =
+ !!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA);
+ s32 (*ar_coeffs_y)[24];
+ s32 (*ar_coeffs_cb)[25];
+ s32 (*ar_coeffs_cr)[25];
+ s32 (*luma_grain_block)[73][82];
+ s32 (*cb_grain_block)[38][44];
+ s32 (*cr_grain_block)[38][44];
+ s32 ar_coeff_lag, ar_coeff_shift;
+ s32 grain_scale_shift, bitdepth;
+ s32 grain_center, grain_min, grain_max;
+ int i, j;
+
+ hantro_reg_write(vpu, &av1_apply_grain, 0);
+
+ if (!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN)) {
+ hantro_reg_write(vpu, &av1_num_y_points_b, 0);
+ hantro_reg_write(vpu, &av1_num_cb_points_b, 0);
+ hantro_reg_write(vpu, &av1_num_cr_points_b, 0);
+ hantro_reg_write(vpu, &av1_scaling_shift, 0);
+ hantro_reg_write(vpu, &av1_cb_mult, 0);
+ hantro_reg_write(vpu, &av1_cb_luma_mult, 0);
+ hantro_reg_write(vpu, &av1_cb_offset, 0);
+ hantro_reg_write(vpu, &av1_cr_mult, 0);
+ hantro_reg_write(vpu, &av1_cr_luma_mult, 0);
+ hantro_reg_write(vpu, &av1_cr_offset, 0);
+ hantro_reg_write(vpu, &av1_overlap_flag, 0);
+ hantro_reg_write(vpu, &av1_clip_to_restricted_range, 0);
+ hantro_reg_write(vpu, &av1_chroma_scaling_from_luma, 0);
+ hantro_reg_write(vpu, &av1_random_seed, 0);
+ hantro_write_addr(vpu, AV1_FILM_GRAIN, 0);
+ return;
+ }
+
+ ar_coeffs_y = kzalloc(sizeof(int32_t) * 24, GFP_KERNEL);
+ ar_coeffs_cb = kzalloc(sizeof(int32_t) * 25, GFP_KERNEL);
+ ar_coeffs_cr = kzalloc(sizeof(int32_t) * 25, GFP_KERNEL);
+ luma_grain_block = kzalloc(sizeof(int32_t) * 73 * 82, GFP_KERNEL);
+ cb_grain_block = kzalloc(sizeof(int32_t) * 38 * 44, GFP_KERNEL);
+ cr_grain_block = kzalloc(sizeof(int32_t) * 38 * 44, GFP_KERNEL);
+
+ if (!ar_coeffs_y || !ar_coeffs_cb || !ar_coeffs_cr ||
+ !luma_grain_block || !cb_grain_block || !cr_grain_block) {
+ pr_warn("Failed to allocate memory for film grain parameters\n");
+ goto alloc_fail;
+ }
+
+ hantro_reg_write(vpu, &av1_apply_grain, 1);
+
+ hantro_reg_write(vpu, &av1_num_y_points_b,
+ film_grain->num_y_points > 0);
+ hantro_reg_write(vpu, &av1_num_cb_points_b,
+ film_grain->num_cb_points > 0);
+ hantro_reg_write(vpu, &av1_num_cr_points_b,
+ film_grain->num_cr_points > 0);
+ hantro_reg_write(vpu, &av1_scaling_shift,
+ film_grain->grain_scaling_minus_8 + 8);
+
+ if (!scaling_from_luma) {
+ hantro_reg_write(vpu, &av1_cb_mult, film_grain->cb_mult - 128);
+ hantro_reg_write(vpu, &av1_cb_luma_mult, film_grain->cb_luma_mult - 128);
+ hantro_reg_write(vpu, &av1_cb_offset, film_grain->cb_offset - 256);
+ hantro_reg_write(vpu, &av1_cr_mult, film_grain->cr_mult - 128);
+ hantro_reg_write(vpu, &av1_cr_luma_mult, film_grain->cr_luma_mult - 128);
+ hantro_reg_write(vpu, &av1_cr_offset, film_grain->cr_offset - 256);
+ } else {
+ hantro_reg_write(vpu, &av1_cb_mult, 0);
+ hantro_reg_write(vpu, &av1_cb_luma_mult, 0);
+ hantro_reg_write(vpu, &av1_cb_offset, 0);
+ hantro_reg_write(vpu, &av1_cr_mult, 0);
+ hantro_reg_write(vpu, &av1_cr_luma_mult, 0);
+ hantro_reg_write(vpu, &av1_cr_offset, 0);
+ }
+
+ hantro_reg_write(vpu, &av1_overlap_flag,
+ !!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP));
+ hantro_reg_write(vpu, &av1_clip_to_restricted_range,
+ !!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE));
+ hantro_reg_write(vpu, &av1_chroma_scaling_from_luma, scaling_from_luma);
+ hantro_reg_write(vpu, &av1_random_seed, film_grain->grain_seed);
+
+ rockchip_vpu981_av1_dec_init_scaling_function(film_grain->point_y_value,
+ film_grain->point_y_scaling,
+ film_grain->num_y_points,
+ fgmem->scaling_lut_y);
+
+ if (film_grain->flags &
+ V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA) {
+ memcpy(fgmem->scaling_lut_cb, fgmem->scaling_lut_y,
+ sizeof(*fgmem->scaling_lut_y) * 256);
+ memcpy(fgmem->scaling_lut_cr, fgmem->scaling_lut_y,
+ sizeof(*fgmem->scaling_lut_y) * 256);
+ } else {
+ rockchip_vpu981_av1_dec_init_scaling_function
+ (film_grain->point_cb_value, film_grain->point_cb_scaling,
+ film_grain->num_cb_points, fgmem->scaling_lut_cb);
+ rockchip_vpu981_av1_dec_init_scaling_function
+ (film_grain->point_cr_value, film_grain->point_cr_scaling,
+ film_grain->num_cr_points, fgmem->scaling_lut_cr);
+ }
+
+ for (i = 0; i < V4L2_AV1_AR_COEFFS_SIZE; i++) {
+ if (i < 24)
+ (*ar_coeffs_y)[i] = film_grain->ar_coeffs_y_plus_128[i] - 128;
+ (*ar_coeffs_cb)[i] = film_grain->ar_coeffs_cb_plus_128[i] - 128;
+ (*ar_coeffs_cr)[i] = film_grain->ar_coeffs_cr_plus_128[i] - 128;
+ }
+
+ ar_coeff_lag = film_grain->ar_coeff_lag;
+ ar_coeff_shift = film_grain->ar_coeff_shift_minus_6 + 6;
+ grain_scale_shift = film_grain->grain_scale_shift;
+ bitdepth = ctx->bit_depth;
+ grain_center = 128 << (bitdepth - 8);
+ grain_min = 0 - grain_center;
+ grain_max = (256 << (bitdepth - 8)) - 1 - grain_center;
+
+ rockchip_av1_generate_luma_grain_block(luma_grain_block, bitdepth,
+ film_grain->num_y_points, grain_scale_shift,
+ ar_coeff_lag, ar_coeffs_y, ar_coeff_shift,
+ grain_min, grain_max, film_grain->grain_seed);
+
+ rockchip_av1_generate_chroma_grain_block(luma_grain_block, cb_grain_block,
+ cr_grain_block, bitdepth,
+ film_grain->num_y_points,
+ film_grain->num_cb_points,
+ film_grain->num_cr_points,
+ grain_scale_shift, ar_coeff_lag, ar_coeffs_cb,
+ ar_coeffs_cr, ar_coeff_shift, grain_min,
+ grain_max,
+ scaling_from_luma,
+ film_grain->grain_seed);
+
+ for (i = 0; i < 64; i++) {
+ for (j = 0; j < 64; j++)
+ fgmem->cropped_luma_grain_block[i * 64 + j] =
+ (*luma_grain_block)[i + 9][j + 9];
+ }
+
+ for (i = 0; i < 32; i++) {
+ for (j = 0; j < 32; j++) {
+ fgmem->cropped_chroma_grain_block[i * 64 + 2 * j] =
+ (*cb_grain_block)[i + 6][j + 6];
+ fgmem->cropped_chroma_grain_block[i * 64 + 2 * j + 1] =
+ (*cr_grain_block)[i + 6][j + 6];
+ }
+ }
+
+ hantro_write_addr(vpu, AV1_FILM_GRAIN, av1_dec->film_grain.dma);
+
+alloc_fail:
+ kfree(ar_coeffs_y);
+ kfree(ar_coeffs_cb);
+ kfree(ar_coeffs_cr);
+ kfree(luma_grain_block);
+ kfree(cb_grain_block);
+ kfree(cr_grain_block);
+}
+
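+/*
+ * Pack the CDEF primary and secondary strengths of all 2^bits filter
+ * presets into the packed strength registers; a secondary strength of 4
+ * is stored as 3 to fit the two-bit field.
+ */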
+static void rockchip_vpu981_av1_dec_set_cdef(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_cdef *cdef = &frame->cdef;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 luma_pri_strength = 0;
+ u16 luma_sec_strength = 0;
+ u32 chroma_pri_strength = 0;
+ u16 chroma_sec_strength = 0;
+ int i;
+
+ hantro_reg_write(vpu, &av1_cdef_bits, cdef->bits);
+ hantro_reg_write(vpu, &av1_cdef_damping, cdef->damping_minus_3);
+
+ for (i = 0; i < BIT(cdef->bits); i++) {
+ luma_pri_strength |= cdef->y_pri_strength[i] << (i * 4);
+ if (cdef->y_sec_strength[i] == 4)
+ luma_sec_strength |= 3 << (i * 2);
+ else
+ luma_sec_strength |= cdef->y_sec_strength[i] << (i * 2);
+
+ chroma_pri_strength |= cdef->uv_pri_strength[i] << (i * 4);
+ if (cdef->uv_sec_strength[i] == 4)
+ chroma_sec_strength |= 3 << (i * 2);
+ else
+ chroma_sec_strength |= cdef->uv_sec_strength[i] << (i * 2);
+ }
+
+ hantro_reg_write(vpu, &av1_cdef_luma_primary_strength,
+ luma_pri_strength);
+ hantro_reg_write(vpu, &av1_cdef_luma_secondary_strength,
+ luma_sec_strength);
+ hantro_reg_write(vpu, &av1_cdef_chroma_primary_strength,
+ chroma_pri_strength);
+ hantro_reg_write(vpu, &av1_cdef_chroma_secondary_strength,
+ chroma_sec_strength);
+
+ hantro_write_addr(vpu, AV1_CDEF_COL, av1_dec->cdef_col.dma);
+}
+
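+/*
+ * Configure loop restoration: pack the per-plane frame restoration type
+ * and restoration unit size exponent into their packed registers.
+ */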
+static void rockchip_vpu981_av1_dec_set_lr(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_loop_restoration *loop_restoration =
+ &frame->loop_restoration;
+ struct hantro_dev *vpu = ctx->dev;
+ u16 lr_type = 0, lr_unit_size = 0;
+ u8 restoration_unit_size[V4L2_AV1_NUM_PLANES_MAX] = { 3, 3, 3 };
+ int i;
+
+ if (loop_restoration->flags & V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR) {
+ restoration_unit_size[0] = 1 + loop_restoration->lr_unit_shift;
+ restoration_unit_size[1] =
+ 1 + loop_restoration->lr_unit_shift - loop_restoration->lr_uv_shift;
+ restoration_unit_size[2] =
+ 1 + loop_restoration->lr_unit_shift - loop_restoration->lr_uv_shift;
+ }
+
+ for (i = 0; i < V4L2_AV1_NUM_PLANES_MAX; i++) {
+ lr_type |=
+ loop_restoration->frame_restoration_type[i] << (i * 2);
+ lr_unit_size |= restoration_unit_size[i] << (i * 2);
+ }
+
+ hantro_reg_write(vpu, &av1_lr_type, lr_type);
+ hantro_reg_write(vpu, &av1_lr_unit_size, lr_unit_size);
+ hantro_write_addr(vpu, AV1_LR_COL, av1_dec->lr_col.dma);
+}
+
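+/*
+ * Derive the super-resolution parameters: the downscaled coded width and
+ * the fixed-point horizontal stepping, inverse stepping and initial
+ * subpel positions for luma and chroma, following the AV1 upscaling
+ * equations. When no actual downscaling takes place only the defaults
+ * are programmed and av1_superres_is_scaled stays 0.
+ */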
+static void rockchip_vpu981_av1_dec_set_superres_params(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ struct hantro_dev *vpu = ctx->dev;
+ u8 superres_scale_denominator = SCALE_NUMERATOR;
+ int superres_luma_step = RS_SCALE_SUBPEL_BITS;
+ int superres_chroma_step = RS_SCALE_SUBPEL_BITS;
+ int superres_luma_step_invra = RS_SCALE_SUBPEL_BITS;
+ int superres_chroma_step_invra = RS_SCALE_SUBPEL_BITS;
+ int superres_init_luma_subpel_x = 0;
+ int superres_init_chroma_subpel_x = 0;
+ int superres_is_scaled = 0;
+ int min_w = min_t(uint32_t, 16, frame->upscaled_width);
+ int upscaled_luma, downscaled_luma;
+ int downscaled_chroma, upscaled_chroma;
+ int step_luma, step_chroma;
+ int err_luma, err_chroma;
+ int initial_luma, initial_chroma;
+ int width = 0;
+
+ if (frame->flags & V4L2_AV1_FRAME_FLAG_USE_SUPERRES)
+ superres_scale_denominator = frame->superres_denom;
+
+ if (superres_scale_denominator <= SCALE_NUMERATOR)
+ goto set_regs;
+
+ width = (frame->upscaled_width * SCALE_NUMERATOR +
+ (superres_scale_denominator / 2)) / superres_scale_denominator;
+
+ if (width < min_w)
+ width = min_w;
+
+ if (width == frame->upscaled_width)
+ goto set_regs;
+
+ superres_is_scaled = 1;
+ upscaled_luma = frame->upscaled_width;
+ downscaled_luma = width;
+ downscaled_chroma = (downscaled_luma + 1) >> 1;
+ upscaled_chroma = (upscaled_luma + 1) >> 1;
+ step_luma =
+ ((downscaled_luma << RS_SCALE_SUBPEL_BITS) +
+ (upscaled_luma / 2)) / upscaled_luma;
+ step_chroma =
+ ((downscaled_chroma << RS_SCALE_SUBPEL_BITS) +
+ (upscaled_chroma / 2)) / upscaled_chroma;
+ err_luma =
+ (upscaled_luma * step_luma)
+ - (downscaled_luma << RS_SCALE_SUBPEL_BITS);
+ err_chroma =
+ (upscaled_chroma * step_chroma)
+ - (downscaled_chroma << RS_SCALE_SUBPEL_BITS);
+ initial_luma =
+ ((-((upscaled_luma - downscaled_luma) << (RS_SCALE_SUBPEL_BITS - 1))
+ + upscaled_luma / 2)
+ / upscaled_luma + (1 << (RS_SCALE_EXTRA_BITS - 1)) - err_luma / 2)
+ & RS_SCALE_SUBPEL_MASK;
+ initial_chroma =
+ ((-((upscaled_chroma - downscaled_chroma) << (RS_SCALE_SUBPEL_BITS - 1))
+ + upscaled_chroma / 2)
+ / upscaled_chroma + (1 << (RS_SCALE_EXTRA_BITS - 1)) - err_chroma / 2)
+ & RS_SCALE_SUBPEL_MASK;
+ superres_luma_step = step_luma;
+ superres_chroma_step = step_chroma;
+ superres_luma_step_invra =
+ ((upscaled_luma << RS_SCALE_SUBPEL_BITS) + (downscaled_luma / 2))
+ / downscaled_luma;
+ superres_chroma_step_invra =
+ ((upscaled_chroma << RS_SCALE_SUBPEL_BITS) + (downscaled_chroma / 2))
+ / downscaled_chroma;
+ superres_init_luma_subpel_x = initial_luma;
+ superres_init_chroma_subpel_x = initial_chroma;
+
+set_regs:
+ hantro_reg_write(vpu, &av1_superres_pic_width, frame->upscaled_width);
+
+ if (frame->flags & V4L2_AV1_FRAME_FLAG_USE_SUPERRES)
+ hantro_reg_write(vpu, &av1_scale_denom_minus9,
+ frame->superres_denom - SUPERRES_SCALE_DENOMINATOR_MIN);
+ else
+ hantro_reg_write(vpu, &av1_scale_denom_minus9, frame->superres_denom);
+
+ hantro_reg_write(vpu, &av1_superres_luma_step, superres_luma_step);
+ hantro_reg_write(vpu, &av1_superres_chroma_step, superres_chroma_step);
+ hantro_reg_write(vpu, &av1_superres_luma_step_invra,
+ superres_luma_step_invra);
+ hantro_reg_write(vpu, &av1_superres_chroma_step_invra,
+ superres_chroma_step_invra);
+ hantro_reg_write(vpu, &av1_superres_init_luma_subpel_x,
+ superres_init_luma_subpel_x);
+ hantro_reg_write(vpu, &av1_superres_init_chroma_subpel_x,
+ superres_init_chroma_subpel_x);
+ hantro_reg_write(vpu, &av1_superres_is_scaled, superres_is_scaled);
+
+ hantro_write_addr(vpu, AV1_SR_COL, av1_dec->sr_col.dma);
+}
+
+static void rockchip_vpu981_av1_dec_set_picture_dimensions(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ struct hantro_dev *vpu = ctx->dev;
+ int pic_width_in_cbs = DIV_ROUND_UP(frame->frame_width_minus_1 + 1, 8);
+ int pic_height_in_cbs = DIV_ROUND_UP(frame->frame_height_minus_1 + 1, 8);
+ int pic_width_pad = ALIGN(frame->frame_width_minus_1 + 1, 8)
+ - (frame->frame_width_minus_1 + 1);
+ int pic_height_pad = ALIGN(frame->frame_height_minus_1 + 1, 8)
+ - (frame->frame_height_minus_1 + 1);
+
+ hantro_reg_write(vpu, &av1_pic_width_in_cbs, pic_width_in_cbs);
+ hantro_reg_write(vpu, &av1_pic_height_in_cbs, pic_height_in_cbs);
+ hantro_reg_write(vpu, &av1_pic_width_pad, pic_width_pad);
+ hantro_reg_write(vpu, &av1_pic_height_pad, pic_height_pad);
+
+ rockchip_vpu981_av1_dec_set_superres_params(ctx);
+}
+
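+/*
+ * Select up to three reference frames whose motion fields can be
+ * projected onto the current frame (LAST, BWDREF, ALTREF2, ALTREF and
+ * LAST2 are considered in that order, skipping mismatched or intra-only
+ * references), then program the temporal MV offsets for each selected
+ * motion field as well as the current frame's offsets to every
+ * reference.
+ */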
+static void rockchip_vpu981_av1_dec_set_other_frames(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ struct hantro_dev *vpu = ctx->dev;
+ bool use_ref_frame_mvs =
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS);
+ int cur_frame_offset = frame->order_hint;
+ int alt_frame_offset = 0;
+ int gld_frame_offset = 0;
+ int bwd_frame_offset = 0;
+ int alt2_frame_offset = 0;
+ int refs_selected[3] = { 0, 0, 0 };
+ int cur_mi_cols = DIV_ROUND_UP(frame->frame_width_minus_1 + 1, 8);
+ int cur_mi_rows = DIV_ROUND_UP(frame->frame_height_minus_1 + 1, 8);
+ int cur_offset[V4L2_AV1_TOTAL_REFS_PER_FRAME - 1];
+ int cur_roffset[V4L2_AV1_TOTAL_REFS_PER_FRAME - 1];
+ int mf_types[3] = { 0, 0, 0 };
+ int ref_stamp = 2;
+ int ref_ind = 0;
+ int rf, idx;
+
+ alt_frame_offset = rockchip_vpu981_get_order_hint(ctx, ALT_BUF_IDX);
+ gld_frame_offset = rockchip_vpu981_get_order_hint(ctx, GLD_BUF_IDX);
+ bwd_frame_offset = rockchip_vpu981_get_order_hint(ctx, BWD_BUF_IDX);
+ alt2_frame_offset = rockchip_vpu981_get_order_hint(ctx, ALT2_BUF_IDX);
+
+ idx = rockchip_vpu981_get_frame_index(ctx, LST_BUF_IDX);
+ if (idx >= 0) {
+ int alt_frame_offset_in_lst =
+ av1_dec->frame_refs[idx].order_hints[V4L2_AV1_REF_ALTREF_FRAME];
+ bool is_lst_overlay =
+ (alt_frame_offset_in_lst == gld_frame_offset);
+
+ if (!is_lst_overlay) {
+ int lst_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int lst_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool lst_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (lst_mi_cols == cur_mi_cols &&
+ lst_mi_rows == cur_mi_rows && !lst_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_LAST_FRAME;
+ refs_selected[ref_ind++] = LST_BUF_IDX;
+ }
+ }
+ ref_stamp--;
+ }
+
+ idx = rockchip_vpu981_get_frame_index(ctx, BWD_BUF_IDX);
+ if (rockchip_vpu981_av1_dec_get_dist(ctx, bwd_frame_offset, cur_frame_offset) > 0) {
+ int bwd_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int bwd_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool bwd_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (bwd_mi_cols == cur_mi_cols && bwd_mi_rows == cur_mi_rows &&
+ !bwd_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_BWDREF_FRAME;
+ refs_selected[ref_ind++] = BWD_BUF_IDX;
+ ref_stamp--;
+ }
+ }
+
+ idx = rockchip_vpu981_get_frame_index(ctx, ALT2_BUF_IDX);
+ if (rockchip_vpu981_av1_dec_get_dist(ctx, alt2_frame_offset, cur_frame_offset) > 0) {
+ int alt2_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int alt2_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool alt2_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (alt2_mi_cols == cur_mi_cols && alt2_mi_rows == cur_mi_rows &&
+ !alt2_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_ALTREF2_FRAME;
+ refs_selected[ref_ind++] = ALT2_BUF_IDX;
+ ref_stamp--;
+ }
+ }
+
+ idx = rockchip_vpu981_get_frame_index(ctx, ALT_BUF_IDX);
+ if (rockchip_vpu981_av1_dec_get_dist(ctx, alt_frame_offset, cur_frame_offset) > 0 &&
+ ref_stamp >= 0) {
+ int alt_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int alt_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool alt_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (alt_mi_cols == cur_mi_cols && alt_mi_rows == cur_mi_rows &&
+ !alt_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_ALTREF_FRAME;
+ refs_selected[ref_ind++] = ALT_BUF_IDX;
+ ref_stamp--;
+ }
+ }
+
+ idx = rockchip_vpu981_get_frame_index(ctx, LST2_BUF_IDX);
+ if (idx >= 0 && ref_stamp >= 0) {
+ int lst2_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int lst2_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool lst2_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (lst2_mi_cols == cur_mi_cols && lst2_mi_rows == cur_mi_rows &&
+ !lst2_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_LAST2_FRAME;
+ refs_selected[ref_ind++] = LST2_BUF_IDX;
+ ref_stamp--;
+ }
+ }
+
+ for (rf = 0; rf < V4L2_AV1_TOTAL_REFS_PER_FRAME - 1; ++rf) {
+ idx = rockchip_vpu981_get_frame_index(ctx, rf);
+ if (idx >= 0) {
+ int rf_order_hint = rockchip_vpu981_get_order_hint(ctx, rf);
+
+ cur_offset[rf] =
+ rockchip_vpu981_av1_dec_get_dist(ctx, cur_frame_offset, rf_order_hint);
+ cur_roffset[rf] =
+ rockchip_vpu981_av1_dec_get_dist(ctx, rf_order_hint, cur_frame_offset);
+ } else {
+ cur_offset[rf] = 0;
+ cur_roffset[rf] = 0;
+ }
+ }
+
+ hantro_reg_write(vpu, &av1_use_temporal0_mvs, 0);
+ hantro_reg_write(vpu, &av1_use_temporal1_mvs, 0);
+ hantro_reg_write(vpu, &av1_use_temporal2_mvs, 0);
+ hantro_reg_write(vpu, &av1_use_temporal3_mvs, 0);
+
+ hantro_reg_write(vpu, &av1_mf1_last_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_last2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_last3_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_golden_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_bwdref_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_altref2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_altref_offset, 0);
+
+ if (use_ref_frame_mvs && ref_ind > 0 &&
+ cur_offset[mf_types[0] - V4L2_AV1_REF_LAST_FRAME] <= MAX_FRAME_DISTANCE &&
+ cur_offset[mf_types[0] - V4L2_AV1_REF_LAST_FRAME] >= -MAX_FRAME_DISTANCE) {
+ int rf = rockchip_vpu981_get_order_hint(ctx, refs_selected[0]);
+ int idx = rockchip_vpu981_get_frame_index(ctx, refs_selected[0]);
+ u32 *oh = av1_dec->frame_refs[idx].order_hints;
+ int val;
+
+ hantro_reg_write(vpu, &av1_use_temporal0_mvs, 1);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_last_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_last2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST3_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_last3_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_GOLDEN_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_golden_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_BWDREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_bwdref_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_altref2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_altref_offset, val);
+ }
+
+ hantro_reg_write(vpu, &av1_mf2_last_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_last2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_last3_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_golden_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_bwdref_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_altref2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_altref_offset, 0);
+
+ if (use_ref_frame_mvs && ref_ind > 1 &&
+ cur_offset[mf_types[1] - V4L2_AV1_REF_LAST_FRAME] <= MAX_FRAME_DISTANCE &&
+ cur_offset[mf_types[1] - V4L2_AV1_REF_LAST_FRAME] >= -MAX_FRAME_DISTANCE) {
+ int rf = rockchip_vpu981_get_order_hint(ctx, refs_selected[1]);
+ int idx = rockchip_vpu981_get_frame_index(ctx, refs_selected[1]);
+ u32 *oh = av1_dec->frame_refs[idx].order_hints;
+ int val;
+
+ hantro_reg_write(vpu, &av1_use_temporal1_mvs, 1);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_last_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_last2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST3_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_last3_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_GOLDEN_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_golden_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_BWDREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_bwdref_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_altref2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_altref_offset, val);
+ }
+
+ hantro_reg_write(vpu, &av1_mf3_last_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_last2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_last3_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_golden_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_bwdref_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_altref2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_altref_offset, 0);
+
+ if (use_ref_frame_mvs && ref_ind > 2 &&
+ cur_offset[mf_types[2] - V4L2_AV1_REF_LAST_FRAME] <= MAX_FRAME_DISTANCE &&
+ cur_offset[mf_types[2] - V4L2_AV1_REF_LAST_FRAME] >= -MAX_FRAME_DISTANCE) {
+ int rf = rockchip_vpu981_get_order_hint(ctx, refs_selected[2]);
+ int idx = rockchip_vpu981_get_frame_index(ctx, refs_selected[2]);
+ u32 *oh = av1_dec->frame_refs[idx].order_hints;
+ int val;
+
+ hantro_reg_write(vpu, &av1_use_temporal2_mvs, 1);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_last_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_last2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST3_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_last3_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_GOLDEN_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_golden_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_BWDREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_bwdref_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_altref2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_altref_offset, val);
+ }
+
+ hantro_reg_write(vpu, &av1_cur_last_offset, cur_offset[0]);
+ hantro_reg_write(vpu, &av1_cur_last2_offset, cur_offset[1]);
+ hantro_reg_write(vpu, &av1_cur_last3_offset, cur_offset[2]);
+ hantro_reg_write(vpu, &av1_cur_golden_offset, cur_offset[3]);
+ hantro_reg_write(vpu, &av1_cur_bwdref_offset, cur_offset[4]);
+ hantro_reg_write(vpu, &av1_cur_altref2_offset, cur_offset[5]);
+ hantro_reg_write(vpu, &av1_cur_altref_offset, cur_offset[6]);
+
+ hantro_reg_write(vpu, &av1_cur_last_roffset, cur_roffset[0]);
+ hantro_reg_write(vpu, &av1_cur_last2_roffset, cur_roffset[1]);
+ hantro_reg_write(vpu, &av1_cur_last3_roffset, cur_roffset[2]);
+ hantro_reg_write(vpu, &av1_cur_golden_roffset, cur_roffset[3]);
+ hantro_reg_write(vpu, &av1_cur_bwdref_roffset, cur_roffset[4]);
+ hantro_reg_write(vpu, &av1_cur_altref2_roffset, cur_roffset[5]);
+ hantro_reg_write(vpu, &av1_cur_altref_roffset, cur_roffset[6]);
+
+ hantro_reg_write(vpu, &av1_mf1_type, mf_types[0] - V4L2_AV1_REF_LAST_FRAME);
+ hantro_reg_write(vpu, &av1_mf2_type, mf_types[1] - V4L2_AV1_REF_LAST_FRAME);
+ hantro_reg_write(vpu, &av1_mf3_type, mf_types[2] - V4L2_AV1_REF_LAST_FRAME);
+}
+
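+/*
+ * Program the reference frames: count the distinct reference slots in
+ * use, set each reference's size, scaling factors, buffer addresses and
+ * sign bias, and the per-reference global motion mode. Frames using
+ * intra block copy reference only the current frame itself.
+ */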
+static void rockchip_vpu981_av1_dec_set_reference_frames(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ int frame_type = frame->frame_type;
+ bool allow_intrabc = !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC);
+ int ref_count[AV1DEC_MAX_PIC_BUFFERS] = { 0 };
+ struct hantro_dev *vpu = ctx->dev;
+ int i, ref_frames = 0;
+ bool scale_enable = false;
+
+ if (IS_INTRA(frame_type) && !allow_intrabc)
+ return;
+
+ if (!allow_intrabc) {
+ for (i = 0; i < V4L2_AV1_REFS_PER_FRAME; i++) {
+ int idx = rockchip_vpu981_get_frame_index(ctx, i);
+
+ if (idx >= 0)
+ ref_count[idx]++;
+ }
+
+ for (i = 0; i < AV1DEC_MAX_PIC_BUFFERS; i++) {
+ if (ref_count[i])
+ ref_frames++;
+ }
+ } else {
+ ref_frames = 1;
+ }
+ hantro_reg_write(vpu, &av1_ref_frames, ref_frames);
+
+ rockchip_vpu981_av1_dec_set_frame_sign_bias(ctx);
+
+ for (i = V4L2_AV1_REF_LAST_FRAME; i < V4L2_AV1_TOTAL_REFS_PER_FRAME; i++) {
+ u32 ref = i - 1;
+ int idx = 0;
+ int width, height;
+
+ if (allow_intrabc) {
+ idx = av1_dec->current_frame_index;
+ width = frame->frame_width_minus_1 + 1;
+ height = frame->frame_height_minus_1 + 1;
+ } else {
+ if (rockchip_vpu981_get_frame_index(ctx, ref) > 0)
+ idx = rockchip_vpu981_get_frame_index(ctx, ref);
+ width = av1_dec->frame_refs[idx].width;
+ height = av1_dec->frame_refs[idx].height;
+ }
+
+ scale_enable |=
+ rockchip_vpu981_av1_dec_set_ref(ctx, ref, idx, width,
+ height);
+
+ rockchip_vpu981_av1_dec_set_sign_bias(ctx, ref,
+ av1_dec->ref_frame_sign_bias[i]);
+ }
+ hantro_reg_write(vpu, &av1_ref_scaling_enable, scale_enable);
+
+ hantro_reg_write(vpu, &av1_ref0_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_LAST_FRAME]);
+ hantro_reg_write(vpu, &av1_ref1_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_LAST2_FRAME]);
+ hantro_reg_write(vpu, &av1_ref2_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_LAST3_FRAME]);
+ hantro_reg_write(vpu, &av1_ref3_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_GOLDEN_FRAME]);
+ hantro_reg_write(vpu, &av1_ref4_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_BWDREF_FRAME]);
+ hantro_reg_write(vpu, &av1_ref5_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_ALTREF2_FRAME]);
+ hantro_reg_write(vpu, &av1_ref6_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_ALTREF_FRAME]);
+
+ rockchip_vpu981_av1_dec_set_other_frames(ctx);
+}
+
+static void rockchip_vpu981_av1_dec_set_parameters(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+
+ hantro_reg_write(vpu, &av1_skip_mode,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT));
+ hantro_reg_write(vpu, &av1_tempor_mvp_e,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS));
+ hantro_reg_write(vpu, &av1_delta_lf_res_log,
+ ctrls->frame->loop_filter.delta_lf_res);
+ hantro_reg_write(vpu, &av1_delta_lf_multi,
+ !!(ctrls->frame->loop_filter.flags
+ & V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI));
+ hantro_reg_write(vpu, &av1_delta_lf_present,
+ !!(ctrls->frame->loop_filter.flags
+ & V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT));
+ hantro_reg_write(vpu, &av1_disable_cdf_update,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE));
+ hantro_reg_write(vpu, &av1_allow_warp,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION));
+ hantro_reg_write(vpu, &av1_show_frame,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_SHOW_FRAME));
+ hantro_reg_write(vpu, &av1_switchable_motion_mode,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE));
+ hantro_reg_write(vpu, &av1_enable_cdef,
+ !!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF));
+ hantro_reg_write(vpu, &av1_allow_masked_compound,
+ !!(ctrls->sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND));
+ hantro_reg_write(vpu, &av1_allow_interintra,
+ !!(ctrls->sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND));
+ hantro_reg_write(vpu, &av1_enable_intra_edge_filter,
+ !!(ctrls->sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER));
+ hantro_reg_write(vpu, &av1_allow_filter_intra,
+ !!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA));
+ hantro_reg_write(vpu, &av1_enable_jnt_comp,
+ !!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP));
+ hantro_reg_write(vpu, &av1_enable_dual_filter,
+ !!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER));
+ hantro_reg_write(vpu, &av1_reduced_tx_set_used,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET));
+ hantro_reg_write(vpu, &av1_allow_screen_content_tools,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS));
+ hantro_reg_write(vpu, &av1_allow_intrabc,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC));
+
+ if (!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS))
+ hantro_reg_write(vpu, &av1_force_interger_mv, 0);
+ else
+ hantro_reg_write(vpu, &av1_force_interger_mv,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV));
+
+ hantro_reg_write(vpu, &av1_blackwhite_e, 0);
+ hantro_reg_write(vpu, &av1_delta_q_res_log, ctrls->frame->quantization.delta_q_res);
+ hantro_reg_write(vpu, &av1_delta_q_present,
+ !!(ctrls->frame->quantization.flags
+ & V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT));
+
+ hantro_reg_write(vpu, &av1_idr_pic_e, !ctrls->frame->frame_type);
+ hantro_reg_write(vpu, &av1_quant_base_qindex, ctrls->frame->quantization.base_q_idx);
+ hantro_reg_write(vpu, &av1_bit_depth_y_minus8, ctx->bit_depth - 8);
+ hantro_reg_write(vpu, &av1_bit_depth_c_minus8, ctx->bit_depth - 8);
+
+ hantro_reg_write(vpu, &av1_mcomp_filt_type, ctrls->frame->interpolation_filter);
+ hantro_reg_write(vpu, &av1_high_prec_mv_e,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV));
+ hantro_reg_write(vpu, &av1_comp_pred_mode,
+ (ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT) ? 2 : 0);
+ hantro_reg_write(vpu, &av1_transform_mode, (ctrls->frame->tx_mode == 1) ? 3 : 4);
+ hantro_reg_write(vpu, &av1_max_cb_size,
+ (ctrls->sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK) ? 7 : 6);
+ hantro_reg_write(vpu, &av1_min_cb_size, 3);
+
+ hantro_reg_write(vpu, &av1_comp_pred_fixed_ref, 0);
+ hantro_reg_write(vpu, &av1_comp_pred_var_ref0_av1, 0);
+ hantro_reg_write(vpu, &av1_comp_pred_var_ref1_av1, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg0, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg1, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg2, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg3, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg4, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg5, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg6, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg7, 0);
+
+ hantro_reg_write(vpu, &av1_qp_delta_y_dc_av1, ctrls->frame->quantization.delta_q_y_dc);
+ hantro_reg_write(vpu, &av1_qp_delta_ch_dc_av1, ctrls->frame->quantization.delta_q_u_dc);
+ hantro_reg_write(vpu, &av1_qp_delta_ch_ac_av1, ctrls->frame->quantization.delta_q_u_ac);
+ if (ctrls->frame->quantization.flags & V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX) {
+ hantro_reg_write(vpu, &av1_qmlevel_y, ctrls->frame->quantization.qm_y);
+ hantro_reg_write(vpu, &av1_qmlevel_u, ctrls->frame->quantization.qm_u);
+ hantro_reg_write(vpu, &av1_qmlevel_v, ctrls->frame->quantization.qm_v);
+ } else {
+ hantro_reg_write(vpu, &av1_qmlevel_y, 0xff);
+ hantro_reg_write(vpu, &av1_qmlevel_u, 0xff);
+ hantro_reg_write(vpu, &av1_qmlevel_v, 0xff);
+ }
+
+ hantro_reg_write(vpu, &av1_lossless_e, rockchip_vpu981_av1_dec_is_lossless(ctx));
+ hantro_reg_write(vpu, &av1_quant_delta_v_dc, ctrls->frame->quantization.delta_q_v_dc);
+ hantro_reg_write(vpu, &av1_quant_delta_v_ac, ctrls->frame->quantization.delta_q_v_ac);
+
+ hantro_reg_write(vpu, &av1_skip_ref0,
+ (ctrls->frame->skip_mode_frame[0]) ? ctrls->frame->skip_mode_frame[0] : 1);
+ hantro_reg_write(vpu, &av1_skip_ref1,
+ (ctrls->frame->skip_mode_frame[1]) ? ctrls->frame->skip_mode_frame[1] : 1);
+
+ hantro_write_addr(vpu, AV1_MC_SYNC_CURR, av1_dec->tile_buf.dma);
+ hantro_write_addr(vpu, AV1_MC_SYNC_LEFT, av1_dec->tile_buf.dma);
+}
+
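+/*
+ * Point the hardware at the bitstream: the stream base address is the
+ * first tile offset rounded down to 16 bytes, with the remainder
+ * expressed as a bit offset via av1_strm_start_bit.
+ */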
+static void
+rockchip_vpu981_av1_dec_set_input_buffer(struct hantro_ctx *ctx,
+ struct vb2_v4l2_buffer *vb2_src)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_tile_group_entry *group_entry =
+ ctrls->tile_group_entry;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t src_dma;
+ u32 src_len, src_buf_len;
+ int start_bit, offset;
+
+ src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+ src_len = vb2_get_plane_payload(&vb2_src->vb2_buf, 0);
+ src_buf_len = vb2_plane_size(&vb2_src->vb2_buf, 0);
+
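+ /*
+  * Align the stream address down to 16 bytes; the bytes skipped by the
+  * alignment are passed to the hardware as a start-bit offset.
+  */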
+ start_bit = (group_entry[0].tile_offset & 0xf) * 8;
+ offset = group_entry[0].tile_offset & ~0xf;
+
+ hantro_reg_write(vpu, &av1_strm_buffer_len, src_buf_len);
+ hantro_reg_write(vpu, &av1_strm_start_bit, start_bit);
+ hantro_reg_write(vpu, &av1_stream_len, src_len);
+ hantro_reg_write(vpu, &av1_strm_start_offset, 0);
+ hantro_write_addr(vpu, AV1_INPUT_STREAM, src_dma + offset);
+}
+
+static void
+rockchip_vpu981_av1_dec_set_output_buffer(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_decoded_buffer *dst;
+ struct vb2_v4l2_buffer *vb2_dst;
+ dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
+ size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
+ size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
+
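+ /* Luma plane first, then chroma, then the motion vector data. */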
+ vb2_dst = av1_dec->frame_refs[av1_dec->current_frame_index].vb2_ref;
+ dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ hantro_write_addr(vpu, AV1_TILE_OUT_LU, luma_addr);
+ hantro_write_addr(vpu, AV1_TILE_OUT_CH, chroma_addr);
+ hantro_write_addr(vpu, AV1_TILE_OUT_MV, mv_addr);
+}
+
+int rockchip_vpu981_av1_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_src;
+ int ret;
+
+ hantro_start_prepare_run(ctx);
+
+ ret = rockchip_vpu981_av1_dec_prepare_run(ctx);
+ if (ret)
+ goto prepare_error;
+
+ vb2_src = hantro_get_src_buf(ctx);
+ if (!vb2_src) {
+ ret = -EINVAL;
+ goto prepare_error;
+ }
+
+ rockchip_vpu981_av1_dec_clean_refs(ctx);
+ rockchip_vpu981_av1_dec_frame_ref(ctx, vb2_src->vb2_buf.timestamp);
+
+ rockchip_vpu981_av1_dec_set_parameters(ctx);
+ rockchip_vpu981_av1_dec_set_global_model(ctx);
+ rockchip_vpu981_av1_dec_set_tile_info(ctx);
+ rockchip_vpu981_av1_dec_set_reference_frames(ctx);
+ rockchip_vpu981_av1_dec_set_segmentation(ctx);
+ rockchip_vpu981_av1_dec_set_loopfilter(ctx);
+ rockchip_vpu981_av1_dec_set_picture_dimensions(ctx);
+ rockchip_vpu981_av1_dec_set_cdef(ctx);
+ rockchip_vpu981_av1_dec_set_lr(ctx);
+ rockchip_vpu981_av1_dec_set_fgs(ctx);
+ rockchip_vpu981_av1_dec_set_prob(ctx);
+
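+ /* Static decode setup: AV1 mode, MV write-out and clock gating. */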
+ hantro_reg_write(vpu, &av1_dec_mode, AV1_DEC_MODE);
+ hantro_reg_write(vpu, &av1_dec_out_ec_byte_word, 0);
+ hantro_reg_write(vpu, &av1_write_mvs_e, 1);
+ hantro_reg_write(vpu, &av1_dec_out_ec_bypass, 1);
+ hantro_reg_write(vpu, &av1_dec_clk_gate_e, 1);
+
+ hantro_reg_write(vpu, &av1_dec_abort_e, 0);
+ hantro_reg_write(vpu, &av1_dec_tile_int_e, 0);
+
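+ /* Bus tuning: alignment, burst length and AXI outstanding thresholds. */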
+ hantro_reg_write(vpu, &av1_dec_alignment, 64);
+ hantro_reg_write(vpu, &av1_apf_disable, 0);
+ hantro_reg_write(vpu, &av1_apf_threshold, 8);
+ hantro_reg_write(vpu, &av1_dec_buswidth, 2);
+ hantro_reg_write(vpu, &av1_dec_max_burst, 16);
+ hantro_reg_write(vpu, &av1_error_conceal_e, 0);
+ hantro_reg_write(vpu, &av1_axi_rd_ostd_threshold, 64);
+ hantro_reg_write(vpu, &av1_axi_wr_ostd_threshold, 64);
+
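+ /* Program the maximum timeout and let it override the hardware default. */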
+ hantro_reg_write(vpu, &av1_ext_timeout_cycles, 0xfffffff);
+ hantro_reg_write(vpu, &av1_ext_timeout_override_e, 1);
+ hantro_reg_write(vpu, &av1_timeout_cycles, 0xfffffff);
+ hantro_reg_write(vpu, &av1_timeout_override_e, 1);
+
+ rockchip_vpu981_av1_dec_set_output_buffer(ctx);
+ rockchip_vpu981_av1_dec_set_input_buffer(ctx, vb2_src);
+
+ hantro_end_prepare_run(ctx);
+
+ hantro_reg_write(vpu, &av1_dec_e, 1);
+
+ return 0;
+
+prepare_error:
+ hantro_end_prepare_run(ctx);
+ hantro_irq_done(vpu, VB2_BUF_STATE_ERROR);
+ return ret;
+}
+
+static void rockchip_vpu981_postproc_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ int width = ctx->dst_fmt.width;
+ int height = ctx->dst_fmt.height;
+ struct vb2_v4l2_buffer *vb2_dst;
+ size_t chroma_offset;
+ dma_addr_t dst_dma;
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+
+ dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
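+ /* The semi-planar chroma plane starts right after the luma plane. */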
+ chroma_offset = ctx->dst_fmt.plane_fmt[0].bytesperline *
+ ctx->dst_fmt.height;
+
+ /* enable post processor */
+ hantro_reg_write(vpu, &av1_pp_out_e, 1);
+ hantro_reg_write(vpu, &av1_pp_in_format, 0);
+ hantro_reg_write(vpu, &av1_pp0_dup_hor, 1);
+ hantro_reg_write(vpu, &av1_pp0_dup_ver, 1);
+
+ hantro_reg_write(vpu, &av1_pp_in_height, height / 2);
+ hantro_reg_write(vpu, &av1_pp_in_width, width / 2);
+ hantro_reg_write(vpu, &av1_pp_out_height, height);
+ hantro_reg_write(vpu, &av1_pp_out_width, width);
+ hantro_reg_write(vpu, &av1_pp_out_y_stride,
+ ctx->dst_fmt.plane_fmt[0].bytesperline);
+ hantro_reg_write(vpu, &av1_pp_out_c_stride,
+ ctx->dst_fmt.plane_fmt[0].bytesperline);
+ switch (ctx->dst_fmt.pixelformat) {
+ case V4L2_PIX_FMT_P010:
+ hantro_reg_write(vpu, &av1_pp_out_format, 1);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ hantro_reg_write(vpu, &av1_pp_out_format, 3);
+ break;
+ default:
+ hantro_reg_write(vpu, &av1_pp_out_format, 0);
+ }
+
+ hantro_reg_write(vpu, &av1_ppd_blend_exist, 0);
+ hantro_reg_write(vpu, &av1_ppd_dith_exist, 0);
+ hantro_reg_write(vpu, &av1_ablend_crop_e, 0);
+ hantro_reg_write(vpu, &av1_pp_format_customer1_e, 0);
+ hantro_reg_write(vpu, &av1_pp_crop_exist, 0);
+ hantro_reg_write(vpu, &av1_pp_up_level, 0);
+ hantro_reg_write(vpu, &av1_pp_down_level, 0);
+ hantro_reg_write(vpu, &av1_pp_exist, 0);
+
+ hantro_write_addr(vpu, AV1_PP_OUT_LU, dst_dma);
+ hantro_write_addr(vpu, AV1_PP_OUT_CH, dst_dma + chroma_offset);
+}
+
+static void rockchip_vpu981_postproc_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ /* disable post processor */
+ hantro_reg_write(vpu, &av1_pp_out_e, 0);
+}
+
+const struct hantro_postproc_ops rockchip_vpu981_postproc_ops = {
+ .enable = rockchip_vpu981_postproc_enable,
+ .disable = rockchip_vpu981_postproc_disable,
+};
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
new file mode 100644
index 000000000000..182e6c830ff6
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Collabora
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@collabora.com>
+ */
+
+#ifndef _ROCKCHIP_VPU981_REGS_H_
+#define _ROCKCHIP_VPU981_REGS_H_
+
+#include "hantro.h"
+
+#define AV1_SWREG(nr) ((nr) * 4)
+
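+/*
+ * Describe a bit field inside a swreg: register index, bit shift and mask,
+ * e.g. AV1_DEC_REG(5, 25, 0x7f) selects bits [31:25] of swreg 5.
+ */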
+#define AV1_DEC_REG(b, s, m) \
+ ((const struct hantro_reg) { \
+ .base = AV1_SWREG(b), \
+ .shift = s, \
+ .mask = m, \
+ })
+
+#define AV1_REG_INTERRUPT AV1_SWREG(1)
+#define AV1_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+
+#define AV1_REG_CONFIG AV1_SWREG(2)
+#define AV1_REG_CONFIG_DEC_CLK_GATE_E BIT(10)
+
+#define av1_dec_e AV1_DEC_REG(1, 0, 0x1)
+#define av1_dec_abort_e AV1_DEC_REG(1, 5, 0x1)
+#define av1_dec_tile_int_e AV1_DEC_REG(1, 7, 0x1)
+
+#define av1_dec_clk_gate_e AV1_DEC_REG(2, 10, 0x1)
+
+#define av1_dec_out_ec_bypass AV1_DEC_REG(3, 8, 0x1)
+#define av1_write_mvs_e AV1_DEC_REG(3, 12, 0x1)
+#define av1_filtering_dis AV1_DEC_REG(3, 14, 0x1)
+#define av1_dec_out_dis AV1_DEC_REG(3, 15, 0x1)
+#define av1_dec_out_ec_byte_word AV1_DEC_REG(3, 16, 0x1)
+#define av1_skip_mode AV1_DEC_REG(3, 26, 0x1)
+#define av1_dec_mode AV1_DEC_REG(3, 27, 0x1f)
+
+#define av1_ref_frames AV1_DEC_REG(4, 0, 0xf)
+#define av1_pic_height_in_cbs AV1_DEC_REG(4, 6, 0x1fff)
+#define av1_pic_width_in_cbs AV1_DEC_REG(4, 19, 0x1fff)
+
+#define av1_ref_scaling_enable AV1_DEC_REG(5, 0, 0x1)
+#define av1_filt_level_base_gt32 AV1_DEC_REG(5, 1, 0x1)
+#define av1_error_resilient AV1_DEC_REG(5, 2, 0x1)
+#define av1_force_interger_mv AV1_DEC_REG(5, 3, 0x1)
+#define av1_allow_intrabc AV1_DEC_REG(5, 4, 0x1)
+#define av1_allow_screen_content_tools AV1_DEC_REG(5, 5, 0x1)
+#define av1_reduced_tx_set_used AV1_DEC_REG(5, 6, 0x1)
+#define av1_enable_dual_filter AV1_DEC_REG(5, 7, 0x1)
+#define av1_enable_jnt_comp AV1_DEC_REG(5, 8, 0x1)
+#define av1_allow_filter_intra AV1_DEC_REG(5, 9, 0x1)
+#define av1_enable_intra_edge_filter AV1_DEC_REG(5, 10, 0x1)
+#define av1_tempor_mvp_e AV1_DEC_REG(5, 11, 0x1)
+#define av1_allow_interintra AV1_DEC_REG(5, 12, 0x1)
+#define av1_allow_masked_compound AV1_DEC_REG(5, 13, 0x1)
+#define av1_enable_cdef AV1_DEC_REG(5, 14, 0x1)
+#define av1_switchable_motion_mode AV1_DEC_REG(5, 15, 0x1)
+#define av1_show_frame AV1_DEC_REG(5, 16, 0x1)
+#define av1_superres_is_scaled AV1_DEC_REG(5, 17, 0x1)
+#define av1_allow_warp AV1_DEC_REG(5, 18, 0x1)
+#define av1_disable_cdf_update AV1_DEC_REG(5, 19, 0x1)
+#define av1_preskip_segid AV1_DEC_REG(5, 20, 0x1)
+#define av1_delta_lf_present AV1_DEC_REG(5, 21, 0x1)
+#define av1_delta_lf_multi AV1_DEC_REG(5, 22, 0x1)
+#define av1_delta_lf_res_log AV1_DEC_REG(5, 23, 0x3)
+#define av1_strm_start_bit AV1_DEC_REG(5, 25, 0x7f)
+
+#define av1_stream_len AV1_DEC_REG(6, 0, 0xffffffff)
+
+#define av1_delta_q_present AV1_DEC_REG(7, 0, 0x1)
+#define av1_delta_q_res_log AV1_DEC_REG(7, 1, 0x3)
+#define av1_cdef_damping AV1_DEC_REG(7, 3, 0x3)
+#define av1_cdef_bits AV1_DEC_REG(7, 5, 0x3)
+#define av1_apply_grain AV1_DEC_REG(7, 7, 0x1)
+#define av1_num_y_points_b AV1_DEC_REG(7, 8, 0x1)
+#define av1_num_cb_points_b AV1_DEC_REG(7, 9, 0x1)
+#define av1_num_cr_points_b AV1_DEC_REG(7, 10, 0x1)
+#define av1_overlap_flag AV1_DEC_REG(7, 11, 0x1)
+#define av1_clip_to_restricted_range AV1_DEC_REG(7, 12, 0x1)
+#define av1_chroma_scaling_from_luma AV1_DEC_REG(7, 13, 0x1)
+#define av1_random_seed AV1_DEC_REG(7, 14, 0xffff)
+#define av1_blackwhite_e AV1_DEC_REG(7, 30, 0x1)
+
+#define av1_scaling_shift AV1_DEC_REG(8, 0, 0xf)
+#define av1_bit_depth_c_minus8 AV1_DEC_REG(8, 4, 0x3)
+#define av1_bit_depth_y_minus8 AV1_DEC_REG(8, 6, 0x3)
+#define av1_quant_base_qindex AV1_DEC_REG(8, 8, 0xff)
+#define av1_idr_pic_e AV1_DEC_REG(8, 16, 0x1)
+#define av1_superres_pic_width AV1_DEC_REG(8, 17, 0x7fff)
+
+#define av1_ref4_sign_bias AV1_DEC_REG(9, 2, 0x1)
+#define av1_ref5_sign_bias AV1_DEC_REG(9, 3, 0x1)
+#define av1_ref6_sign_bias AV1_DEC_REG(9, 4, 0x1)
+#define av1_mf1_type AV1_DEC_REG(9, 5, 0x7)
+#define av1_mf2_type AV1_DEC_REG(9, 8, 0x7)
+#define av1_mf3_type AV1_DEC_REG(9, 11, 0x7)
+#define av1_scale_denom_minus9 AV1_DEC_REG(9, 14, 0x7)
+#define av1_last_active_seg AV1_DEC_REG(9, 17, 0x7)
+#define av1_context_update_tile_id AV1_DEC_REG(9, 20, 0xfff)
+
+#define av1_tile_transpose AV1_DEC_REG(10, 0, 0x1)
+#define av1_tile_enable AV1_DEC_REG(10, 1, 0x1)
+#define av1_multicore_full_width AV1_DEC_REG(10, 2, 0xff)
+#define av1_num_tile_rows_8k AV1_DEC_REG(10, 10, 0x7f)
+#define av1_num_tile_cols_8k AV1_DEC_REG(10, 17, 0x7f)
+#define av1_multicore_tile_start_x AV1_DEC_REG(10, 24, 0xff)
+
+#define av1_use_temporal3_mvs AV1_DEC_REG(11, 0, 0x1)
+#define av1_use_temporal2_mvs AV1_DEC_REG(11, 1, 0x1)
+#define av1_use_temporal1_mvs AV1_DEC_REG(11, 2, 0x1)
+#define av1_use_temporal0_mvs AV1_DEC_REG(11, 3, 0x1)
+#define av1_comp_pred_mode AV1_DEC_REG(11, 4, 0x3)
+#define av1_high_prec_mv_e AV1_DEC_REG(11, 7, 0x1)
+#define av1_mcomp_filt_type AV1_DEC_REG(11, 8, 0x7)
+#define av1_multicore_expect_context_update AV1_DEC_REG(11, 11, 0x1)
+#define av1_multicore_sbx_offset AV1_DEC_REG(11, 12, 0x7f)
+#define av1_ulticore_tile_col AV1_DEC_REG(11, 19, 0x7f)
+#define av1_transform_mode AV1_DEC_REG(11, 27, 0x7)
+#define av1_dec_tile_size_mag AV1_DEC_REG(11, 30, 0x3)
+
+#define av1_seg_quant_sign AV1_DEC_REG(12, 2, 0xff)
+#define av1_max_cb_size AV1_DEC_REG(12, 10, 0x7)
+#define av1_min_cb_size AV1_DEC_REG(12, 13, 0x7)
+#define av1_comp_pred_fixed_ref AV1_DEC_REG(12, 16, 0x7)
+#define av1_multicore_tile_width AV1_DEC_REG(12, 19, 0x7f)
+#define av1_pic_height_pad AV1_DEC_REG(12, 26, 0x7)
+#define av1_pic_width_pad AV1_DEC_REG(12, 29, 0x7)
+
+#define av1_segment_e AV1_DEC_REG(13, 0, 0x1)
+#define av1_segment_upd_e AV1_DEC_REG(13, 1, 0x1)
+#define av1_segment_temp_upd_e AV1_DEC_REG(13, 2, 0x1)
+#define av1_comp_pred_var_ref0_av1 AV1_DEC_REG(13, 3, 0x7)
+#define av1_comp_pred_var_ref1_av1 AV1_DEC_REG(13, 6, 0x7)
+#define av1_lossless_e AV1_DEC_REG(13, 9, 0x1)
+#define av1_qp_delta_ch_ac_av1 AV1_DEC_REG(13, 11, 0x7f)
+#define av1_qp_delta_ch_dc_av1 AV1_DEC_REG(13, 18, 0x7f)
+#define av1_qp_delta_y_dc_av1 AV1_DEC_REG(13, 25, 0x7f)
+
+#define av1_quant_seg0 AV1_DEC_REG(14, 0, 0xff)
+#define av1_filt_level_seg0 AV1_DEC_REG(14, 8, 0x3f)
+#define av1_skip_seg0 AV1_DEC_REG(14, 14, 0x1)
+#define av1_refpic_seg0 AV1_DEC_REG(14, 15, 0xf)
+#define av1_filt_level_delta0_seg0 AV1_DEC_REG(14, 19, 0x7f)
+#define av1_filt_level0 AV1_DEC_REG(14, 26, 0x3f)
+
+#define av1_quant_seg1 AV1_DEC_REG(15, 0, 0xff)
+#define av1_filt_level_seg1 AV1_DEC_REG(15, 8, 0x3f)
+#define av1_skip_seg1 AV1_DEC_REG(15, 14, 0x1)
+#define av1_refpic_seg1 AV1_DEC_REG(15, 15, 0xf)
+#define av1_filt_level_delta0_seg1 AV1_DEC_REG(15, 19, 0x7f)
+#define av1_filt_level1 AV1_DEC_REG(15, 26, 0x3f)
+
+#define av1_quant_seg2 AV1_DEC_REG(16, 0, 0xff)
+#define av1_filt_level_seg2 AV1_DEC_REG(16, 8, 0x3f)
+#define av1_skip_seg2 AV1_DEC_REG(16, 14, 0x1)
+#define av1_refpic_seg2 AV1_DEC_REG(16, 15, 0xf)
+#define av1_filt_level_delta0_seg2 AV1_DEC_REG(16, 19, 0x7f)
+#define av1_filt_level2 AV1_DEC_REG(16, 26, 0x3f)
+
+#define av1_quant_seg3 AV1_DEC_REG(17, 0, 0xff)
+#define av1_filt_level_seg3 AV1_DEC_REG(17, 8, 0x3f)
+#define av1_skip_seg3 AV1_DEC_REG(17, 14, 0x1)
+#define av1_refpic_seg3 AV1_DEC_REG(17, 15, 0xf)
+#define av1_filt_level_delta0_seg3 AV1_DEC_REG(17, 19, 0x7f)
+#define av1_filt_level3 AV1_DEC_REG(17, 26, 0x3f)
+
+#define av1_quant_seg4 AV1_DEC_REG(18, 0, 0xff)
+#define av1_filt_level_seg4 AV1_DEC_REG(18, 8, 0x3f)
+#define av1_skip_seg4 AV1_DEC_REG(18, 14, 0x1)
+#define av1_refpic_seg4 AV1_DEC_REG(18, 15, 0xf)
+#define av1_filt_level_delta0_seg4 AV1_DEC_REG(18, 19, 0x7f)
+#define av1_lr_type AV1_DEC_REG(18, 26, 0x3f)
+
+#define av1_quant_seg5 AV1_DEC_REG(19, 0, 0xff)
+#define av1_filt_level_seg5 AV1_DEC_REG(19, 8, 0x3f)
+#define av1_skip_seg5 AV1_DEC_REG(19, 14, 0x1)
+#define av1_refpic_seg5 AV1_DEC_REG(19, 15, 0xf)
+#define av1_filt_level_delta0_seg5 AV1_DEC_REG(19, 19, 0x7f)
+#define av1_lr_unit_size AV1_DEC_REG(19, 26, 0x3f)
+
+#define av1_filt_level_delta1_seg0 AV1_DEC_REG(20, 0, 0x7f)
+#define av1_filt_level_delta2_seg0 AV1_DEC_REG(20, 7, 0x7f)
+#define av1_filt_level_delta3_seg0 AV1_DEC_REG(20, 14, 0x7f)
+#define av1_global_mv_seg0 AV1_DEC_REG(20, 21, 0x1)
+#define av1_mf1_last_offset AV1_DEC_REG(20, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg1 AV1_DEC_REG(21, 0, 0x7f)
+#define av1_filt_level_delta2_seg1 AV1_DEC_REG(21, 7, 0x7f)
+#define av1_filt_level_delta3_seg1 AV1_DEC_REG(21, 14, 0x7f)
+#define av1_global_mv_seg1 AV1_DEC_REG(21, 21, 0x1)
+#define av1_mf1_last2_offset AV1_DEC_REG(21, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg2 AV1_DEC_REG(22, 0, 0x7f)
+#define av1_filt_level_delta2_seg2 AV1_DEC_REG(22, 7, 0x7f)
+#define av1_filt_level_delta3_seg2 AV1_DEC_REG(22, 14, 0x7f)
+#define av1_global_mv_seg2 AV1_DEC_REG(22, 21, 0x1)
+#define av1_mf1_last3_offset AV1_DEC_REG(22, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg3 AV1_DEC_REG(23, 0, 0x7f)
+#define av1_filt_level_delta2_seg3 AV1_DEC_REG(23, 7, 0x7f)
+#define av1_filt_level_delta3_seg3 AV1_DEC_REG(23, 14, 0x7f)
+#define av1_global_mv_seg3 AV1_DEC_REG(23, 21, 0x1)
+#define av1_mf1_golden_offset AV1_DEC_REG(23, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg4 AV1_DEC_REG(24, 0, 0x7f)
+#define av1_filt_level_delta2_seg4 AV1_DEC_REG(24, 7, 0x7f)
+#define av1_filt_level_delta3_seg4 AV1_DEC_REG(24, 14, 0x7f)
+#define av1_global_mv_seg4 AV1_DEC_REG(24, 21, 0x1)
+#define av1_mf1_bwdref_offset AV1_DEC_REG(24, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg5 AV1_DEC_REG(25, 0, 0x7f)
+#define av1_filt_level_delta2_seg5 AV1_DEC_REG(25, 7, 0x7f)
+#define av1_filt_level_delta3_seg5 AV1_DEC_REG(25, 14, 0x7f)
+#define av1_global_mv_seg5 AV1_DEC_REG(25, 21, 0x1)
+#define av1_mf1_altref2_offset AV1_DEC_REG(25, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg6 AV1_DEC_REG(26, 0, 0x7f)
+#define av1_filt_level_delta2_seg6 AV1_DEC_REG(26, 7, 0x7f)
+#define av1_filt_level_delta3_seg6 AV1_DEC_REG(26, 14, 0x7f)
+#define av1_global_mv_seg6 AV1_DEC_REG(26, 21, 0x1)
+#define av1_mf1_altref_offset AV1_DEC_REG(26, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg7 AV1_DEC_REG(27, 0, 0x7f)
+#define av1_filt_level_delta2_seg7 AV1_DEC_REG(27, 7, 0x7f)
+#define av1_filt_level_delta3_seg7 AV1_DEC_REG(27, 14, 0x7f)
+#define av1_global_mv_seg7 AV1_DEC_REG(27, 21, 0x1)
+#define av1_mf2_last_offset AV1_DEC_REG(27, 22, 0x1ff)
+
+#define av1_cb_offset AV1_DEC_REG(28, 0, 0x1ff)
+#define av1_cb_luma_mult AV1_DEC_REG(28, 9, 0xff)
+#define av1_cb_mult AV1_DEC_REG(28, 17, 0xff)
+#define av1_quant_delta_v_dc AV1_DEC_REG(28, 25, 0x7f)
+
+#define av1_cr_offset AV1_DEC_REG(29, 0, 0x1ff)
+#define av1_cr_luma_mult AV1_DEC_REG(29, 9, 0xff)
+#define av1_cr_mult AV1_DEC_REG(29, 17, 0xff)
+#define av1_quant_delta_v_ac AV1_DEC_REG(29, 25, 0x7f)
+
+#define av1_filt_ref_adj_5 AV1_DEC_REG(30, 0, 0x7f)
+#define av1_filt_ref_adj_4 AV1_DEC_REG(30, 7, 0x7f)
+#define av1_filt_mb_adj_1 AV1_DEC_REG(30, 14, 0x7f)
+#define av1_filt_mb_adj_0 AV1_DEC_REG(30, 21, 0x7f)
+#define av1_filt_sharpness AV1_DEC_REG(30, 28, 0x7)
+
+#define av1_quant_seg6 AV1_DEC_REG(31, 0, 0xff)
+#define av1_filt_level_seg6 AV1_DEC_REG(31, 8, 0x3f)
+#define av1_skip_seg6 AV1_DEC_REG(31, 14, 0x1)
+#define av1_refpic_seg6 AV1_DEC_REG(31, 15, 0xf)
+#define av1_filt_level_delta0_seg6 AV1_DEC_REG(31, 19, 0x7f)
+#define av1_skip_ref0 AV1_DEC_REG(31, 26, 0xf)
+
+#define av1_quant_seg7 AV1_DEC_REG(32, 0, 0xff)
+#define av1_filt_level_seg7 AV1_DEC_REG(32, 8, 0x3f)
+#define av1_skip_seg7 AV1_DEC_REG(32, 14, 0x1)
+#define av1_refpic_seg7 AV1_DEC_REG(32, 15, 0xf)
+#define av1_filt_level_delta0_seg7 AV1_DEC_REG(32, 19, 0x7f)
+#define av1_skip_ref1 AV1_DEC_REG(32, 26, 0xf)
+
+#define av1_ref0_height AV1_DEC_REG(33, 0, 0xffff)
+#define av1_ref0_width AV1_DEC_REG(33, 16, 0xffff)
+
+#define av1_ref1_height AV1_DEC_REG(34, 0, 0xffff)
+#define av1_ref1_width AV1_DEC_REG(34, 16, 0xffff)
+
+#define av1_ref2_height AV1_DEC_REG(35, 0, 0xffff)
+#define av1_ref2_width AV1_DEC_REG(35, 16, 0xffff)
+
+#define av1_ref0_ver_scale AV1_DEC_REG(36, 0, 0xffff)
+#define av1_ref0_hor_scale AV1_DEC_REG(36, 16, 0xffff)
+
+#define av1_ref1_ver_scale AV1_DEC_REG(37, 0, 0xffff)
+#define av1_ref1_hor_scale AV1_DEC_REG(37, 16, 0xffff)
+
+#define av1_ref2_ver_scale AV1_DEC_REG(38, 0, 0xffff)
+#define av1_ref2_hor_scale AV1_DEC_REG(38, 16, 0xffff)
+
+#define av1_ref3_ver_scale AV1_DEC_REG(39, 0, 0xffff)
+#define av1_ref3_hor_scale AV1_DEC_REG(39, 16, 0xffff)
+
+#define av1_ref4_ver_scale AV1_DEC_REG(40, 0, 0xffff)
+#define av1_ref4_hor_scale AV1_DEC_REG(40, 16, 0xffff)
+
+#define av1_ref5_ver_scale AV1_DEC_REG(41, 0, 0xffff)
+#define av1_ref5_hor_scale AV1_DEC_REG(41, 16, 0xffff)
+
+#define av1_ref6_ver_scale AV1_DEC_REG(42, 0, 0xffff)
+#define av1_ref6_hor_scale AV1_DEC_REG(42, 16, 0xffff)
+
+#define av1_ref3_height AV1_DEC_REG(43, 0, 0xffff)
+#define av1_ref3_width AV1_DEC_REG(43, 16, 0xffff)
+
+#define av1_ref4_height AV1_DEC_REG(44, 0, 0xffff)
+#define av1_ref4_width AV1_DEC_REG(44, 16, 0xffff)
+
+#define av1_ref5_height AV1_DEC_REG(45, 0, 0xffff)
+#define av1_ref5_width AV1_DEC_REG(45, 16, 0xffff)
+
+#define av1_ref6_height AV1_DEC_REG(46, 0, 0xffff)
+#define av1_ref6_width AV1_DEC_REG(46, 16, 0xffff)
+
+#define av1_mf2_last2_offset AV1_DEC_REG(47, 0, 0x1ff)
+#define av1_mf2_last3_offset AV1_DEC_REG(47, 9, 0x1ff)
+#define av1_mf2_golden_offset AV1_DEC_REG(47, 18, 0x1ff)
+#define av1_qmlevel_y AV1_DEC_REG(47, 27, 0xf)
+
+#define av1_mf2_bwdref_offset AV1_DEC_REG(48, 0, 0x1ff)
+#define av1_mf2_altref2_offset AV1_DEC_REG(48, 9, 0x1ff)
+#define av1_mf2_altref_offset AV1_DEC_REG(48, 18, 0x1ff)
+#define av1_qmlevel_u AV1_DEC_REG(48, 27, 0xf)
+
+#define av1_filt_ref_adj_6 AV1_DEC_REG(49, 0, 0x7f)
+#define av1_filt_ref_adj_7 AV1_DEC_REG(49, 7, 0x7f)
+#define av1_qmlevel_v AV1_DEC_REG(49, 14, 0xf)
+
+#define av1_superres_chroma_step AV1_DEC_REG(51, 0, 0x3fff)
+#define av1_superres_luma_step AV1_DEC_REG(51, 14, 0x3fff)
+
+#define av1_superres_init_chroma_subpel_x AV1_DEC_REG(52, 0, 0x3fff)
+#define av1_superres_init_luma_subpel_x AV1_DEC_REG(52, 14, 0x3fff)
+
+#define av1_cdef_chroma_secondary_strength AV1_DEC_REG(53, 0, 0xffff)
+#define av1_cdef_luma_secondary_strength AV1_DEC_REG(53, 16, 0xffff)
+
+#define av1_apf_threshold AV1_DEC_REG(55, 0, 0xffff)
+#define av1_apf_single_pu_mode AV1_DEC_REG(55, 30, 0x1)
+#define av1_apf_disable AV1_DEC_REG(55, 30, 0x1)
+
+#define av1_dec_max_burst AV1_DEC_REG(58, 0, 0xff)
+#define av1_dec_buswidth AV1_DEC_REG(58, 8, 0x7)
+#define av1_dec_multicore_mode AV1_DEC_REG(58, 11, 0x3)
+#define av1_dec_axi_wd_id_e AV1_DEC_REG(58, 13, 0x1)
+#define av1_dec_axi_rd_id_e AV1_DEC_REG(58, 14, 0x1)
+#define av1_dec_mc_polltime AV1_DEC_REG(58, 17, 0x3ff)
+#define av1_dec_mc_pollmode AV1_DEC_REG(58, 27, 0x3)
+
+#define av1_filt_ref_adj_3 AV1_DEC_REG(59, 0, 0x3f)
+#define av1_filt_ref_adj_2 AV1_DEC_REG(59, 7, 0x3f)
+#define av1_filt_ref_adj_1 AV1_DEC_REG(59, 14, 0x3f)
+#define av1_filt_ref_adj_0 AV1_DEC_REG(59, 21, 0x3f)
+#define av1_ref0_sign_bias AV1_DEC_REG(59, 28, 0x1)
+#define av1_ref1_sign_bias AV1_DEC_REG(59, 29, 0x1)
+#define av1_ref2_sign_bias AV1_DEC_REG(59, 30, 0x1)
+#define av1_ref3_sign_bias AV1_DEC_REG(59, 31, 0x1)
+
+#define av1_cur_last_roffset AV1_DEC_REG(184, 0, 0x1ff)
+#define av1_cur_last_offset AV1_DEC_REG(184, 9, 0x1ff)
+#define av1_mf3_last_offset AV1_DEC_REG(184, 18, 0x1ff)
+#define av1_ref0_gm_mode AV1_DEC_REG(184, 27, 0x3)
+
+#define av1_cur_last2_roffset AV1_DEC_REG(185, 0, 0x1ff)
+#define av1_cur_last2_offset AV1_DEC_REG(185, 9, 0x1ff)
+#define av1_mf3_last2_offset AV1_DEC_REG(185, 18, 0x1ff)
+#define av1_ref1_gm_mode AV1_DEC_REG(185, 27, 0x3)
+
+#define av1_cur_last3_roffset AV1_DEC_REG(186, 0, 0x1ff)
+#define av1_cur_last3_offset AV1_DEC_REG(186, 9, 0x1ff)
+#define av1_mf3_last3_offset AV1_DEC_REG(186, 18, 0x1ff)
+#define av1_ref2_gm_mode AV1_DEC_REG(186, 27, 0x3)
+
+#define av1_cur_golden_roffset AV1_DEC_REG(187, 0, 0x1ff)
+#define av1_cur_golden_offset AV1_DEC_REG(187, 9, 0x1ff)
+#define av1_mf3_golden_offset AV1_DEC_REG(187, 18, 0x1ff)
+#define av1_ref3_gm_mode AV1_DEC_REG(187, 27, 0x3)
+
+#define av1_cur_bwdref_roffset AV1_DEC_REG(188, 0, 0x1ff)
+#define av1_cur_bwdref_offset AV1_DEC_REG(188, 9, 0x1ff)
+#define av1_mf3_bwdref_offset AV1_DEC_REG(188, 18, 0x1ff)
+#define av1_ref4_gm_mode AV1_DEC_REG(188, 27, 0x3)
+
+#define av1_cur_altref2_roffset AV1_DEC_REG(257, 0, 0x1ff)
+#define av1_cur_altref2_offset AV1_DEC_REG(257, 9, 0x1ff)
+#define av1_mf3_altref2_offset AV1_DEC_REG(257, 18, 0x1ff)
+#define av1_ref5_gm_mode AV1_DEC_REG(257, 27, 0x3)
+
+#define av1_strm_buffer_len AV1_DEC_REG(258, 0, 0xffffffff)
+
+#define av1_strm_start_offset AV1_DEC_REG(259, 0, 0xffffffff)
+
+#define av1_ppd_blend_exist AV1_DEC_REG(260, 21, 0x1)
+#define av1_ppd_dith_exist AV1_DEC_REG(260, 23, 0x1)
+#define av1_ablend_crop_e AV1_DEC_REG(260, 24, 0x1)
+#define av1_pp_format_p010_e AV1_DEC_REG(260, 25, 0x1)
+#define av1_pp_format_customer1_e AV1_DEC_REG(260, 26, 0x1)
+#define av1_pp_crop_exist AV1_DEC_REG(260, 27, 0x1)
+#define av1_pp_up_level AV1_DEC_REG(260, 28, 0x1)
+#define av1_pp_down_level AV1_DEC_REG(260, 29, 0x3)
+#define av1_pp_exist AV1_DEC_REG(260, 31, 0x1)
+
+#define av1_cur_altref_roffset AV1_DEC_REG(262, 0, 0x1ff)
+#define av1_cur_altref_offset AV1_DEC_REG(262, 9, 0x1ff)
+#define av1_mf3_altref_offset AV1_DEC_REG(262, 18, 0x1ff)
+#define av1_ref6_gm_mode AV1_DEC_REG(262, 27, 0x3)
+
+#define av1_cdef_luma_primary_strength AV1_DEC_REG(263, 0, 0xffffffff)
+
+#define av1_cdef_chroma_primary_strength AV1_DEC_REG(264, 0, 0xffffffff)
+
+#define av1_axi_arqos AV1_DEC_REG(265, 0, 0xf)
+#define av1_axi_awqos AV1_DEC_REG(265, 4, 0xf)
+#define av1_axi_wr_ostd_threshold AV1_DEC_REG(265, 8, 0x3ff)
+#define av1_axi_rd_ostd_threshold AV1_DEC_REG(265, 18, 0x3ff)
+#define av1_axi_wr_4k_dis AV1_DEC_REG(265, 31, 0x1)
+
+#define av1_128bit_mode AV1_DEC_REG(266, 5, 0x1)
+#define av1_wr_shaper_bypass AV1_DEC_REG(266, 10, 0x1)
+#define av1_error_conceal_e AV1_DEC_REG(266, 30, 0x1)
+
+#define av1_superres_chroma_step_invra AV1_DEC_REG(298, 0, 0xffff)
+#define av1_superres_luma_step_invra AV1_DEC_REG(298, 16, 0xffff)
+
+#define av1_dec_alignment AV1_DEC_REG(314, 0, 0xffff)
+
+#define av1_ext_timeout_cycles AV1_DEC_REG(318, 0, 0x7fffffff)
+#define av1_ext_timeout_override_e AV1_DEC_REG(318, 31, 0x1)
+
+#define av1_timeout_cycles AV1_DEC_REG(319, 0, 0x7fffffff)
+#define av1_timeout_override_e AV1_DEC_REG(319, 31, 0x1)
+
+#define av1_pp_out_e AV1_DEC_REG(320, 0, 0x1)
+#define av1_pp_cr_first AV1_DEC_REG(320, 1, 0x1)
+#define av1_pp_out_mode AV1_DEC_REG(320, 2, 0x1)
+#define av1_pp_out_tile_e AV1_DEC_REG(320, 3, 0x1)
+#define av1_pp_status AV1_DEC_REG(320, 4, 0xf)
+#define av1_pp_in_blk_size AV1_DEC_REG(320, 8, 0x7)
+#define av1_pp_out_p010_fmt AV1_DEC_REG(320, 11, 0x3)
+#define av1_pp_out_rgb_fmt AV1_DEC_REG(320, 13, 0x1f)
+#define av1_rgb_range_max AV1_DEC_REG(320, 18, 0xfff)
+#define av1_pp_rgb_planar AV1_DEC_REG(320, 30, 0x1)
+
+#define av1_scale_hratio AV1_DEC_REG(322, 0, 0x3ffff)
+#define av1_pp_out_format AV1_DEC_REG(322, 18, 0x1f)
+#define av1_ver_scale_mode AV1_DEC_REG(322, 23, 0x3)
+#define av1_hor_scale_mode AV1_DEC_REG(322, 25, 0x3)
+#define av1_pp_in_format AV1_DEC_REG(322, 27, 0x1f)
+
+#define av1_pp_out_c_stride AV1_DEC_REG(329, 0, 0xffff)
+#define av1_pp_out_y_stride AV1_DEC_REG(329, 16, 0xffff)
+
+#define av1_pp_in_height AV1_DEC_REG(331, 0, 0xffff)
+#define av1_pp_in_width AV1_DEC_REG(331, 16, 0xffff)
+
+#define av1_pp_out_height AV1_DEC_REG(332, 0, 0xffff)
+#define av1_pp_out_width AV1_DEC_REG(332, 16, 0xffff)
+
+#define av1_pp1_dup_ver AV1_DEC_REG(394, 0, 0xff)
+#define av1_pp1_dup_hor AV1_DEC_REG(394, 8, 0xff)
+#define av1_pp0_dup_ver AV1_DEC_REG(394, 16, 0xff)
+#define av1_pp0_dup_hor AV1_DEC_REG(394, 24, 0xff)
+
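+/* Buffer address registers; reference entries step by two swregs (0x8). */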
+#define AV1_TILE_OUT_LU (AV1_SWREG(65))
+#define AV1_REFERENCE_Y(i) (AV1_SWREG(67) + ((i) * 0x8))
+#define AV1_SEGMENTATION (AV1_SWREG(81))
+#define AV1_GLOBAL_MODEL (AV1_SWREG(83))
+#define AV1_CDEF_COL (AV1_SWREG(85))
+#define AV1_SR_COL (AV1_SWREG(89))
+#define AV1_LR_COL (AV1_SWREG(91))
+#define AV1_FILM_GRAIN (AV1_SWREG(95))
+#define AV1_TILE_OUT_CH (AV1_SWREG(99))
+#define AV1_REFERENCE_CB(i) (AV1_SWREG(101) + ((i) * 0x8))
+#define AV1_TILE_OUT_MV (AV1_SWREG(133))
+#define AV1_REFERENCE_MV(i) (AV1_SWREG(135) + ((i) * 0x8))
+#define AV1_TILE_BASE (AV1_SWREG(167))
+#define AV1_INPUT_STREAM (AV1_SWREG(169))
+#define AV1_PROP_TABLE_OUT (AV1_SWREG(171))
+#define AV1_PROP_TABLE (AV1_SWREG(173))
+#define AV1_MC_SYNC_CURR (AV1_SWREG(175))
+#define AV1_MC_SYNC_LEFT (AV1_SWREG(177))
+#define AV1_DB_DATA_COL (AV1_SWREG(179))
+#define AV1_DB_CTRL_COL (AV1_SWREG(183))
+#define AV1_PP_OUT_LU (AV1_SWREG(326))
+#define AV1_PP_OUT_CH (AV1_SWREG(328))
+
+#endif /* _ROCKCHIP_VPU981_REGS_H_ */
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
index 8de6fd2e8eef..816ffa905a4b 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
@@ -13,9 +13,13 @@
#include "hantro_g1_regs.h"
#include "hantro_h1_regs.h"
#include "rockchip_vpu2_regs.h"
+#include "rockchip_vpu981_regs.h"
#define RK3066_ACLK_MAX_FREQ (300 * 1000 * 1000)
#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
+#define RK3588_ACLK_MAX_FREQ (300 * 1000 * 1000)
+
+#define ROCKCHIP_VPU981_MIN_SIZE 64
/*
* Supported formats.
@@ -74,6 +78,37 @@ static const struct hantro_fmt rockchip_vpu1_postproc_fmts[] = {
},
};
+static const struct hantro_fmt rockchip_vpu981_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
static const struct hantro_fmt rk3066_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
@@ -277,6 +312,48 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
},
};
+static const struct hantro_fmt rockchip_vpu981_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV15_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_AV1_FRAME,
+ .codec_mode = HANTRO_MODE_AV1_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
static irqreturn_t rockchip_vpu1_vepu_irq(int irq, void *dev_id)
{
struct hantro_dev *vpu = dev_id;
@@ -331,6 +408,24 @@ static irqreturn_t rockchip_vpu2_vepu_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t rk3588_vpu981_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, AV1_REG_INTERRUPT);
+ state = (status & AV1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
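+ /* Acknowledge the interrupt and keep decoder clock gating enabled. */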
+ vdpu_write(vpu, 0, AV1_REG_INTERRUPT);
+ vdpu_write(vpu, AV1_REG_CONFIG_DEC_CLK_GATE_E, AV1_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
static int rk3036_vpu_hw_init(struct hantro_dev *vpu)
{
/* Bump ACLK to max. possible freq. to improve performance. */
@@ -346,6 +441,13 @@ static int rk3066_vpu_hw_init(struct hantro_dev *vpu)
return 0;
}
+static int rk3588_vpu981_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLKs to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3588_ACLK_MAX_FREQ);
+ return 0;
+}
+
static int rockchip_vpu_hw_init(struct hantro_dev *vpu)
{
/* Bump ACLK to max. possible freq. to improve performance. */
@@ -498,6 +600,14 @@ static const struct hantro_codec_ops rk3568_vepu_codec_ops[] = {
},
};
+static const struct hantro_codec_ops rk3588_vpu981_codec_ops[] = {
+ [HANTRO_MODE_AV1_DEC] = {
+ .run = rockchip_vpu981_av1_dec_run,
+ .init = rockchip_vpu981_av1_dec_init,
+ .exit = rockchip_vpu981_av1_dec_exit,
+ .done = rockchip_vpu981_av1_dec_done,
+ },
+};
/*
* VPU variant.
*/
@@ -529,10 +639,18 @@ static const char * const rk3066_vpu_clk_names[] = {
"aclk_vepu", "hclk_vepu"
};
+static const struct hantro_irq rk3588_vpu981_irqs[] = {
+ { "vdpu", rk3588_vpu981_irq },
+};
+
static const char * const rockchip_vpu_clk_names[] = {
"aclk", "hclk"
};
+static const char * const rk3588_vpu981_vpu_clk_names[] = {
+ "aclk", "hclk", "aclk_vdpu_root", "hclk_vdpu_root"
+};
+
/* VDPU1/VEPU1 */
const struct hantro_variant rk3036_vpu_variant = {
@@ -678,3 +796,19 @@ const struct hantro_variant px30_vpu_variant = {
.clk_names = rockchip_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
};
+
+const struct hantro_variant rk3588_vpu981_variant = {
+ .dec_offset = 0x0,
+ .dec_fmts = rockchip_vpu981_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vpu981_dec_fmts),
+ .postproc_fmts = rockchip_vpu981_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu981_postproc_fmts),
+ .postproc_ops = &rockchip_vpu981_postproc_ops,
+ .codec = HANTRO_AV1_DECODER,
+ .codec_ops = rk3588_vpu981_codec_ops,
+ .irqs = rk3588_vpu981_irqs,
+ .num_irqs = ARRAY_SIZE(rk3588_vpu981_irqs),
+ .init = rk3588_vpu981_hw_init,
+ .clk_names = rk3588_vpu981_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3588_vpu981_vpu_clk_names)
+};
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 1d9f32e5a917..6d273abfe16c 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -24,7 +24,6 @@ struct video_mux {
struct v4l2_subdev subdev;
struct v4l2_async_notifier notifier;
struct media_pad *pads;
- struct v4l2_mbus_framefmt *format_mbus;
struct mux_control *mux;
struct mutex lock;
int active;
@@ -71,6 +70,9 @@ static int video_mux_link_setup(struct media_entity *entity,
mutex_lock(&vmux->lock);
if (flags & MEDIA_LNK_FL_ENABLED) {
+ struct v4l2_subdev_state *sd_state;
+ struct v4l2_mbus_framefmt *source_mbusformat;
+
if (vmux->active == local->index)
goto out;
@@ -86,7 +88,12 @@ static int video_mux_link_setup(struct media_entity *entity,
vmux->active = local->index;
/* Propagate the active format to the source */
- vmux->format_mbus[source_pad] = vmux->format_mbus[vmux->active];
+ sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+ source_mbusformat = v4l2_subdev_get_pad_format(sd, sd_state,
+ source_pad);
+ *source_mbusformat = *v4l2_subdev_get_pad_format(sd, sd_state,
+ vmux->active);
+ v4l2_subdev_unlock_state(sd_state);
} else {
if (vmux->active != local->index)
goto out;
@@ -138,40 +145,6 @@ static const struct v4l2_subdev_video_ops video_mux_subdev_video_ops = {
.s_stream = video_mux_s_stream,
};
-static struct v4l2_mbus_framefmt *
-__video_mux_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, u32 which)
-{
- struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
-
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(sd, sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &vmux->format_mbus[pad];
- default:
- return NULL;
- }
-}
-
-static int video_mux_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *sdformat)
-{
- struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
-
- mutex_lock(&vmux->lock);
-
- sdformat->format = *__video_mux_get_pad_format(sd, sd_state,
- sdformat->pad,
- sdformat->which);
-
- mutex_unlock(&vmux->lock);
-
- return 0;
-}
-
static int video_mux_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
@@ -181,14 +154,11 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
struct media_pad *pad = &vmux->pads[sdformat->pad];
u16 source_pad = sd->entity.num_pads - 1;
- mbusformat = __video_mux_get_pad_format(sd, sd_state, sdformat->pad,
- sdformat->which);
+ mbusformat = v4l2_subdev_get_pad_format(sd, sd_state, sdformat->pad);
if (!mbusformat)
return -EINVAL;
- source_mbusformat = __video_mux_get_pad_format(sd, sd_state,
- source_pad,
- sdformat->which);
+ source_mbusformat = v4l2_subdev_get_pad_format(sd, sd_state, source_pad);
if (!source_mbusformat)
return -EINVAL;
@@ -298,7 +268,8 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
/* Source pad mirrors active sink pad, no limitations on sink pads */
if ((pad->flags & MEDIA_PAD_FL_SOURCE) && vmux->active >= 0)
- sdformat->format = vmux->format_mbus[vmux->active];
+ sdformat->format = *v4l2_subdev_get_pad_format(sd, sd_state,
+ vmux->active);
*mbusformat = sdformat->format;
@@ -321,7 +292,7 @@ static int video_mux_init_cfg(struct v4l2_subdev *sd,
mutex_lock(&vmux->lock);
for (i = 0; i < sd->entity.num_pads; i++) {
- mbusformat = v4l2_subdev_get_try_format(sd, sd_state, i);
+ mbusformat = v4l2_subdev_get_pad_format(sd, sd_state, i);
*mbusformat = video_mux_format_mbus_default;
}
@@ -332,7 +303,7 @@ static int video_mux_init_cfg(struct v4l2_subdev *sd,
static const struct v4l2_subdev_pad_ops video_mux_pad_ops = {
.init_cfg = video_mux_init_cfg,
- .get_fmt = video_mux_get_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = video_mux_set_format,
};
@@ -389,7 +360,7 @@ static int video_mux_async_register(struct video_mux *vmux,
ret = PTR_ERR(asd);
/* OK if asd already exists */
if (ret != -EEXIST)
- return ret;
+ goto err_nf_cleanup;
}
}
@@ -397,9 +368,19 @@ static int video_mux_async_register(struct video_mux *vmux,
ret = v4l2_async_subdev_nf_register(&vmux->subdev, &vmux->notifier);
if (ret)
- return ret;
+ goto err_nf_cleanup;
+
+ ret = v4l2_async_register_subdev(&vmux->subdev);
+ if (ret)
+ goto err_nf_unregister;
- return v4l2_async_register_subdev(&vmux->subdev);
+ return 0;
+
+err_nf_unregister:
+ v4l2_async_nf_unregister(&vmux->notifier);
+err_nf_cleanup:
+ v4l2_async_nf_cleanup(&vmux->notifier);
+ return ret;
}
static int video_mux_probe(struct platform_device *pdev)
@@ -452,17 +433,9 @@ static int video_mux_probe(struct platform_device *pdev)
if (!vmux->pads)
return -ENOMEM;
- vmux->format_mbus = devm_kcalloc(dev, num_pads,
- sizeof(*vmux->format_mbus),
- GFP_KERNEL);
- if (!vmux->format_mbus)
- return -ENOMEM;
-
- for (i = 0; i < num_pads; i++) {
+ for (i = 0; i < num_pads; i++)
vmux->pads[i].flags = (i < num_pads - 1) ? MEDIA_PAD_FL_SINK
: MEDIA_PAD_FL_SOURCE;
- vmux->format_mbus[i] = video_mux_format_mbus_default;
- }
vmux->subdev.entity.function = MEDIA_ENT_F_VID_MUX;
ret = media_entity_pads_init(&vmux->subdev.entity, num_pads,
@@ -472,12 +445,20 @@ static int video_mux_probe(struct platform_device *pdev)
vmux->subdev.entity.ops = &video_mux_ops;
+ ret = v4l2_subdev_init_finalize(&vmux->subdev);
+ if (ret < 0)
+ goto err_entity_cleanup;
+
ret = video_mux_async_register(vmux, num_pads - 1);
- if (ret) {
- v4l2_async_nf_unregister(&vmux->notifier);
- v4l2_async_nf_cleanup(&vmux->notifier);
- }
+ if (ret)
+ goto err_subdev_cleanup;
+
+ return 0;
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(&vmux->subdev);
+err_entity_cleanup:
+ media_entity_cleanup(&vmux->subdev.entity);
return ret;
}
@@ -489,6 +470,7 @@ static void video_mux_remove(struct platform_device *pdev)
v4l2_async_nf_unregister(&vmux->notifier);
v4l2_async_nf_cleanup(&vmux->notifier);
v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
}
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 616a38feb641..d52eccdc7eb9 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -15,7 +15,7 @@ if RADIO_ADAPTERS
config RADIO_MAXIRADIO
tristate "Guillemot MAXI Radio FM 2000 radio"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select RADIO_TEA575X
help
Choose Y here if you have this radio card. This card may also be
@@ -232,6 +232,7 @@ source "drivers/media/radio/wl128x/Kconfig"
menuconfig V4L_RADIO_ISA_DRIVERS
bool "ISA radio devices"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
help
Say Y here to enable support for these ISA drivers.
@@ -240,6 +241,7 @@ if V4L_RADIO_ISA_DRIVERS
config RADIO_AZTECH
tristate "Aztech/Packard Bell Radio"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
select RADIO_ISA
help
Choose Y here if you have one of these FM radio cards, and then fill
@@ -260,6 +262,7 @@ config RADIO_AZTECH_PORT
config RADIO_CADET
tristate "ADS Cadet AM/FM Tuner"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
help
Choose Y here if you have one of these AM/FM radio cards, and then
fill in the port address below.
@@ -270,6 +273,7 @@ config RADIO_CADET
config RADIO_GEMTEK
tristate "GemTek Radio card (or compatible) support"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
select RADIO_ISA
help
Choose Y here if you have this FM radio card, and then fill in the
@@ -309,6 +313,7 @@ config RADIO_GEMTEK_PROBE
config RADIO_ISA
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
tristate
config RADIO_MIROPCM20
@@ -329,6 +334,7 @@ config RADIO_MIROPCM20
config RADIO_RTRACK
tristate "AIMSlab RadioTrack (aka RadioReveal) support"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
select RADIO_ISA
help
Choose Y here if you have one of these FM radio cards, and then fill
@@ -383,6 +389,7 @@ config RADIO_RTRACK_PORT
config RADIO_SF16FMI
tristate "SF16-FMI/SF16-FMP/SF16-FMD Radio"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
help
Choose Y here if you have one of these FM radio cards.
@@ -392,6 +399,7 @@ config RADIO_SF16FMI
config RADIO_SF16FMR2
tristate "SF16-FMR2/SF16-FMD2 Radio"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
select RADIO_TEA575X
help
Choose Y here if you have one of these FM radio cards.
@@ -402,6 +410,7 @@ config RADIO_SF16FMR2
config RADIO_TERRATEC
tristate "TerraTec ActiveRadio ISA Standalone"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
select RADIO_ISA
help
Choose Y here if you have this FM radio card.
@@ -416,6 +425,7 @@ config RADIO_TERRATEC
config RADIO_TRUST
tristate "Trust FM radio card"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
select RADIO_ISA
help
This is a driver for the Trust FM radio cards. Say Y if you have
@@ -439,6 +449,7 @@ config RADIO_TRUST_PORT
config RADIO_TYPHOON
tristate "Typhoon Radio (a.k.a. EcoRadio)"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
select RADIO_ISA
help
Choose Y here if you have one of these FM radio cards, and then fill
@@ -473,6 +484,7 @@ config RADIO_TYPHOON_PORT
config RADIO_ZOLTRIX
tristate "Zoltrix Radio"
depends on ISA || COMPILE_TEST
+ depends on HAS_IOPORT
select RADIO_ISA
help
Choose Y here if you have one of these FM radio cards, and then fill
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 2cb74afba49c..14e7dd3889ff 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -511,7 +511,7 @@ static struct i2c_driver tea5764_i2c_driver = {
.driver = {
.name = "radio-tea5764",
},
- .probe_new = tea5764_i2c_probe,
+ .probe = tea5764_i2c_probe,
.remove = tea5764_i2c_remove,
.id_table = tea5764_id,
};
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index 3c758a983344..91345198bbf1 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -405,7 +405,7 @@ static struct i2c_driver saa7706h_driver = {
.driver = {
.name = DRIVER_NAME,
},
- .probe_new = saa7706h_probe,
+ .probe = saa7706h_probe,
.remove = saa7706h_remove,
.id_table = saa7706h_id,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index a6ad926c2b4e..fd449e42c191 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -532,7 +532,7 @@ static struct i2c_driver si470x_i2c_driver = {
.pm = &si470x_i2c_pm,
#endif
},
- .probe_new = si470x_i2c_probe,
+ .probe = si470x_i2c_probe,
.remove = si470x_i2c_remove,
.id_table = si470x_i2c_id,
};
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index 93d847c294e8..ddaf7a60b7d0 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -1657,7 +1657,7 @@ static struct i2c_driver si4713_i2c_driver = {
.name = "si4713",
.of_match_table = of_match_ptr(si4713_of_match),
},
- .probe_new = si4713_probe,
+ .probe = si4713_probe,
.remove = si4713_remove,
.id_table = si4713_id,
};
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index d14c97d79e83..215168aa1588 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -183,7 +183,7 @@ static struct i2c_driver tef6862_driver = {
.driver = {
.name = DRIVER_NAME,
},
- .probe_new = tef6862_probe,
+ .probe = tef6862_probe,
.remove = tef6862_remove,
.id_table = tef6862_id,
};
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index cbd49dff6d74..b31b7ed60bbe 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -1234,9 +1234,8 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
struct bts_action *action;
struct bts_action_delay *delay;
u8 *fw_data;
- int ret, fw_len, cmd_cnt;
+ int ret, fw_len;
- cmd_cnt = 0;
set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
ret = request_firmware(&fw_entry, fw_name,
@@ -1272,7 +1271,6 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
if (ret)
goto rel_fw;
- cmd_cnt++;
break;
case ACTION_DELAY: /* Delay */
@@ -1284,7 +1282,7 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
fw_data += (sizeof(struct bts_action) + (action->size));
fw_len -= (sizeof(struct bts_action) + (action->size));
}
- fmdbg("Firmware commands(%d) loaded to chip\n", cmd_cnt);
+ fmdbg("Transferred %zu of %zu bytes of the firmware to chip\n", fw_entry->size - fw_len, fw_entry->size);
rel_fw:
release_firmware(fw_entry);
clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index ac4172feb6f9..922c790b577e 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -148,6 +148,7 @@ if RC_DEVICES
config IR_ENE
tristate "ENE eHome Receiver/Transceiver (pnp id: ENE0100/ENE02xxx)"
depends on PNP || COMPILE_TEST
+ depends on HAS_IOPORT
help
Say Y here to enable support for integrated infrared receiver
/transceiver made by ENE.
@@ -161,6 +162,7 @@ config IR_ENE
config IR_FINTEK
tristate "Fintek Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
+ depends on HAS_IOPORT
help
Say Y here to enable support for integrated infrared receiver
/transceiver made by Fintek. This chip is found on assorted
@@ -249,6 +251,7 @@ config IR_IMON_RAW
config IR_ITE_CIR
tristate "ITE Tech Inc. IT8712/IT8512 Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
+ depends on HAS_IOPORT
help
Say Y here to enable support for integrated infrared receivers
/transceivers made by ITE Tech Inc. These are found in
@@ -301,6 +304,7 @@ config IR_MTK
config IR_NUVOTON
tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
depends on PNP || COMPILE_TEST
+ depends on HAS_IOPORT
help
Say Y here to enable support for integrated infrared receiver
/transceiver made by Nuvoton (formerly Winbond). This chip is
@@ -345,6 +349,7 @@ config IR_RX51
config IR_SERIAL
tristate "Homebrew Serial Port Receiver"
+ depends on HAS_IOPORT
help
Say Y if you want to use Homebrew Serial Port Receivers and
Transceivers.
@@ -412,6 +417,7 @@ config IR_TTUSBIR
config IR_WINBOND_CIR
tristate "Winbond IR remote control"
depends on (X86 && PNP) || COMPILE_TEST
+ depends on HAS_IOPORT
select NEW_LEDS
select LEDS_CLASS
select BITREVERSE
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.c b/drivers/media/test-drivers/vidtv/vidtv_demod.c
index b878db798686..7a0cd9601917 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_demod.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.c
@@ -449,7 +449,7 @@ static struct i2c_driver vidtv_demod_i2c_driver = {
.name = "dvb_vidtv_demod",
.suppress_bind_attrs = true,
},
- .probe_new = vidtv_demod_i2c_probe,
+ .probe = vidtv_demod_i2c_probe,
.remove = vidtv_demod_i2c_remove,
.id_table = vidtv_demod_i2c_id_table,
};
diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.c b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
index 55a4387f3854..a748737d47f3 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_tuner.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
@@ -425,7 +425,7 @@ static struct i2c_driver vidtv_tuner_i2c_driver = {
.name = "dvb_vidtv_tuner",
.suppress_bind_attrs = true,
},
- .probe_new = vidtv_tuner_i2c_probe,
+ .probe = vidtv_tuner_i2c_probe,
.remove = vidtv_tuner_i2c_remove,
.id_table = vidtv_tuner_i2c_id_table,
};
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index 801286dc1448..3a06df35a2d7 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -21,13 +21,8 @@
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"
-/* The number of discrete webcam framesizes */
-#define VIVID_WEBCAM_SIZES 6
-/* The number of discrete webcam frameintervals */
-#define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
-
/* Sizes must be in increasing order */
-static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
+static const struct v4l2_frmsize_discrete webcam_sizes[] = {
{ 320, 180 },
{ 640, 360 },
{ 640, 480 },
@@ -40,21 +35,43 @@ static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
* Intervals must be in increasing order and there must be twice as many
* elements in this array as there are in webcam_sizes.
*/
-static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
+static const struct v4l2_fract webcam_intervals[] = {
{ 1, 1 },
{ 1, 2 },
{ 1, 4 },
{ 1, 5 },
{ 1, 10 },
{ 2, 25 },
- { 1, 15 },
+ { 1, 15 }, /* 7 - maximum for 2160p */
{ 1, 25 },
- { 1, 30 },
+ { 1, 30 }, /* 9 - maximum for 1080p */
{ 1, 40 },
{ 1, 50 },
- { 1, 60 },
+ { 1, 60 }, /* 12 - maximum for 720p */
+ { 1, 120 },
};
+/* Limit maximum FPS rates for high resolutions */
+#define IVAL_COUNT_720P 12 /* 720p and up is limited to 60 fps */
+#define IVAL_COUNT_1080P 9 /* 1080p and up is limited to 30 fps */
+#define IVAL_COUNT_2160P 7 /* 2160p and up is limited to 15 fps */
+
+static inline unsigned int webcam_ival_count(const struct vivid_dev *dev,
+ unsigned int frmsize_idx)
+{
+ if (webcam_sizes[frmsize_idx].height >= 2160)
+ return IVAL_COUNT_2160P;
+
+ if (webcam_sizes[frmsize_idx].height >= 1080)
+ return IVAL_COUNT_1080P;
+
+ if (webcam_sizes[frmsize_idx].height >= 720)
+ return IVAL_COUNT_720P;
+
+ /* For low resolutions, allow all FPS rates */
+ return ARRAY_SIZE(webcam_intervals);
+}
+
static int vid_cap_queue_setup(struct vb2_queue *vq,
unsigned *nbuffers, unsigned *nplanes,
unsigned sizes[], struct device *alloc_devs[])
@@ -560,7 +577,7 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
if (vivid_is_webcam(dev)) {
const struct v4l2_frmsize_discrete *sz =
v4l2_find_nearest_size(webcam_sizes,
- VIVID_WEBCAM_SIZES, width,
+ ARRAY_SIZE(webcam_sizes), width,
height, mp->width, mp->height);
w = sz->width;
@@ -736,14 +753,16 @@ int vivid_s_fmt_vid_cap(struct file *file, void *priv,
compose->height /= factor;
}
} else if (vivid_is_webcam(dev)) {
+ unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);
+
/* Guaranteed to be a match */
for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
if (webcam_sizes[i].width == mp->width &&
webcam_sizes[i].height == mp->height)
break;
dev->webcam_size_idx = i;
- if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
- dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
+ if (dev->webcam_ival_idx >= ival_sz)
+ dev->webcam_ival_idx = ival_sz - 1;
vivid_update_format_cap(dev, false);
} else {
struct v4l2_rect r = { 0, 0, mp->width, mp->height };
@@ -1636,7 +1655,7 @@ int vidioc_enum_frameintervals(struct file *file, void *priv,
break;
if (i == ARRAY_SIZE(webcam_sizes))
return -EINVAL;
- if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
+ if (fival->index >= webcam_ival_count(dev, i))
return -EINVAL;
fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fival->discrete = webcam_intervals[fival->index];
@@ -1663,7 +1682,7 @@ int vivid_vid_cap_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
struct vivid_dev *dev = video_drvdata(file);
- unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
+ unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);
struct v4l2_fract tpf;
unsigned i;
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 7c269f3159ef..3893a00c18ce 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -729,7 +729,7 @@ static struct i2c_driver e4000_driver = {
.name = "e4000",
.suppress_bind_attrs = true,
},
- .probe_new = e4000_probe,
+ .probe = e4000_probe,
.remove = e4000_remove,
.id_table = e4000_id_table,
};
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 3cd8279f4f2e..f6613dcf40a3 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -610,7 +610,7 @@ static struct i2c_driver fc2580_driver = {
.name = "fc2580",
.suppress_bind_attrs = true,
},
- .probe_new = fc2580_probe,
+ .probe = fc2580_probe,
.remove = fc2580_remove,
.id_table = fc2580_id_table,
};
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index 7d172a5a66d9..2cd7f0e0c70d 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -718,7 +718,7 @@ static struct i2c_driver m88rs6000t_driver = {
.driver = {
.name = "m88rs6000t",
},
- .probe_new = m88rs6000t_probe,
+ .probe = m88rs6000t_probe,
.remove = m88rs6000t_remove,
.id_table = m88rs6000t_id,
};
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index e5d86874adb3..0278a9f0aeef 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -524,7 +524,7 @@ static struct i2c_driver mt2060_driver = {
.name = "mt2060",
.suppress_bind_attrs = true,
},
- .probe_new = mt2060_probe,
+ .probe = mt2060_probe,
.remove = mt2060_remove,
.id_table = mt2060_id_table,
};
diff --git a/drivers/media/tuners/mxl301rf.c b/drivers/media/tuners/mxl301rf.c
index c35442a77ae5..9b2b237745ae 100644
--- a/drivers/media/tuners/mxl301rf.c
+++ b/drivers/media/tuners/mxl301rf.c
@@ -326,7 +326,7 @@ static struct i2c_driver mxl301rf_driver = {
.driver = {
.name = "mxl301rf",
},
- .probe_new = mxl301rf_probe,
+ .probe = mxl301rf_probe,
.remove = mxl301rf_remove,
.id_table = mxl301rf_id,
};
diff --git a/drivers/media/tuners/qm1d1b0004.c b/drivers/media/tuners/qm1d1b0004.c
index 0b6f750c54ad..af2d3618b9d5 100644
--- a/drivers/media/tuners/qm1d1b0004.c
+++ b/drivers/media/tuners/qm1d1b0004.c
@@ -253,7 +253,7 @@ static struct i2c_driver qm1d1b0004_driver = {
.driver = {
.name = "qm1d1b0004",
},
- .probe_new = qm1d1b0004_probe,
+ .probe = qm1d1b0004_probe,
.remove = qm1d1b0004_remove,
.id_table = qm1d1b0004_id,
};
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index f9be7a721d2c..ce7223315b0c 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -443,7 +443,7 @@ static struct i2c_driver qm1d1c0042_driver = {
.driver = {
.name = "qm1d1c0042",
},
- .probe_new = qm1d1c0042_probe,
+ .probe = qm1d1c0042_probe,
.remove = qm1d1c0042_remove,
.id_table = qm1d1c0042_id,
};
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 3fa3dcda917a..def06c262ea2 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -990,7 +990,7 @@ static struct i2c_driver si2157_driver = {
.name = "si2157",
.suppress_bind_attrs = true,
},
- .probe_new = si2157_probe,
+ .probe = si2157_probe,
.remove = si2157_remove,
.id_table = si2157_id_table,
};
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index 5fdf05a97415..8d742bd61df0 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -263,7 +263,7 @@ static struct i2c_driver tda18212_driver = {
.driver = {
.name = "tda18212",
},
- .probe_new = tda18212_probe,
+ .probe = tda18212_probe,
.remove = tda18212_remove,
.id_table = tda18212_id,
};
diff --git a/drivers/media/tuners/tda18250.c b/drivers/media/tuners/tda18250.c
index 66ff2d035de7..32ea473f3f49 100644
--- a/drivers/media/tuners/tda18250.c
+++ b/drivers/media/tuners/tda18250.c
@@ -877,7 +877,7 @@ static struct i2c_driver tda18250_driver = {
.driver = {
.name = "tda18250",
},
- .probe_new = tda18250_probe,
+ .probe = tda18250_probe,
.remove = tda18250_remove,
.id_table = tda18250_id_table,
};
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index ac38afd3441a..03a3a022b0a8 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -255,7 +255,7 @@ static struct i2c_driver tua9001_driver = {
.name = "tua9001",
.suppress_bind_attrs = true,
},
- .probe_new = tua9001_probe,
+ .probe = tua9001_probe,
.remove = tua9001_remove,
.id_table = tua9001_id_table,
};
diff --git a/drivers/media/usb/as102/as102_usb_drv.c b/drivers/media/usb/as102/as102_usb_drv.c
index 50419e8ae56c..6b380144d6c2 100644
--- a/drivers/media/usb/as102/as102_usb_drv.c
+++ b/drivers/media/usb/as102/as102_usb_drv.c
@@ -303,10 +303,8 @@ static void as102_usb_release(struct kref *kref)
struct as102_dev_t *as102_dev;
as102_dev = container_of(kref, struct as102_dev_t, kref);
- if (as102_dev != NULL) {
- usb_put_dev(as102_dev->bus_adap.usb_dev);
- kfree(as102_dev);
- }
+ usb_put_dev(as102_dev->bus_adap.usb_dev);
+ kfree(as102_dev);
}
static void as102_usb_disconnect(struct usb_interface *intf)
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index b3a09d3ac7d2..1e246b47766d 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -250,7 +250,7 @@ static void au0828_media_graph_notify(struct media_entity *new,
create_link:
if (decoder && mixer) {
- ret = media_get_pad_index(decoder, false,
+ ret = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_AUDIO);
if (ret >= 0)
ret = media_create_pad_link(decoder, ret,
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 62ee09f28a0b..2dcbb49d66da 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -202,7 +202,8 @@ static int az6007_rc_query(struct dvb_usb_device *d)
unsigned code;
enum rc_proto proto;
- az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10);
+ if (az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10) < 0)
+ return -EIO;
if (st->data[1] == 0x44)
return 0;
@@ -248,7 +249,7 @@ static int az6007_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
int slot,
int address)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct dvb_usb_device *d = ca->data;
struct az6007_device_state *state = d_to_priv(d);
int ret;
@@ -290,7 +291,7 @@ static int az6007_ci_write_attribute_mem(struct dvb_ca_en50221 *ca,
int address,
u8 value)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct dvb_usb_device *d = ca->data;
struct az6007_device_state *state = d_to_priv(d);
int ret;
@@ -321,7 +322,7 @@ static int az6007_ci_read_cam_control(struct dvb_ca_en50221 *ca,
int slot,
u8 address)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct dvb_usb_device *d = ca->data;
struct az6007_device_state *state = d_to_priv(d);
int ret;
@@ -367,7 +368,7 @@ static int az6007_ci_write_cam_control(struct dvb_ca_en50221 *ca,
u8 address,
u8 value)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct dvb_usb_device *d = ca->data;
struct az6007_device_state *state = d_to_priv(d);
int ret;
@@ -398,7 +399,7 @@ failed:
static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct dvb_usb_device *d = ca->data;
int ret;
u8 req;
@@ -429,7 +430,7 @@ static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
static int az6007_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct dvb_usb_device *d = ca->data;
struct az6007_device_state *state = d_to_priv(d);
int ret, i;
@@ -485,7 +486,7 @@ static int az6007_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
static int az6007_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct dvb_usb_device *d = ca->data;
struct az6007_device_state *state = d_to_priv(d);
int ret;
@@ -514,7 +515,7 @@ failed:
static int az6007_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct dvb_usb_device *d = ca->data;
struct az6007_device_state *state = d_to_priv(d);
int ret;
u8 req;
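The cast removals in this and the following USB DVB drivers lean on C's implicit conversion from void * to any object pointer type, so the dropped casts added no type safety and only lengthened the assignments. A minimal standalone sketch of the idiom, with hypothetical names that are not part of the patch:

	#include <stdlib.h>

	struct demod_state {
		int gain;
	};

	static void frontend_release(void *demodulator_priv)
	{
		/* void * converts implicitly; no explicit cast is needed in C. */
		struct demod_state *state = demodulator_priv;

		free(state);
	}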
diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c
index 9d6fa0556d7b..404e56b32145 100644
--- a/drivers/media/usb/dvb-usb/af9005-fe.c
+++ b/drivers/media/usb/dvb-usb/af9005-fe.c
@@ -1412,8 +1412,7 @@ static int af9005_fe_get_frontend(struct dvb_frontend *fe,
static void af9005_fe_release(struct dvb_frontend *fe)
{
- struct af9005_fe_state *state =
- (struct af9005_fe_state *)fe->demodulator_priv;
+ struct af9005_fe_state *state = fe->demodulator_priv;
kfree(state);
}
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index a31c6f82f4e9..2bc27710427d 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -407,8 +407,8 @@ static int az6027_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
int slot,
int address)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
- struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
+ struct dvb_usb_device *d = ca->data;
+ struct az6027_device_state *state = d->priv;
int ret;
u8 req;
@@ -449,8 +449,8 @@ static int az6027_ci_write_attribute_mem(struct dvb_ca_en50221 *ca,
int address,
u8 value)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
- struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
+ struct dvb_usb_device *d = ca->data;
+ struct az6027_device_state *state = d->priv;
int ret;
u8 req;
@@ -480,8 +480,8 @@ static int az6027_ci_read_cam_control(struct dvb_ca_en50221 *ca,
int slot,
u8 address)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
- struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
+ struct dvb_usb_device *d = ca->data;
+ struct az6027_device_state *state = d->priv;
int ret;
u8 req;
@@ -526,8 +526,8 @@ static int az6027_ci_write_cam_control(struct dvb_ca_en50221 *ca,
u8 address,
u8 value)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
- struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
+ struct dvb_usb_device *d = ca->data;
+ struct az6027_device_state *state = d->priv;
int ret;
u8 req;
@@ -557,7 +557,7 @@ failed:
static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
+ struct dvb_usb_device *d = ca->data;
int ret;
u8 req;
@@ -588,8 +588,8 @@ static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
static int az6027_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
- struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
+ struct dvb_usb_device *d = ca->data;
+ struct az6027_device_state *state = d->priv;
int ret, i;
u8 req;
@@ -644,8 +644,8 @@ static int az6027_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
static int az6027_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
- struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
+ struct dvb_usb_device *d = ca->data;
+ struct az6027_device_state *state = d->priv;
int ret;
u8 req;
@@ -673,8 +673,8 @@ failed:
static int az6027_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
- struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
+ struct dvb_usb_device *d = ca->data;
+ struct az6027_device_state *state = d->priv;
int ret;
u8 req;
u16 value;
@@ -719,7 +719,7 @@ static void az6027_ci_uninit(struct dvb_usb_device *d)
if (NULL == d)
return;
- state = (struct az6027_device_state *)d->priv;
+ state = d->priv;
if (NULL == state)
return;
@@ -735,7 +735,7 @@ static void az6027_ci_uninit(struct dvb_usb_device *d)
static int az6027_ci_init(struct dvb_usb_adapter *a)
{
struct dvb_usb_device *d = a->dev;
- struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
+ struct az6027_device_state *state = d->priv;
int ret;
deb_info("%s", __func__);
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index 9f83560ba63d..586afe22d817 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -195,7 +195,7 @@ static int dtt200u_fe_get_frontend(struct dvb_frontend* fe,
static void dtt200u_fe_release(struct dvb_frontend* fe)
{
- struct dtt200u_fe_state *state = (struct dtt200u_fe_state*) fe->demodulator_priv;
+ struct dtt200u_fe_state *state = fe->demodulator_priv;
kfree(state);
}
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 8747960e6146..970b84c3f0b5 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -830,7 +830,7 @@ static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
for (i = 0; i < 256; i++) {
if (dw210x_op_rw(d->udev, 0xb6, 0xa0 , i, ibuf, 2, DW210X_READ_MSG) < 0) {
err("read eeprom failed.");
- return -1;
+ return -EIO;
} else {
eepromline[i%16] = ibuf[0];
eeprom[i] = ibuf[0];
@@ -869,7 +869,7 @@ static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2);
if (ret != 2) {
err("read eeprom failed.");
- return -1;
+ return -EIO;
} else {
eepromline[i % 16] = ibuf[0];
eeprom[i] = ibuf[0];
@@ -903,7 +903,7 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
{
- struct dw2102_state *state = (struct dw2102_state *)d->priv;
+ struct dw2102_state *state = d->priv;
int ret = 0;
info("%s: %d, initialized %d", __func__, i, state->initialized);
@@ -946,7 +946,7 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
for (i = 0; i < 6; i++) {
obuf[1] = 0xf0 + i;
if (i2c_transfer(&d->i2c_adap, msg, 2) != 2)
- return -1;
+ return -EIO;
else
mac[i] = ibuf[0];
}
@@ -978,8 +978,7 @@ static int dw210x_set_voltage(struct dvb_frontend *fe,
.len = 2,
};
- struct dvb_usb_adapter *udev_adap =
- (struct dvb_usb_adapter *)(fe->dvb->priv);
+ struct dvb_usb_adapter *udev_adap = fe->dvb->priv;
if (voltage == SEC_VOLTAGE_18)
msg.buf = command_18v;
else if (voltage == SEC_VOLTAGE_13)
@@ -993,9 +992,8 @@ static int dw210x_set_voltage(struct dvb_frontend *fe,
static int s660_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct dvb_usb_adapter *d =
- (struct dvb_usb_adapter *)(fe->dvb->priv);
- struct dw2102_state *st = (struct dw2102_state *)d->dev->priv;
+ struct dvb_usb_adapter *d = fe->dvb->priv;
+ struct dw2102_state *st = d->dev->priv;
dw210x_set_voltage(fe, voltage);
if (st->old_set_voltage)
@@ -1014,8 +1012,7 @@ static void dw210x_led_ctrl(struct dvb_frontend *fe, int offon)
.buf = led_off,
.len = 1
};
- struct dvb_usb_adapter *udev_adap =
- (struct dvb_usb_adapter *)(fe->dvb->priv);
+ struct dvb_usb_adapter *udev_adap = fe->dvb->priv;
if (offon)
msg.buf = led_on;
@@ -1025,9 +1022,8 @@ static void dw210x_led_ctrl(struct dvb_frontend *fe, int offon)
static int tt_s2_4600_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
- struct dvb_usb_adapter *d =
- (struct dvb_usb_adapter *)(fe->dvb->priv);
- struct dw2102_state *st = (struct dw2102_state *)d->dev->priv;
+ struct dvb_usb_adapter *d = fe->dvb->priv;
+ struct dw2102_state *st = d->dev->priv;
int ret;
ret = st->fe_read_status(fe, status);
@@ -2576,7 +2572,7 @@ static int dw2102_probe(struct usb_interface *intf,
static void dw2102_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- struct dw2102_state *st = (struct dw2102_state *)d->priv;
+ struct dw2102_state *st = d->priv;
struct i2c_client *client;
/* remove I2C client for tuner */
diff --git a/drivers/media/usb/dvb-usb/opera1.c b/drivers/media/usb/dvb-usb/opera1.c
index 0da86f58aff6..98b2177667d2 100644
--- a/drivers/media/usb/dvb-usb/opera1.c
+++ b/drivers/media/usb/dvb-usb/opera1.c
@@ -172,8 +172,7 @@ static int opera1_set_voltage(struct dvb_frontend *fe,
struct i2c_msg msg[] = {
{.addr = ADDR_B600_VOLTAGE_13V,.flags = 0,.buf = command_13v,.len = 1},
};
- struct dvb_usb_adapter *udev_adap =
- (struct dvb_usb_adapter *)(fe->dvb->priv);
+ struct dvb_usb_adapter *udev_adap = fe->dvb->priv;
if (voltage == SEC_VOLTAGE_18) {
msg[0].addr = ADDR_B601_VOLTAGE_18V;
msg[0].buf = command_18v;
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index da42c989e071..2aab49003493 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -108,7 +108,7 @@ struct pctv452e_state {
static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
unsigned int write_len, unsigned int read_len)
{
- struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ struct pctv452e_state *state = d->priv;
u8 *buf;
u8 id;
unsigned int rlen;
@@ -159,8 +159,8 @@ static int tt3650_ci_msg_locked(struct dvb_ca_en50221 *ca,
u8 cmd, u8 *data, unsigned int write_len,
unsigned int read_len)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
- struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ struct dvb_usb_device *d = ca->data;
+ struct pctv452e_state *state = d->priv;
int ret;
mutex_lock(&state->ca_mutex);
@@ -292,8 +292,8 @@ static int tt3650_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
static int tt3650_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
- struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
- struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ struct dvb_usb_device *d = ca->data;
+ struct pctv452e_state *state = d->priv;
u8 buf[1];
int ret;
@@ -361,7 +361,7 @@ static void tt3650_ci_uninit(struct dvb_usb_device *d)
if (NULL == d)
return;
- state = (struct pctv452e_state *)d->priv;
+ state = d->priv;
if (NULL == state)
return;
@@ -379,7 +379,7 @@ static void tt3650_ci_uninit(struct dvb_usb_device *d)
static int tt3650_ci_init(struct dvb_usb_adapter *a)
{
struct dvb_usb_device *d = a->dev;
- struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ struct pctv452e_state *state = d->priv;
int ret;
ci_dbg("%s", __func__);
@@ -417,7 +417,7 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
const u8 *snd_buf, u8 snd_len,
u8 *rcv_buf, u8 rcv_len)
{
- struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ struct pctv452e_state *state = d->priv;
u8 *buf;
u8 id;
int ret;
@@ -516,7 +516,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
{
- struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ struct pctv452e_state *state = d->priv;
u8 *b0, *rx;
int ret;
@@ -567,7 +567,7 @@ ret:
static int pctv452e_rc_query(struct dvb_usb_device *d)
{
- struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ struct pctv452e_state *state = d->priv;
u8 *b, *rx;
int ret, i;
u8 id;
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index 29dfcc6d0b0a..db1fab96d529 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -620,7 +620,7 @@ static struct i2c_driver s2250_driver = {
.driver = {
.name = "s2250",
},
- .probe_new = s2250_probe,
+ .probe = s2250_probe,
.remove = s2250_remove,
.id_table = s2250_id,
};
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 6f443c542c6d..640737d3b8ae 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -179,7 +179,8 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev)
for (i = 0; i < MAX_URBS; i++) {
usb_kill_urb(&dev->surbs[i].urb);
- cancel_work_sync(&dev->surbs[i].wq);
+ if (dev->surbs[i].wq.func)
+ cancel_work_sync(&dev->surbs[i].wq);
if (dev->surbs[i].cb) {
smscore_putbuffer(dev->coredev, dev->surbs[i].cb);
diff --git a/drivers/media/usb/stk1160/Kconfig b/drivers/media/usb/stk1160/Kconfig
index 4f50fb7db7b9..bf7c16baa9f8 100644
--- a/drivers/media/usb/stk1160/Kconfig
+++ b/drivers/media/usb/stk1160/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-config VIDEO_STK1160_COMMON
+config VIDEO_STK1160
tristate "STK1160 USB video capture support"
depends on VIDEO_DEV && I2C
-
+ select VIDEOBUF2_VMALLOC
+ select VIDEO_SAA711X
help
This is a video4linux driver for STK1160 based video capture devices.
@@ -12,10 +13,3 @@ config VIDEO_STK1160_COMMON
This driver only provides support for video capture. For audio
capture, you need to select the snd-usb-audio driver (i.e.
CONFIG_SND_USB_AUDIO).
-
-config VIDEO_STK1160
- tristate
- depends on VIDEO_STK1160_COMMON
- default y
- select VIDEOBUF2_VMALLOC
- select VIDEO_SAA711X
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index c4474d4c44e2..79faa2560613 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -1128,7 +1128,7 @@ static int ttusb_dec_stop_sec_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct ttusb_dec *dec = dvbdmxfeed->demux->priv;
u8 b0[] = { 0x00, 0x00 };
- struct filter_info *finfo = (struct filter_info *)dvbdmxfeed->priv;
+ struct filter_info *finfo = dvbdmxfeed->priv;
unsigned long flags;
b0[1] = finfo->stream_id;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index d631ce4f9f7b..08fcd2ffa727 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -184,7 +184,7 @@ static void uvc_stream_delete(struct uvc_streaming *stream)
usb_put_intf(stream->intf);
- kfree(stream->format);
+ kfree(stream->formats);
kfree(stream->header.bmaControls);
kfree(stream);
}
@@ -221,7 +221,8 @@ static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev,
static int uvc_parse_format(struct uvc_device *dev,
struct uvc_streaming *streaming, struct uvc_format *format,
- u32 **intervals, unsigned char *buffer, int buflen)
+ struct uvc_frame *frames, u32 **intervals, const unsigned char *buffer,
+ int buflen)
{
struct usb_interface *intf = streaming->intf;
struct usb_host_interface *alts = intf->cur_altsetting;
@@ -235,6 +236,7 @@ static int uvc_parse_format(struct uvc_device *dev,
format->type = buffer[2];
format->index = buffer[3];
+ format->frames = frames;
switch (buffer[2]) {
case UVC_VS_FORMAT_UNCOMPRESSED:
@@ -339,8 +341,8 @@ static int uvc_parse_format(struct uvc_device *dev,
ftype = 0;
/* Create a dummy frame descriptor. */
- frame = &format->frame[0];
- memset(&format->frame[0], 0, sizeof(format->frame[0]));
+ frame = &frames[0];
+ memset(frame, 0, sizeof(*frame));
frame->bFrameIntervalType = 1;
frame->dwDefaultFrameInterval = 1;
frame->dwFrameInterval = *intervals;
@@ -370,7 +372,9 @@ static int uvc_parse_format(struct uvc_device *dev,
*/
while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
buffer[2] == ftype) {
- frame = &format->frame[format->nframes];
+ unsigned int maxIntervalIndex;
+
+ frame = &frames[format->nframes];
if (ftype != UVC_VS_FRAME_FRAME_BASED)
n = buflen > 25 ? buffer[25] : 0;
else
@@ -405,8 +409,27 @@ static int uvc_parse_format(struct uvc_device *dev,
get_unaligned_le32(&buffer[17]);
frame->bFrameIntervalType = buffer[21];
}
+
+ /*
+ * Copy the frame intervals.
+ *
+ * Some bogus devices report dwMinFrameInterval equal to
+ * dwMaxFrameInterval and have dwFrameIntervalStep set to
+ * zero. Setting all null intervals to 1 fixes the problem and
+ * some other divisions by zero that could happen.
+ */
frame->dwFrameInterval = *intervals;
+ for (i = 0; i < n; ++i) {
+ interval = get_unaligned_le32(&buffer[26+4*i]);
+ (*intervals)[i] = interval ? interval : 1;
+ }
+
+ /*
+ * Apply more fixes, quirks and workarounds to handle incorrect
+ * or broken descriptors.
+ */
+
/*
* Several UVC chipsets screw up dwMaxVideoFrameBufferSize
* completely. Observed behaviours range from setting the
@@ -421,30 +444,25 @@ static int uvc_parse_format(struct uvc_device *dev,
* frame->wWidth * frame->wHeight / 8;
/*
- * Some bogus devices report dwMinFrameInterval equal to
- * dwMaxFrameInterval and have dwFrameIntervalStep set to
- * zero. Setting all null intervals to 1 fixes the problem and
- * some other divisions by zero that could happen.
+ * Clamp the default frame interval to the boundaries. A zero
+ * bFrameIntervalType value indicates a continuous frame
+ * interval range, with dwFrameInterval[0] storing the minimum
+ * value and dwFrameInterval[1] storing the maximum value.
*/
- for (i = 0; i < n; ++i) {
- interval = get_unaligned_le32(&buffer[26+4*i]);
- *(*intervals)++ = interval ? interval : 1;
- }
+ maxIntervalIndex = frame->bFrameIntervalType ? n - 1 : 1;
+ frame->dwDefaultFrameInterval =
+ clamp(frame->dwDefaultFrameInterval,
+ frame->dwFrameInterval[0],
+ frame->dwFrameInterval[maxIntervalIndex]);
/*
- * Make sure that the default frame interval stays between
- * the boundaries.
+ * Some devices report frame intervals that are not functional.
+ * If the corresponding quirk is set, restrict operation to the
+ * first interval only.
*/
- n -= frame->bFrameIntervalType ? 1 : 2;
- frame->dwDefaultFrameInterval =
- min(frame->dwFrameInterval[n],
- max(frame->dwFrameInterval[0],
- frame->dwDefaultFrameInterval));
-
if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) {
frame->bFrameIntervalType = 1;
- frame->dwFrameInterval[0] =
- frame->dwDefaultFrameInterval;
+ (*intervals)[0] = frame->dwDefaultFrameInterval;
}
uvc_dbg(dev, DESCR, "- %ux%u (%u.%u fps)\n",
@@ -453,6 +471,8 @@ static int uvc_parse_format(struct uvc_device *dev,
(100000000 / frame->dwDefaultFrameInterval) % 10);
format->nframes++;
+ *intervals += n;
+
buflen -= buffer[0];
buffer += buffer[0];
}
@@ -493,7 +513,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
struct uvc_format *format;
struct uvc_frame *frame;
struct usb_host_interface *alts = &intf->altsetting[0];
- unsigned char *_buffer, *buffer = alts->extra;
+ const unsigned char *_buffer, *buffer = alts->extra;
int _buflen, buflen = alts->extralen;
unsigned int nformats = 0, nframes = 0, nintervals = 0;
unsigned int size, i, n, p;
@@ -677,7 +697,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
frame = (struct uvc_frame *)&format[nformats];
interval = (u32 *)&frame[nframes];
- streaming->format = format;
+ streaming->formats = format;
streaming->nformats = 0;
/* Parse the format descriptors. */
@@ -687,8 +707,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
case UVC_VS_FORMAT_MJPEG:
case UVC_VS_FORMAT_DV:
case UVC_VS_FORMAT_FRAME_BASED:
- format->frame = frame;
- ret = uvc_parse_format(dev, streaming, format,
+ ret = uvc_parse_format(dev, streaming, format, frame,
&interval, buffer, buflen);
if (ret < 0)
goto error;
@@ -1147,7 +1166,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
static int uvc_parse_control(struct uvc_device *dev)
{
struct usb_host_interface *alts = dev->intf->cur_altsetting;
- unsigned char *buffer = alts->extra;
+ const unsigned char *buffer = alts->extra;
int buflen = alts->extralen;
int ret;
@@ -3011,15 +3030,33 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
- /* Acer EasyCamera */
+ /* Intel D410/ASR depth camera */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x5986,
- .idProduct = 0x1180,
+ .idVendor = 0x8086,
+ .idProduct = 0x0ad2,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
- .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
+ /* Intel D415/ASRC depth camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x8086,
+ .idProduct = 0x0ad3,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
+ /* Intel D430/AWG depth camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x8086,
+ .idProduct = 0x0ad4,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
/* Intel RealSense D4M */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -3029,6 +3066,42 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
+ /* Intel D435/AWGC depth camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x8086,
+ .idProduct = 0x0b07,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
+ /* Intel D435i depth camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x8086,
+ .idProduct = 0x0b3a,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
+ /* Intel D405 Depth Camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x8086,
+ .idProduct = 0x0b5b,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
+ /* Intel D455 Depth Camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x8086,
+ .idProduct = 0x0b5c,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
/* Generic USB Video Class */
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) },
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) },
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 35453f81c1d9..5ac2a424b13d 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -161,7 +161,7 @@ free_map:
* the Video Probe and Commit negotiation, but some hardware don't implement
* that feature.
*/
-static u32 uvc_try_frame_interval(struct uvc_frame *frame, u32 interval)
+static u32 uvc_try_frame_interval(const struct uvc_frame *frame, u32 interval)
{
unsigned int i;
@@ -210,10 +210,11 @@ static u32 uvc_v4l2_get_bytesperline(const struct uvc_format *format,
static int uvc_v4l2_try_format(struct uvc_streaming *stream,
struct v4l2_format *fmt, struct uvc_streaming_control *probe,
- struct uvc_format **uvc_format, struct uvc_frame **uvc_frame)
+ const struct uvc_format **uvc_format,
+ const struct uvc_frame **uvc_frame)
{
- struct uvc_format *format = NULL;
- struct uvc_frame *frame = NULL;
+ const struct uvc_format *format = NULL;
+ const struct uvc_frame *frame = NULL;
u16 rw, rh;
unsigned int d, maxd;
unsigned int i;
@@ -235,7 +236,7 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
* format otherwise.
*/
for (i = 0; i < stream->nformats; ++i) {
- format = &stream->format[i];
+ format = &stream->formats[i];
if (format->fcc == fmt->fmt.pix.pixelformat)
break;
}
@@ -255,14 +256,14 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
maxd = (unsigned int)-1;
for (i = 0; i < format->nframes; ++i) {
- u16 w = format->frame[i].wWidth;
- u16 h = format->frame[i].wHeight;
+ u16 w = format->frames[i].wWidth;
+ u16 h = format->frames[i].wHeight;
d = min(w, rw) * min(h, rh);
d = w*h + rw*rh - 2*d;
if (d < maxd) {
maxd = d;
- frame = &format->frame[i];
+ frame = &format->frames[i];
}
if (maxd == 0)
@@ -319,8 +320,8 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
* accepted the requested format as-is.
*/
for (i = 0; i < stream->nformats; ++i) {
- if (probe->bFormatIndex == stream->format[i].index) {
- format = &stream->format[i];
+ if (probe->bFormatIndex == stream->formats[i].index) {
+ format = &stream->formats[i];
break;
}
}
@@ -331,8 +332,8 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
probe->bFormatIndex);
for (i = 0; i < format->nframes; ++i) {
- if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
- frame = &format->frame[i];
+ if (probe->bFrameIndex == format->frames[i].bFrameIndex) {
+ frame = &format->frames[i];
break;
}
}
@@ -363,8 +364,8 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
static int uvc_v4l2_get_format(struct uvc_streaming *stream,
struct v4l2_format *fmt)
{
- struct uvc_format *format;
- struct uvc_frame *frame;
+ const struct uvc_format *format;
+ const struct uvc_frame *frame;
int ret = 0;
if (fmt->type != stream->type)
@@ -398,8 +399,8 @@ static int uvc_v4l2_set_format(struct uvc_streaming *stream,
struct v4l2_format *fmt)
{
struct uvc_streaming_control probe;
- struct uvc_format *format;
- struct uvc_frame *frame;
+ const struct uvc_format *format;
+ const struct uvc_frame *frame;
int ret;
if (fmt->type != stream->type)
@@ -465,8 +466,8 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
{
struct uvc_streaming_control probe;
struct v4l2_fract timeperframe;
- struct uvc_format *format;
- struct uvc_frame *frame;
+ const struct uvc_format *format;
+ const struct uvc_frame *frame;
u32 interval, maxd;
unsigned int i;
int ret;
@@ -501,19 +502,19 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
for (i = 0; i < format->nframes && maxd != 0; i++) {
u32 d, ival;
- if (&format->frame[i] == stream->cur_frame)
+ if (&format->frames[i] == stream->cur_frame)
continue;
- if (format->frame[i].wWidth != stream->cur_frame->wWidth ||
- format->frame[i].wHeight != stream->cur_frame->wHeight)
+ if (format->frames[i].wWidth != stream->cur_frame->wWidth ||
+ format->frames[i].wHeight != stream->cur_frame->wHeight)
continue;
- ival = uvc_try_frame_interval(&format->frame[i], interval);
+ ival = uvc_try_frame_interval(&format->frames[i], interval);
d = abs((s32)ival - interval);
if (d >= maxd)
continue;
- frame = &format->frame[i];
+ frame = &format->frames[i];
probe.bFrameIndex = frame->bFrameIndex;
probe.dwFrameInterval = ival;
maxd = d;
@@ -697,7 +698,7 @@ static int uvc_ioctl_querycap(struct file *file, void *fh,
static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
struct v4l2_fmtdesc *fmt)
{
- struct uvc_format *format;
+ const struct uvc_format *format;
enum v4l2_buf_type type = fmt->type;
u32 index = fmt->index;
@@ -708,7 +709,7 @@ static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
fmt->index = index;
fmt->type = type;
- format = &stream->format[fmt->index];
+ format = &stream->formats[fmt->index];
fmt->flags = 0;
if (format->flags & UVC_FMT_FLAG_COMPRESSED)
fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
@@ -1249,15 +1250,15 @@ static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
{
struct uvc_fh *handle = fh;
struct uvc_streaming *stream = handle->stream;
- struct uvc_format *format = NULL;
- struct uvc_frame *frame = NULL;
+ const struct uvc_format *format = NULL;
+ const struct uvc_frame *frame = NULL;
unsigned int index;
unsigned int i;
/* Look for the given pixel format */
for (i = 0; i < stream->nformats; i++) {
- if (stream->format[i].fcc == fsize->pixel_format) {
- format = &stream->format[i];
+ if (stream->formats[i].fcc == fsize->pixel_format) {
+ format = &stream->formats[i];
break;
}
}
@@ -1266,10 +1267,10 @@ static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
/* Skip duplicate frame sizes */
for (i = 0, index = 0; i < format->nframes; i++) {
- if (frame && frame->wWidth == format->frame[i].wWidth &&
- frame->wHeight == format->frame[i].wHeight)
+ if (frame && frame->wWidth == format->frames[i].wWidth &&
+ frame->wHeight == format->frames[i].wHeight)
continue;
- frame = &format->frame[i];
+ frame = &format->frames[i];
if (index == fsize->index)
break;
index++;
@@ -1289,16 +1290,16 @@ static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
{
struct uvc_fh *handle = fh;
struct uvc_streaming *stream = handle->stream;
- struct uvc_format *format = NULL;
- struct uvc_frame *frame = NULL;
+ const struct uvc_format *format = NULL;
+ const struct uvc_frame *frame = NULL;
unsigned int nintervals;
unsigned int index;
unsigned int i;
/* Look for the given pixel format and frame size */
for (i = 0; i < stream->nformats; i++) {
- if (stream->format[i].fcc == fival->pixel_format) {
- format = &stream->format[i];
+ if (stream->formats[i].fcc == fival->pixel_format) {
+ format = &stream->formats[i];
break;
}
}
@@ -1307,9 +1308,9 @@ static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
index = fival->index;
for (i = 0; i < format->nframes; i++) {
- if (format->frame[i].wWidth == fival->width &&
- format->frame[i].wHeight == fival->height) {
- frame = &format->frame[i];
+ if (format->frames[i].wWidth == fival->width &&
+ format->frames[i].wHeight == fival->height) {
+ frame = &format->frames[i];
nintervals = frame->bFrameIntervalType ?: 1;
if (index < nintervals)
break;
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index d4b023d4de7c..28dde08ec6c5 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -137,8 +137,8 @@ static const struct usb_device_id elgato_cam_link_4k = {
static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl)
{
- struct uvc_format *format = NULL;
- struct uvc_frame *frame = NULL;
+ const struct uvc_format *format = NULL;
+ const struct uvc_frame *frame = NULL;
unsigned int i;
/*
@@ -166,8 +166,8 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
}
for (i = 0; i < stream->nformats; ++i) {
- if (stream->format[i].index == ctrl->bFormatIndex) {
- format = &stream->format[i];
+ if (stream->formats[i].index == ctrl->bFormatIndex) {
+ format = &stream->formats[i];
break;
}
}
@@ -176,8 +176,8 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
return;
for (i = 0; i < format->nframes; ++i) {
- if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) {
- frame = &format->frame[i];
+ if (format->frames[i].bFrameIndex == ctrl->bFrameIndex) {
+ frame = &format->frames[i];
break;
}
}
@@ -2100,8 +2100,8 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset)
int uvc_video_init(struct uvc_streaming *stream)
{
struct uvc_streaming_control *probe = &stream->ctrl;
- struct uvc_format *format = NULL;
- struct uvc_frame *frame = NULL;
+ const struct uvc_format *format = NULL;
+ const struct uvc_frame *frame = NULL;
struct uvc_urb *uvc_urb;
unsigned int i;
int ret;
@@ -2161,7 +2161,7 @@ int uvc_video_init(struct uvc_streaming *stream)
* available format otherwise.
*/
for (i = stream->nformats; i > 0; --i) {
- format = &stream->format[i-1];
+ format = &stream->formats[i-1];
if (format->index == probe->bFormatIndex)
break;
}
@@ -2179,7 +2179,7 @@ int uvc_video_init(struct uvc_streaming *stream)
* descriptor is not found, use the first available frame.
*/
for (i = format->nframes; i > 0; --i) {
- frame = &format->frame[i-1];
+ frame = &format->frames[i-1];
if (frame->bFrameIndex == probe->bFrameIndex)
break;
}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 9a596c8d894a..6fb0a78b1b00 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -251,7 +251,7 @@ struct uvc_frame {
u32 dwMaxVideoFrameBufferSize;
u8 bFrameIntervalType;
u32 dwDefaultFrameInterval;
- u32 *dwFrameInterval;
+ const u32 *dwFrameInterval;
};
struct uvc_format {
@@ -265,7 +265,7 @@ struct uvc_format {
u32 flags;
unsigned int nframes;
- struct uvc_frame *frame;
+ const struct uvc_frame *frames;
};
struct uvc_streaming_header {
@@ -438,12 +438,12 @@ struct uvc_streaming {
enum v4l2_buf_type type;
unsigned int nformats;
- struct uvc_format *format;
+ const struct uvc_format *formats;
struct uvc_streaming_control ctrl;
- struct uvc_format *def_format;
- struct uvc_format *cur_format;
- struct uvc_frame *cur_frame;
+ const struct uvc_format *def_format;
+ const struct uvc_format *cur_format;
+ const struct uvc_frame *cur_frame;
/*
* Protect access to ctrl, cur_format, cur_frame and hardware video
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 1c0d23c52203..5687089bea6e 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -1411,7 +1411,7 @@ static struct i2c_driver tuner_driver = {
.name = "tuner",
.pm = &tuner_pm_ops,
},
- .probe_new = tuner_probe,
+ .probe = tuner_probe,
.remove = tuner_remove,
.command = tuner_command,
.id_table = tuner_id,
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 3c5ab5ecd678..bee1535b04d3 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -235,91 +235,93 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{
static const struct v4l2_format_info formats[] = {
/* RGB formats */
- { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
/* YUV packed formats */
- { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
/* YUV planar formats */
- { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 },
-
- { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+
+ { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
/* Tiled YUV formats */
- { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }},
+ { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
/* YUV planar formats, non contiguous variant */
- { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
-
- { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
/* Bayer RGB formats */
- { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
};
unsigned int i;
@@ -379,7 +381,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
if (info->mem_planes == 1) {
plane = &pixfmt->plane_fmt[0];
- plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
+ plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
plane->sizeimage = 0;
for (i = 0; i < info->comp_planes; i++) {
@@ -393,7 +395,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
plane->sizeimage += info->bpp[i] *
DIV_ROUND_UP(aligned_width, hdiv) *
- DIV_ROUND_UP(aligned_height, vdiv);
+ DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
}
} else {
for (i = 0; i < info->comp_planes; i++) {
@@ -407,7 +409,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
plane = &pixfmt->plane_fmt[i];
plane->bytesperline =
- info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv);
+ info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) / info->bpp_div[i];
plane->sizeimage =
plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
}
@@ -433,7 +435,7 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
pixfmt->width = width;
pixfmt->height = height;
pixfmt->pixelformat = pixelformat;
- pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
+ pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
pixfmt->sizeimage = 0;
for (i = 0; i < info->comp_planes; i++) {
@@ -447,7 +449,7 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
pixfmt->sizeimage += info->bpp[i] *
DIV_ROUND_UP(aligned_width, hdiv) *
- DIV_ROUND_UP(aligned_height, vdiv);
+ DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
}
return 0;
}
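
The new bpp_div divisor lets a format descriptor express a per-plane cost that is not a whole number of bytes per sample, which is what packed 10-bit layouts need. Below is a minimal standalone sketch of the resulting stride arithmetic, assuming a hypothetical plane described as .bpp = 5 with .bpp_div = 4 (5 bytes covering 4 samples); the numbers are illustrative only and are not taken from the format table above. The kernel code additionally aligns the width to the format's block width before this step.

/* Illustration only: stride arithmetic once bpp_div is applied. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical packed 10-bit plane: 5 bytes cover 4 samples. */
	unsigned int width = 1920;
	unsigned int bpp = 5, bpp_div = 4;
	unsigned int bytesperline = width * bpp / bpp_div;

	printf("bytesperline = %u\n", bytesperline);	/* 2400 */
	return 0;
}
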
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index 29169170880a..a662fb60f73f 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -111,6 +111,7 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
struct v4l2_ctrl_vp9_frame *p_vp9_frame;
struct v4l2_ctrl_fwht_params *p_fwht_params;
struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
+ struct v4l2_ctrl_av1_sequence *p_av1_sequence;
void *p = ptr.p + idx * ctrl->elem_size;
if (ctrl->p_def.p_const)
@@ -157,6 +158,10 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING;
break;
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ p_av1_sequence = p;
+ p_av1_sequence->bit_depth = 8;
+ break;
case V4L2_CTRL_TYPE_FWHT_PARAMS:
p_fwht_params = p;
p_fwht_params->version = V4L2_FWHT_VERSION;
@@ -350,6 +355,19 @@ void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
pr_cont("HEVC_DECODE_PARAMS");
break;
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ pr_cont("AV1_SEQUENCE");
+ break;
+ case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
+ pr_cont("AV1_TILE_GROUP_ENTRY");
+ break;
+ case V4L2_CTRL_TYPE_AV1_FRAME:
+ pr_cont("AV1_FRAME");
+ break;
+ case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
+ pr_cont("AV1_FILM_GRAIN");
+ break;
default:
pr_cont("unknown type %d", ctrl->type);
break;
@@ -547,6 +565,231 @@ validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame)
return 0;
}
+static int validate_av1_quantization(struct v4l2_av1_quantization *q)
+{
+ if (q->flags > GENMASK(2, 0))
+ return -EINVAL;
+
+ if (q->delta_q_y_dc < -64 || q->delta_q_y_dc > 63 ||
+ q->delta_q_u_dc < -64 || q->delta_q_u_dc > 63 ||
+ q->delta_q_v_dc < -64 || q->delta_q_v_dc > 63 ||
+ q->delta_q_u_ac < -64 || q->delta_q_u_ac > 63 ||
+ q->delta_q_v_ac < -64 || q->delta_q_v_ac > 63 ||
+ q->delta_q_res > GENMASK(1, 0))
+ return -EINVAL;
+
+ if (q->qm_y > GENMASK(3, 0) ||
+ q->qm_u > GENMASK(3, 0) ||
+ q->qm_v > GENMASK(3, 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_segmentation(struct v4l2_av1_segmentation *s)
+{
+ u32 i;
+ u32 j;
+
+ if (s->flags > GENMASK(4, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(s->feature_data); i++) {
+ static const int segmentation_feature_signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 };
+ static const int segmentation_feature_max[] = { 255, 63, 63, 63, 63, 7, 0, 0};
+
+ for (j = 0; j < ARRAY_SIZE(s->feature_data[j]); j++) {
+ s32 limit = segmentation_feature_max[j];
+
+ if (segmentation_feature_signed[j]) {
+ if (s->feature_data[i][j] < -limit ||
+ s->feature_data[i][j] > limit)
+ return -EINVAL;
+ } else {
+ if (s->feature_data[i][j] < 0 || s->feature_data[i][j] > limit)
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int validate_av1_loop_filter(struct v4l2_av1_loop_filter *lf)
+{
+ u32 i;
+
+ if (lf->flags > GENMASK(3, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->level); i++) {
+ if (lf->level[i] > GENMASK(5, 0))
+ return -EINVAL;
+ }
+
+ if (lf->sharpness > GENMASK(2, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) {
+ if (lf->ref_deltas[i] < -64 || lf->ref_deltas[i] > 63)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) {
+ if (lf->mode_deltas[i] < -64 || lf->mode_deltas[i] > 63)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_av1_cdef(struct v4l2_av1_cdef *cdef)
+{
+ u32 i;
+
+ if (cdef->damping_minus_3 > GENMASK(1, 0) ||
+ cdef->bits > GENMASK(1, 0))
+ return -EINVAL;
+
+ for (i = 0; i < 1 << cdef->bits; i++) {
+ if (cdef->y_pri_strength[i] > GENMASK(3, 0) ||
+ cdef->y_sec_strength[i] > 4 ||
+ cdef->uv_pri_strength[i] > GENMASK(3, 0) ||
+ cdef->uv_sec_strength[i] > 4)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_av1_loop_restoration(struct v4l2_av1_loop_restoration *lr)
+{
+ if (lr->lr_unit_shift > 3 || lr->lr_uv_shift > 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_film_grain(struct v4l2_ctrl_av1_film_grain *fg)
+{
+ u32 i;
+
+ if (fg->flags > GENMASK(4, 0))
+ return -EINVAL;
+
+ if (fg->film_grain_params_ref_idx > GENMASK(2, 0) ||
+ fg->num_y_points > 14 ||
+ fg->num_cb_points > 10 ||
+ fg->num_cr_points > GENMASK(3, 0) ||
+ fg->grain_scaling_minus_8 > GENMASK(1, 0) ||
+ fg->ar_coeff_lag > GENMASK(1, 0) ||
+ fg->ar_coeff_shift_minus_6 > GENMASK(1, 0) ||
+ fg->grain_scale_shift > GENMASK(1, 0))
+ return -EINVAL;
+
+ if (!(fg->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN))
+ return 0;
+
+ for (i = 1; i < fg->num_y_points; i++)
+ if (fg->point_y_value[i] <= fg->point_y_value[i - 1])
+ return -EINVAL;
+
+ for (i = 1; i < fg->num_cb_points; i++)
+ if (fg->point_cb_value[i] <= fg->point_cb_value[i - 1])
+ return -EINVAL;
+
+ for (i = 1; i < fg->num_cr_points; i++)
+ if (fg->point_cr_value[i] <= fg->point_cr_value[i - 1])
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_frame(struct v4l2_ctrl_av1_frame *f)
+{
+ int ret = 0;
+
+ ret = validate_av1_quantization(&f->quantization);
+ if (ret)
+ return ret;
+ ret = validate_av1_segmentation(&f->segmentation);
+ if (ret)
+ return ret;
+ ret = validate_av1_loop_filter(&f->loop_filter);
+ if (ret)
+ return ret;
+ ret = validate_av1_cdef(&f->cdef);
+ if (ret)
+ return ret;
+ ret = validate_av1_loop_restoration(&f->loop_restoration);
+ if (ret)
+ return ret;
+
+ if (f->flags &
+ ~(V4L2_AV1_FRAME_FLAG_SHOW_FRAME |
+ V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME |
+ V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE |
+ V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE |
+ V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS |
+ V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV |
+ V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC |
+ V4L2_AV1_FRAME_FLAG_USE_SUPERRES |
+ V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV |
+ V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE |
+ V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS |
+ V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF |
+ V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION |
+ V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT |
+ V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET |
+ V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED |
+ V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT |
+ V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE |
+ V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT |
+ V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING))
+ return -EINVAL;
+
+ if (f->superres_denom > GENMASK(2, 0) + 9)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_sequence(struct v4l2_ctrl_av1_sequence *s)
+{
+ if (s->flags &
+ ~(V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE |
+ V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION |
+ V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME |
+ V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE |
+ V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X |
+ V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y |
+ V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT |
+ V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q))
+ return -EINVAL;
+
+ if (s->seq_profile == 1 && s->flags & V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME)
+ return -EINVAL;
+
+ /* reserved */
+ if (s->seq_profile > 2)
+ return -EINVAL;
+
+ /* TODO: PROFILES */
+ return 0;
+}
+
/*
* Compound controls validation requires setting unused fields/flags to zero
* in order to properly detect unchanged controls with v4l2_ctrl_type_op_equal's
@@ -911,6 +1154,14 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
case V4L2_CTRL_TYPE_VP9_FRAME:
return validate_vp9_frame(p);
+ case V4L2_CTRL_TYPE_AV1_FRAME:
+ return validate_av1_frame(p);
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ return validate_av1_sequence(p);
+ case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
+ break;
+ case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
+ return validate_av1_film_grain(p);
case V4L2_CTRL_TYPE_AREA:
area = p;
@@ -1602,6 +1853,18 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_VP9_FRAME:
elem_size = sizeof(struct v4l2_ctrl_vp9_frame);
break;
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ elem_size = sizeof(struct v4l2_ctrl_av1_sequence);
+ break;
+ case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
+ elem_size = sizeof(struct v4l2_ctrl_av1_tile_group_entry);
+ break;
+ case V4L2_CTRL_TYPE_AV1_FRAME:
+ elem_size = sizeof(struct v4l2_ctrl_av1_frame);
+ break;
+ case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
+ elem_size = sizeof(struct v4l2_ctrl_av1_film_grain);
+ break;
case V4L2_CTRL_TYPE_AREA:
elem_size = sizeof(struct v4l2_area);
break;
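
For drivers, the new control types are reachable through the regular compound-control helpers. The following is a minimal sketch of a stateless AV1 decoder registering them, assuming a hypothetical foo_dev that embeds a control handler; a real driver would pass its own ctrl_ops and default payloads instead of NULL.

#include <media/v4l2-ctrls.h>

struct foo_dev {				/* hypothetical driver state */
	struct v4l2_ctrl_handler ctrl_handler;
};

static int foo_init_av1_ctrls(struct foo_dev *dev)
{
	struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;

	v4l2_ctrl_handler_init(hdl, 4);
	v4l2_ctrl_new_std_compound(hdl, NULL, V4L2_CID_STATELESS_AV1_SEQUENCE,
				   v4l2_ctrl_ptr_create(NULL));
	v4l2_ctrl_new_std_compound(hdl, NULL, V4L2_CID_STATELESS_AV1_FRAME,
				   v4l2_ctrl_ptr_create(NULL));
	v4l2_ctrl_new_std_compound(hdl, NULL, V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
				   v4l2_ctrl_ptr_create(NULL));
	v4l2_ctrl_new_std_compound(hdl, NULL, V4L2_CID_STATELESS_AV1_FILM_GRAIN,
				   v4l2_ctrl_ptr_create(NULL));

	return hdl->error;	/* non-zero if any registration failed */
}
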
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
index 564fedee2c88..8696eb1cdd61 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-defs.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -499,6 +499,40 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
NULL,
};
+ static const char * const av1_profile[] = {
+ "Main",
+ "High",
+ "Professional",
+ NULL,
+ };
+ static const char * const av1_level[] = {
+ "2.0",
+ "2.1",
+ "2.2",
+ "2.3",
+ "3.0",
+ "3.1",
+ "3.2",
+ "3.3",
+ "4.0",
+ "4.1",
+ "4.2",
+ "4.3",
+ "5.0",
+ "5.1",
+ "5.2",
+ "5.3",
+ "6.0",
+ "6.1",
+ "6.2",
+ "6.3",
+ "7.0",
+ "7.1",
+ "7.2",
+ "7.3",
+ NULL,
+ };
+
static const char * const hevc_profile[] = {
"Main",
"Main Still Picture",
@@ -704,6 +738,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return hevc_tier;
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
return hevc_loop_filter_mode;
+ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
+ return av1_profile;
+ case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
+ return av1_level;
case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
return hevc_decode_mode;
case V4L2_CID_STATELESS_HEVC_START_CODE:
@@ -1004,6 +1042,10 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
+ /* AV1 controls */
+ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return "AV1 Profile";
+ case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return "AV1 Level";
+
/* CAMERA controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
@@ -1190,6 +1232,10 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode";
case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code";
case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets";
+ case V4L2_CID_STATELESS_AV1_SEQUENCE: return "AV1 Sequence Parameters";
+ case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: return "AV1 Tile Group Entry";
+ case V4L2_CID_STATELESS_AV1_FRAME: return "AV1 Frame Parameters";
+ case V4L2_CID_STATELESS_AV1_FILM_GRAIN: return "AV1 Film Grain";
/* Colorimetry controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1365,6 +1411,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
case V4L2_CID_STATELESS_HEVC_START_CODE:
case V4L2_CID_STATELESS_H264_DECODE_MODE:
@@ -1531,6 +1579,19 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_STATELESS_VP9_FRAME:
*type = V4L2_CTRL_TYPE_VP9_FRAME;
break;
+ case V4L2_CID_STATELESS_AV1_SEQUENCE:
+ *type = V4L2_CTRL_TYPE_AV1_SEQUENCE;
+ break;
+ case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY:
+ *type = V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY;
+ *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
+ break;
+ case V4L2_CID_STATELESS_AV1_FRAME:
+ *type = V4L2_CTRL_TYPE_AV1_FRAME;
+ break;
+ case V4L2_CID_STATELESS_AV1_FILM_GRAIN:
+ *type = V4L2_CTRL_TYPE_AV1_FILM_GRAIN;
+ break;
case V4L2_CID_UNIT_CELL_SIZE:
*type = V4L2_CTRL_TYPE_AREA;
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
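
The menu strings registered above are what user space sees when it enumerates V4L2_CID_MPEG_VIDEO_AV1_PROFILE. A hedged userspace sketch follows (device open/close and error reporting omitted); it only works against UAPI headers that already contain this control ID.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void dump_av1_profiles(int fd)		/* fd: an open V4L2 device node */
{
	struct v4l2_queryctrl qc = { .id = V4L2_CID_MPEG_VIDEO_AV1_PROFILE };
	struct v4l2_querymenu qm;
	__s32 i;

	if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) < 0)
		return;				/* control not supported */

	for (i = qc.minimum; i <= qc.maximum; i++) {
		memset(&qm, 0, sizeof(qm));
		qm.id = qc.id;
		qm.index = i;
		if (ioctl(fd, VIDIOC_QUERYMENU, &qm) == 0)
			printf("profile %d: %s\n", i, (const char *)qm.name);
	}
}
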
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index a858acea6547..01ba27f2ef87 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1356,6 +1356,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break;
case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break;
case V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break;
+ case V4L2_PIX_FMT_NV15_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
case V4L2_PIX_FMT_NV12M: descr = "Y/UV 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV21M: descr = "Y/VU 4:2:0 (N-C)"; break;
@@ -1506,6 +1507,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_QC08C: descr = "QCOM Compressed 8-bit Format"; break;
case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break;
case V4L2_PIX_FMT_AJPG: descr = "Aspeed JPEG"; break;
+ case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break;
default:
if (fmt->description[0])
return;
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 22fe08fce0a9..52d349e72b8c 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -105,9 +105,11 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
/* Link the tuner and IF video output pads */
if (tuner) {
if (if_vid) {
- pad_source = media_get_pad_index(tuner, false,
+ pad_source = media_get_pad_index(tuner,
+ MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_ANALOG);
- pad_sink = media_get_pad_index(if_vid, true,
+ pad_sink = media_get_pad_index(if_vid,
+ MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_source < 0 || pad_sink < 0) {
dev_warn(mdev->dev, "Couldn't get tuner and/or PLL pad(s): (%d, %d)\n",
@@ -122,9 +124,11 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
return ret;
}
- pad_source = media_get_pad_index(if_vid, false,
+ pad_source = media_get_pad_index(if_vid,
+ MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_ANALOG);
- pad_sink = media_get_pad_index(decoder, true,
+ pad_sink = media_get_pad_index(decoder,
+ MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_source < 0 || pad_sink < 0) {
dev_warn(mdev->dev, "get decoder and/or PLL pad(s): (%d, %d)\n",
@@ -139,9 +143,11 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
return ret;
}
} else {
- pad_source = media_get_pad_index(tuner, false,
+ pad_source = media_get_pad_index(tuner,
+ MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_ANALOG);
- pad_sink = media_get_pad_index(decoder, true,
+ pad_sink = media_get_pad_index(decoder,
+ MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_source < 0 || pad_sink < 0) {
dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s): (%d, %d)\n",
@@ -156,9 +162,11 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
}
if (if_aud) {
- pad_source = media_get_pad_index(tuner, false,
+ pad_source = media_get_pad_index(tuner,
+ MEDIA_PAD_FL_SOURCE,
PAD_SIGNAL_AUDIO);
- pad_sink = media_get_pad_index(if_aud, true,
+ pad_sink = media_get_pad_index(if_aud,
+ MEDIA_PAD_FL_SINK,
PAD_SIGNAL_AUDIO);
if (pad_source < 0 || pad_sink < 0) {
dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s) for audio: (%d, %d)\n",
@@ -180,7 +188,8 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
/* Create demod to V4L, VBI and SDR radio links */
if (io_v4l) {
- pad_source = media_get_pad_index(decoder, false, PAD_SIGNAL_DV);
+ pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_DV);
if (pad_source < 0) {
dev_warn(mdev->dev, "couldn't get decoder output pad for V4L I/O\n");
return -EINVAL;
@@ -195,7 +204,8 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
}
if (io_swradio) {
- pad_source = media_get_pad_index(decoder, false, PAD_SIGNAL_DV);
+ pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_DV);
if (pad_source < 0) {
dev_warn(mdev->dev, "couldn't get decoder output pad for SDR\n");
return -EINVAL;
@@ -210,7 +220,8 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
}
if (io_vbi) {
- pad_source = media_get_pad_index(decoder, false, PAD_SIGNAL_DV);
+ pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_DV);
if (pad_source < 0) {
dev_warn(mdev->dev, "couldn't get decoder output pad for VBI\n");
return -EINVAL;
@@ -231,7 +242,7 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
case MEDIA_ENT_F_CONN_RF:
if (!tuner)
continue;
- pad_sink = media_get_pad_index(tuner, true,
+ pad_sink = media_get_pad_index(tuner, MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_sink < 0) {
dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n");
@@ -243,7 +254,8 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
break;
case MEDIA_ENT_F_CONN_SVIDEO:
case MEDIA_ENT_F_CONN_COMPOSITE:
- pad_sink = media_get_pad_index(decoder, true,
+ pad_sink = media_get_pad_index(decoder,
+ MEDIA_PAD_FL_SINK,
PAD_SIGNAL_ANALOG);
if (pad_sink < 0) {
dev_warn(mdev->dev, "couldn't get decoder analog pad sink\n");
diff --git a/drivers/memory/ti-emif-sram-pm.S b/drivers/memory/ti-emif-sram-pm.S
index d60a8cfd63f3..7756b3971244 100644
--- a/drivers/memory/ti-emif-sram-pm.S
+++ b/drivers/memory/ti-emif-sram-pm.S
@@ -8,7 +8,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include "emif.h"
#include "ti-emif-asm-offsets.h"
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 4d9b61b92754..300caa067335 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -597,7 +597,7 @@ static struct i2c_driver pm800_driver = {
.name = "88PM800",
.pm = pm_sleep_ptr(&pm80x_pm_ops),
},
- .probe_new = pm800_probe,
+ .probe = pm800_probe,
.remove = pm800_remove,
.id_table = pm80x_id_table,
};
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index 352f13cb1481..68417c3c4f5a 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -253,7 +253,7 @@ static struct i2c_driver pm805_driver = {
.name = "88PM805",
.pm = pm_sleep_ptr(&pm80x_pm_ops),
},
- .probe_new = pm805_probe,
+ .probe = pm805_probe,
.remove = pm805_remove,
.id_table = pm80x_id_table,
};
diff --git a/drivers/mfd/88pm80x.c b/drivers/mfd/88pm80x.c
index ac4f08565f29..bbc1a87f0c8f 100644
--- a/drivers/mfd/88pm80x.c
+++ b/drivers/mfd/88pm80x.c
@@ -74,7 +74,6 @@ int pm80x_init(struct i2c_client *client)
chip->irq = client->irq;
chip->dev = &client->dev;
- dev_set_drvdata(chip->dev, chip);
i2c_set_clientdata(chip->client, chip);
ret = regmap_read(chip->regmap, PM80X_CHIP_ID, &val);
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index aabac37c3502..151bf03e772d 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1166,7 +1166,6 @@ static int pm860x_probe(struct i2c_client *client)
chip->client = client;
i2c_set_clientdata(client, chip);
chip->dev = &client->dev;
- dev_set_drvdata(chip->dev, chip);
/*
* Both client and companion client shares same platform driver.
@@ -1251,7 +1250,7 @@ static struct i2c_driver pm860x_driver = {
.pm = pm_sleep_ptr(&pm860x_pm_ops),
.of_match_table = pm860x_dt_ids,
},
- .probe_new = pm860x_probe,
+ .probe = pm860x_probe,
.remove = pm860x_remove,
.id_table = pm860x_id_table,
};
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f89f455c130a..6f5b259a6d6a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -266,8 +266,8 @@ config MFD_MADERA_SPI
Support for the Cirrus Logic Madera platform audio SoC
core functionality controlled via SPI.
-config MFD_MAX597X
- tristate "Maxim 597x power switch and monitor"
+config MFD_MAX5970
+ tristate "Maxim 5970/5978 power switch and monitor"
depends on (I2C && OF)
select MFD_SIMPLE_MFD_I2C
help
@@ -784,6 +784,19 @@ config MFD_MAX14577
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_MAX77541
+ tristate "Analog Devices MAX77541/77540 PMIC Support"
+ depends on I2C=y
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Say yes here to add support for Analog Devices MAX77541 and
+ MAX77540 Power Management ICs. This driver provides
+ common support for accessing the device; additional drivers
+ must be enabled in order to use the functionality of the device.
+ The device provides regulators and an ADC.
+
config MFD_MAX77620
bool "Maxim Semiconductor MAX77620 and MAX20024 PMIC Support"
depends on I2C=y
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 39c461536181..f3d1f1dc73b5 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -154,6 +154,7 @@ obj-$(CONFIG_MFD_DA9063) += da9063.o
obj-$(CONFIG_MFD_DA9150) += da9150-core.o
obj-$(CONFIG_MFD_MAX14577) += max14577.o
+obj-$(CONFIG_MFD_MAX77541) += max77541.o
obj-$(CONFIG_MFD_MAX77620) += max77620.o
obj-$(CONFIG_MFD_MAX77650) += max77650.o
obj-$(CONFIG_MFD_MAX77686) += max77686.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index f253da5b246b..2fee62f1016c 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -345,8 +345,6 @@ static int aat2870_i2c_probe(struct i2c_client *client)
return -ENOMEM;
aat2870->dev = &client->dev;
- dev_set_drvdata(aat2870->dev, aat2870);
-
aat2870->client = client;
i2c_set_clientdata(client, aat2870);
@@ -451,7 +449,7 @@ static struct i2c_driver aat2870_i2c_driver = {
.pm = pm_sleep_ptr(&aat2870_pm_ops),
.suppress_bind_attrs = true,
},
- .probe_new = aat2870_i2c_probe,
+ .probe = aat2870_i2c_probe,
.id_table = aat2870_i2c_id_table,
};
diff --git a/drivers/mfd/acer-ec-a500.c b/drivers/mfd/acer-ec-a500.c
index 7fd8b9988075..feb757e90dc3 100644
--- a/drivers/mfd/acer-ec-a500.c
+++ b/drivers/mfd/acer-ec-a500.c
@@ -190,7 +190,7 @@ static struct i2c_driver a500_ec_driver = {
.name = "acer-a500-embedded-controller",
.of_match_table = a500_ec_match,
},
- .probe_new = a500_ec_probe,
+ .probe = a500_ec_probe,
.remove = a500_ec_remove,
};
module_i2c_driver(a500_ec_driver);
diff --git a/drivers/mfd/act8945a.c b/drivers/mfd/act8945a.c
index bcf0fda15f0c..2406fcdff5f9 100644
--- a/drivers/mfd/act8945a.c
+++ b/drivers/mfd/act8945a.c
@@ -70,7 +70,7 @@ static struct i2c_driver act8945a_i2c_driver = {
.name = "act8945a",
.of_match_table = of_match_ptr(act8945a_of_match),
},
- .probe_new = act8945a_i2c_probe,
+ .probe = act8945a_i2c_probe,
.id_table = act8945a_i2c_id,
};
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index cb168efdbafe..bd6f4965ebc8 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -340,7 +340,7 @@ static struct i2c_driver adp5520_driver = {
.pm = pm_sleep_ptr(&adp5520_pm),
.suppress_bind_attrs = true,
},
- .probe_new = adp5520_probe,
+ .probe = adp5520_probe,
.id_table = adp5520_id,
};
builtin_i2c_driver(adp5520_driver);
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index 43e393c8608d..9b7183ffc928 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -121,7 +121,7 @@ static struct i2c_driver arizona_i2c_driver = {
.pm = pm_ptr(&arizona_pm_ops),
.of_match_table = of_match_ptr(arizona_i2c_of_match),
},
- .probe_new = arizona_i2c_probe,
+ .probe = arizona_i2c_probe,
.remove = arizona_i2c_remove,
.id_table = arizona_i2c_id,
};
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index 3facfdd28e81..c7e85ff38013 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -201,7 +201,7 @@ static struct i2c_driver as3711_i2c_driver = {
.name = "as3711",
.of_match_table = of_match_ptr(as3711_of_match),
},
- .probe_new = as3711_i2c_probe,
+ .probe = as3711_i2c_probe,
.id_table = as3711_i2c_id,
};
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index b6dda0eb8645..a2bf68afc131 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -445,7 +445,7 @@ static struct i2c_driver as3722_i2c_driver = {
.of_match_table = as3722_of_match,
.pm = &as3722_pm_ops,
},
- .probe_new = as3722_i2c_probe,
+ .probe = as3722_i2c_probe,
.id_table = as3722_i2c_id,
};
diff --git a/drivers/mfd/atc260x-i2c.c b/drivers/mfd/atc260x-i2c.c
index 8e1491167160..fb53b0432a31 100644
--- a/drivers/mfd/atc260x-i2c.c
+++ b/drivers/mfd/atc260x-i2c.c
@@ -53,7 +53,7 @@ static struct i2c_driver atc260x_i2c_driver = {
.name = "atc260x",
.of_match_table = atc260x_i2c_of_match,
},
- .probe_new = atc260x_i2c_probe,
+ .probe = atc260x_i2c_probe,
};
module_i2c_driver(atc260x_i2c_driver);
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index a49e5e217554..68d3560cfe4a 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -59,6 +59,7 @@ static void axp20x_i2c_remove(struct i2c_client *i2c)
#ifdef CONFIG_OF
static const struct of_device_id axp20x_i2c_of_match[] = {
{ .compatible = "x-powers,axp152", .data = (void *)AXP152_ID },
+ { .compatible = "x-powers,axp192", .data = (void *)AXP192_ID },
{ .compatible = "x-powers,axp202", .data = (void *)AXP202_ID },
{ .compatible = "x-powers,axp209", .data = (void *)AXP209_ID },
{ .compatible = "x-powers,axp221", .data = (void *)AXP221_ID },
@@ -74,6 +75,7 @@ MODULE_DEVICE_TABLE(of, axp20x_i2c_of_match);
static const struct i2c_device_id axp20x_i2c_id[] = {
{ "axp152", 0 },
+ { "axp192", 0 },
{ "axp202", 0 },
{ "axp209", 0 },
{ "axp221", 0 },
@@ -103,7 +105,7 @@ static struct i2c_driver axp20x_i2c_driver = {
.of_match_table = of_match_ptr(axp20x_i2c_of_match),
.acpi_match_table = ACPI_PTR(axp20x_i2c_acpi_match),
},
- .probe_new = axp20x_i2c_probe,
+ .probe = axp20x_i2c_probe,
.remove = axp20x_i2c_remove,
.id_table = axp20x_i2c_id,
};
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 07a846ecbf18..c03bc5cda080 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -34,6 +34,7 @@
static const char * const axp20x_model_names[] = {
"AXP152",
+ "AXP192",
"AXP202",
"AXP209",
"AXP221",
@@ -94,6 +95,35 @@ static const struct regmap_access_table axp20x_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(axp20x_volatile_ranges),
};
+static const struct regmap_range axp192_writeable_ranges[] = {
+ regmap_reg_range(AXP192_DATACACHE(0), AXP192_DATACACHE(5)),
+ regmap_reg_range(AXP192_PWR_OUT_CTRL, AXP192_IRQ5_STATE),
+ regmap_reg_range(AXP20X_DCDC_MODE, AXP192_N_RSTO_CTRL),
+ regmap_reg_range(AXP20X_CC_CTRL, AXP20X_CC_CTRL),
+};
+
+static const struct regmap_range axp192_volatile_ranges[] = {
+ regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP192_USB_OTG_STATUS),
+ regmap_reg_range(AXP192_IRQ1_STATE, AXP192_IRQ4_STATE),
+ regmap_reg_range(AXP192_IRQ5_STATE, AXP192_IRQ5_STATE),
+ regmap_reg_range(AXP20X_ACIN_V_ADC_H, AXP20X_IPSOUT_V_HIGH_L),
+ regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
+ regmap_reg_range(AXP192_GPIO2_0_STATE, AXP192_GPIO2_0_STATE),
+ regmap_reg_range(AXP192_GPIO4_3_STATE, AXP192_GPIO4_3_STATE),
+ regmap_reg_range(AXP192_N_RSTO_CTRL, AXP192_N_RSTO_CTRL),
+ regmap_reg_range(AXP20X_CHRG_CC_31_24, AXP20X_CC_CTRL),
+};
+
+static const struct regmap_access_table axp192_writeable_table = {
+ .yes_ranges = axp192_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(axp192_writeable_ranges),
+};
+
+static const struct regmap_access_table axp192_volatile_table = {
+ .yes_ranges = axp192_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(axp192_volatile_ranges),
+};
+
/* AXP22x ranges are shared with the AXP809, as they cover the same range */
static const struct regmap_range axp22x_writeable_ranges[] = {
regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ5_STATE),
@@ -220,6 +250,19 @@ static const struct resource axp152_pek_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
};
+static const struct resource axp192_ac_power_supply_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP192_IRQ_ACIN_PLUGIN, "ACIN_PLUGIN"),
+ DEFINE_RES_IRQ_NAMED(AXP192_IRQ_ACIN_REMOVAL, "ACIN_REMOVAL"),
+ DEFINE_RES_IRQ_NAMED(AXP192_IRQ_ACIN_OVER_V, "ACIN_OVER_V"),
+};
+
+static const struct resource axp192_usb_power_supply_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP192_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+ DEFINE_RES_IRQ_NAMED(AXP192_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+ DEFINE_RES_IRQ_NAMED(AXP192_IRQ_VBUS_VALID, "VBUS_VALID"),
+ DEFINE_RES_IRQ_NAMED(AXP192_IRQ_VBUS_NOT_VALID, "VBUS_NOT_VALID"),
+};
+
static const struct resource axp20x_ac_power_supply_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_ACIN_PLUGIN, "ACIN_PLUGIN"),
DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_ACIN_REMOVAL, "ACIN_REMOVAL"),
@@ -302,6 +345,15 @@ static const struct regmap_config axp152_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
+static const struct regmap_config axp192_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .wr_table = &axp192_writeable_table,
+ .volatile_table = &axp192_volatile_table,
+ .max_register = AXP20X_CC_CTRL,
+ .cache_type = REGCACHE_RBTREE,
+};
+
static const struct regmap_config axp20x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -379,6 +431,42 @@ static const struct regmap_irq axp152_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP152, GPIO0_INPUT, 2, 0),
};
+static const struct regmap_irq axp192_regmap_irqs[] = {
+ INIT_REGMAP_IRQ(AXP192, ACIN_OVER_V, 0, 7),
+ INIT_REGMAP_IRQ(AXP192, ACIN_PLUGIN, 0, 6),
+ INIT_REGMAP_IRQ(AXP192, ACIN_REMOVAL, 0, 5),
+ INIT_REGMAP_IRQ(AXP192, VBUS_OVER_V, 0, 4),
+ INIT_REGMAP_IRQ(AXP192, VBUS_PLUGIN, 0, 3),
+ INIT_REGMAP_IRQ(AXP192, VBUS_REMOVAL, 0, 2),
+ INIT_REGMAP_IRQ(AXP192, VBUS_V_LOW, 0, 1),
+ INIT_REGMAP_IRQ(AXP192, BATT_PLUGIN, 1, 7),
+ INIT_REGMAP_IRQ(AXP192, BATT_REMOVAL, 1, 6),
+ INIT_REGMAP_IRQ(AXP192, BATT_ENT_ACT_MODE, 1, 5),
+ INIT_REGMAP_IRQ(AXP192, BATT_EXIT_ACT_MODE, 1, 4),
+ INIT_REGMAP_IRQ(AXP192, CHARG, 1, 3),
+ INIT_REGMAP_IRQ(AXP192, CHARG_DONE, 1, 2),
+ INIT_REGMAP_IRQ(AXP192, BATT_TEMP_HIGH, 1, 1),
+ INIT_REGMAP_IRQ(AXP192, BATT_TEMP_LOW, 1, 0),
+ INIT_REGMAP_IRQ(AXP192, DIE_TEMP_HIGH, 2, 7),
+ INIT_REGMAP_IRQ(AXP192, CHARG_I_LOW, 2, 6),
+ INIT_REGMAP_IRQ(AXP192, DCDC1_V_LONG, 2, 5),
+ INIT_REGMAP_IRQ(AXP192, DCDC2_V_LONG, 2, 4),
+ INIT_REGMAP_IRQ(AXP192, DCDC3_V_LONG, 2, 3),
+ INIT_REGMAP_IRQ(AXP192, PEK_SHORT, 2, 1),
+ INIT_REGMAP_IRQ(AXP192, PEK_LONG, 2, 0),
+ INIT_REGMAP_IRQ(AXP192, N_OE_PWR_ON, 3, 7),
+ INIT_REGMAP_IRQ(AXP192, N_OE_PWR_OFF, 3, 6),
+ INIT_REGMAP_IRQ(AXP192, VBUS_VALID, 3, 5),
+ INIT_REGMAP_IRQ(AXP192, VBUS_NOT_VALID, 3, 4),
+ INIT_REGMAP_IRQ(AXP192, VBUS_SESS_VALID, 3, 3),
+ INIT_REGMAP_IRQ(AXP192, VBUS_SESS_END, 3, 2),
+ INIT_REGMAP_IRQ(AXP192, LOW_PWR_LVL, 3, 0),
+ INIT_REGMAP_IRQ(AXP192, TIMER, 4, 7),
+ INIT_REGMAP_IRQ(AXP192, GPIO2_INPUT, 4, 2),
+ INIT_REGMAP_IRQ(AXP192, GPIO1_INPUT, 4, 1),
+ INIT_REGMAP_IRQ(AXP192, GPIO0_INPUT, 4, 0),
+};
+
static const struct regmap_irq axp20x_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP20X, ACIN_OVER_V, 0, 7),
INIT_REGMAP_IRQ(AXP20X, ACIN_PLUGIN, 0, 6),
@@ -615,6 +703,32 @@ static const struct regmap_irq_chip axp152_regmap_irq_chip = {
.num_regs = 3,
};
+static unsigned int axp192_get_irq_reg(struct regmap_irq_chip_data *data,
+ unsigned int base, int index)
+{
+ /* linear mapping for IRQ1 to IRQ4 */
+ if (index < 4)
+ return base + index;
+
+ /* handle IRQ5 separately */
+ if (base == AXP192_IRQ1_EN)
+ return AXP192_IRQ5_EN;
+
+ return AXP192_IRQ5_STATE;
+}
+
+static const struct regmap_irq_chip axp192_regmap_irq_chip = {
+ .name = "axp192_irq_chip",
+ .status_base = AXP192_IRQ1_STATE,
+ .ack_base = AXP192_IRQ1_STATE,
+ .unmask_base = AXP192_IRQ1_EN,
+ .init_ack_masked = true,
+ .irqs = axp192_regmap_irqs,
+ .num_irqs = ARRAY_SIZE(axp192_regmap_irqs),
+ .num_regs = 5,
+ .get_irq_reg = axp192_get_irq_reg,
+};
+
static const struct regmap_irq_chip axp20x_regmap_irq_chip = {
.name = "axp20x_irq_chip",
.status_base = AXP20X_IRQ1_STATE,
@@ -705,6 +819,27 @@ static const struct regmap_irq_chip axp15060_regmap_irq_chip = {
.num_regs = 2,
};
+static const struct mfd_cell axp192_cells[] = {
+ {
+ .name = "axp192-adc",
+ .of_compatible = "x-powers,axp192-adc",
+ }, {
+ .name = "axp20x-battery-power-supply",
+ .of_compatible = "x-powers,axp192-battery-power-supply",
+ }, {
+ .name = "axp20x-ac-power-supply",
+ .of_compatible = "x-powers,axp202-ac-power-supply",
+ .num_resources = ARRAY_SIZE(axp192_ac_power_supply_resources),
+ .resources = axp192_ac_power_supply_resources,
+ }, {
+ .name = "axp20x-usb-power-supply",
+ .of_compatible = "x-powers,axp192-usb-power-supply",
+ .num_resources = ARRAY_SIZE(axp192_usb_power_supply_resources),
+ .resources = axp192_usb_power_supply_resources,
+ },
+ { .name = "axp20x-regulator" },
+};
+
static const struct mfd_cell axp20x_cells[] = {
{
.name = "axp20x-gpio",
@@ -1022,6 +1157,12 @@ int axp20x_match_device(struct axp20x_dev *axp20x)
axp20x->regmap_cfg = &axp152_regmap_config;
axp20x->regmap_irq_chip = &axp152_regmap_irq_chip;
break;
+ case AXP192_ID:
+ axp20x->nr_cells = ARRAY_SIZE(axp192_cells);
+ axp20x->cells = axp192_cells;
+ axp20x->regmap_cfg = &axp192_regmap_config;
+ axp20x->regmap_irq_chip = &axp192_regmap_irq_chip;
+ break;
case AXP202_ID:
case AXP209_ID:
axp20x->nr_cells = ARRAY_SIZE(axp20x_cells);
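
The get_irq_reg callback added above exists because the AXP192 status/enable banks are only partially contiguous. The small standalone illustration below shows the mapping it implements; the register addresses are placeholders, not the real AXP192 values.

#include <stdio.h>

#define IRQ1_EN		0x40	/* placeholder for AXP192_IRQ1_EN */
#define IRQ5_EN		0x4a	/* placeholder for AXP192_IRQ5_EN */

/* Banks 0..3 are contiguous from the base; bank 4 (IRQ5) lives elsewhere. */
static unsigned int get_irq_reg(unsigned int base, int index)
{
	if (index < 4)
		return base + index;
	return IRQ5_EN;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("bank %d -> reg 0x%02x\n", i, get_irq_reg(IRQ1_EN, i));
	return 0;
}
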
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 251d515478d5..9f39b46b87f4 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -108,7 +108,7 @@ static struct i2c_driver bcm590xx_i2c_driver = {
.name = "bcm590xx",
.of_match_table = bcm590xx_of_match,
},
- .probe_new = bcm590xx_i2c_probe,
+ .probe = bcm590xx_i2c_probe,
.id_table = bcm590xx_i2c_id,
};
module_i2c_driver(bcm590xx_i2c_driver);
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 60dc858c8117..819d09e4d100 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -278,7 +278,7 @@ static struct i2c_driver bd9571mwv_driver = {
.name = "bd9571mwv",
.of_match_table = bd9571mwv_of_match_table,
},
- .probe_new = bd9571mwv_probe,
+ .probe = bd9571mwv_probe,
.id_table = bd9571mwv_id_table,
};
module_i2c_driver(bd9571mwv_driver);
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 6570b33a5a77..e86b39de3303 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -543,7 +543,7 @@ static struct i2c_driver da903x_driver = {
.driver = {
.name = "da903x",
},
- .probe_new = da903x_probe,
+ .probe = da903x_probe,
.remove = da903x_remove,
.id_table = da903x_id_table,
};
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 03db7a2ccf7a..541e2d47677e 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -176,7 +176,7 @@ static void da9052_i2c_remove(struct i2c_client *client)
}
static struct i2c_driver da9052_i2c_driver = {
- .probe_new = da9052_i2c_probe,
+ .probe = da9052_i2c_probe,
.remove = da9052_i2c_remove,
.id_table = da9052_i2c_id,
.driver = {
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 537fd5de3e6d..bbaf4f07f274 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -66,7 +66,7 @@ static const struct of_device_id da9055_of_match[] = {
};
static struct i2c_driver da9055_i2c_driver = {
- .probe_new = da9055_i2c_probe,
+ .probe = da9055_i2c_probe,
.remove = da9055_i2c_remove,
.id_table = da9055_i2c_id,
.driver = {
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index d073d5f106ec..48f58b6f5629 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -726,7 +726,7 @@ static struct i2c_driver da9062_i2c_driver = {
.name = "da9062",
.of_match_table = da9062_dt_ids,
},
- .probe_new = da9062_i2c_probe,
+ .probe = da9062_i2c_probe,
.remove = da9062_i2c_remove,
.id_table = da9062_i2c_id,
};
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 03f8f95a1d62..d715cf9a9e68 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -469,7 +469,7 @@ static struct i2c_driver da9063_i2c_driver = {
.name = "da9063",
.of_match_table = da9063_dt_ids,
},
- .probe_new = da9063_i2c_probe,
+ .probe = da9063_i2c_probe,
.id_table = da9063_i2c_id,
};
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
index d2c954103b2f..94d621e20635 100644
--- a/drivers/mfd/da9150-core.c
+++ b/drivers/mfd/da9150-core.c
@@ -510,7 +510,7 @@ static struct i2c_driver da9150_driver = {
.name = "da9150",
.of_match_table = da9150_of_match,
},
- .probe_new = da9150_probe,
+ .probe = da9150_probe,
.remove = da9150_remove,
.shutdown = da9150_shutdown,
.id_table = da9150_i2c_id,
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index c3149729cec2..c7510434380a 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
diff --git a/drivers/mfd/ene-kb3930.c b/drivers/mfd/ene-kb3930.c
index 3eff98e26bea..fa0ad2f14a39 100644
--- a/drivers/mfd/ene-kb3930.c
+++ b/drivers/mfd/ene-kb3930.c
@@ -196,7 +196,7 @@ static const struct of_device_id kb3930_dt_ids[] = {
MODULE_DEVICE_TABLE(of, kb3930_dt_ids);
static struct i2c_driver kb3930_driver = {
- .probe_new = kb3930_probe,
+ .probe = kb3930_probe,
.remove = kb3930_remove,
.driver = {
.name = "ene-kb3930",
diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c
index c954ed265de8..b02bfdc871e9 100644
--- a/drivers/mfd/gateworks-gsc.c
+++ b/drivers/mfd/gateworks-gsc.c
@@ -264,7 +264,7 @@ static struct i2c_driver gsc_driver = {
.name = "gateworks-gsc",
.of_match_table = gsc_of_match,
},
- .probe_new = gsc_probe,
+ .probe = gsc_probe,
.remove = gsc_remove,
};
module_i2c_driver(gsc_driver);
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index a143c8dca2d9..212818aef93e 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -183,6 +183,9 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev)
return -ENOMEM;
info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!info->mem)
+ return -ENODEV;
+
info->irq = platform_get_irq(pdev, 0);
ret = intel_lpss_probe(&pdev->dev, info);
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index cfbee2cfba6b..9591b354072a 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -460,6 +460,7 @@ void intel_lpss_remove(struct device *dev)
}
EXPORT_SYMBOL_GPL(intel_lpss_remove);
+#ifdef CONFIG_PM
static int resume_lpss_device(struct device *dev, void *data)
{
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
@@ -514,6 +515,7 @@ int intel_lpss_resume(struct device *dev)
return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_resume);
+#endif
static int __init intel_lpss_init(void)
{
diff --git a/drivers/mfd/intel-m10-bmc-core.c b/drivers/mfd/intel-m10-bmc-core.c
index dac9cf7bcb4a..8ad5b3821584 100644
--- a/drivers/mfd/intel-m10-bmc-core.c
+++ b/drivers/mfd/intel-m10-bmc-core.c
@@ -12,6 +12,91 @@
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/module.h>
+void m10bmc_fw_state_set(struct intel_m10bmc *m10bmc, enum m10bmc_fw_state new_state)
+{
+ /* bmcfw_state is only needed if handshake_sys_reg_nranges > 0 */
+ if (!m10bmc->info->handshake_sys_reg_nranges)
+ return;
+
+ down_write(&m10bmc->bmcfw_lock);
+ m10bmc->bmcfw_state = new_state;
+ up_write(&m10bmc->bmcfw_lock);
+}
+EXPORT_SYMBOL_NS_GPL(m10bmc_fw_state_set, INTEL_M10_BMC_CORE);
+
+/*
+ * For some Intel FPGA devices, the BMC firmware is not available to service
+ * handshake registers during a secure update.
+ */
+static bool m10bmc_reg_always_available(struct intel_m10bmc *m10bmc, unsigned int offset)
+{
+ if (!m10bmc->info->handshake_sys_reg_nranges)
+ return true;
+
+ return !regmap_reg_in_ranges(offset, m10bmc->info->handshake_sys_reg_ranges,
+ m10bmc->info->handshake_sys_reg_nranges);
+}
+
+/*
+ * m10bmc_handshake_reg_unavailable - Checks if reg access collides with secure update state
+ * @m10bmc: M10 BMC structure
+ *
+ * For some Intel FPGA devices, the BMC firmware is not available to service
+ * handshake registers during a secure update erase and write phases.
+ *
+ * Context: @m10bmc->bmcfw_lock must be held.
+ */
+static bool m10bmc_handshake_reg_unavailable(struct intel_m10bmc *m10bmc)
+{
+ return m10bmc->bmcfw_state == M10BMC_FW_STATE_SEC_UPDATE_PREPARE ||
+ m10bmc->bmcfw_state == M10BMC_FW_STATE_SEC_UPDATE_WRITE;
+}
+
+/*
+ * This function helps simplify access to the system registers.
+ *
+ * The base of the system registers is configured through the struct
+ * csr_map.
+ */
+int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset, unsigned int *val)
+{
+ const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
+ int ret;
+
+ if (m10bmc_reg_always_available(m10bmc, offset))
+ return m10bmc_raw_read(m10bmc, csr_map->base + offset, val);
+
+ down_read(&m10bmc->bmcfw_lock);
+ if (m10bmc_handshake_reg_unavailable(m10bmc))
+ ret = -EBUSY; /* Reg not available during secure update */
+ else
+ ret = m10bmc_raw_read(m10bmc, csr_map->base + offset, val);
+ up_read(&m10bmc->bmcfw_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(m10bmc_sys_read, INTEL_M10_BMC_CORE);
+
+int m10bmc_sys_update_bits(struct intel_m10bmc *m10bmc, unsigned int offset,
+ unsigned int msk, unsigned int val)
+{
+ const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
+ int ret;
+
+ if (m10bmc_reg_always_available(m10bmc, offset))
+ return regmap_update_bits(m10bmc->regmap, csr_map->base + offset, msk, val);
+
+ down_read(&m10bmc->bmcfw_lock);
+ if (m10bmc_handshake_reg_unavailable(m10bmc))
+ ret = -EBUSY; /* Reg not available during secure update */
+ else
+ ret = regmap_update_bits(m10bmc->regmap, csr_map->base + offset, msk, val);
+ up_read(&m10bmc->bmcfw_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(m10bmc_sys_update_bits, INTEL_M10_BMC_CORE);
+
static ssize_t bmc_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -98,7 +183,7 @@ const struct attribute_group *m10bmc_dev_groups[] = {
&m10bmc_group,
NULL,
};
-EXPORT_SYMBOL_GPL(m10bmc_dev_groups);
+EXPORT_SYMBOL_NS_GPL(m10bmc_dev_groups, INTEL_M10_BMC_CORE);
int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info)
{
@@ -106,6 +191,7 @@ int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platf
m10bmc->info = info;
dev_set_drvdata(m10bmc->dev, m10bmc);
+ init_rwsem(&m10bmc->bmcfw_lock);
ret = devm_mfd_add_devices(m10bmc->dev, PLATFORM_DEVID_AUTO,
info->cells, info->n_cells,
@@ -115,7 +201,7 @@ int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platf
return ret;
}
-EXPORT_SYMBOL_GPL(m10bmc_dev_init);
+EXPORT_SYMBOL_NS_GPL(m10bmc_dev_init, INTEL_M10_BMC_CORE);
MODULE_DESCRIPTION("Intel MAX 10 BMC core driver");
MODULE_AUTHOR("Intel Corporation");
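
Sub-drivers consume the new accessors through the INTEL_M10_BMC_CORE namespace, so they also need MODULE_IMPORT_NS(INTEL_M10_BMC_CORE), as the SPI and PMCI interfaces below do. Here is a minimal sketch of a consumer coping with the -EBUSY case, assuming a hypothetical FOO_DOORBELL_OFF register offset.

#include <linux/delay.h>
#include <linux/mfd/intel-m10-bmc.h>

#define FOO_DOORBELL_OFF	0x300	/* hypothetical system-register offset */

static int foo_poll_doorbell(struct intel_m10bmc *m10bmc, unsigned int *doorbell)
{
	int retries = 3;
	int ret;

	do {
		ret = m10bmc_sys_read(m10bmc, FOO_DOORBELL_OFF, doorbell);
		if (ret != -EBUSY)
			return ret;
		/* Handshake registers are blocked while a secure update runs. */
		msleep(100);
	} while (--retries);

	return -EBUSY;
}
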
diff --git a/drivers/mfd/intel-m10-bmc-pmci.c b/drivers/mfd/intel-m10-bmc-pmci.c
index 8821f1876dd6..0392ef8b57d8 100644
--- a/drivers/mfd/intel-m10-bmc-pmci.c
+++ b/drivers/mfd/intel-m10-bmc-pmci.c
@@ -453,3 +453,4 @@ module_dfl_driver(m10bmc_pmci_driver);
MODULE_DESCRIPTION("MAX10 BMC PMCI-based interface");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);
diff --git a/drivers/mfd/intel-m10-bmc-spi.c b/drivers/mfd/intel-m10-bmc-spi.c
index 957200e17fed..cbeb7de9e041 100644
--- a/drivers/mfd/intel-m10-bmc-spi.c
+++ b/drivers/mfd/intel-m10-bmc-spi.c
@@ -116,12 +116,20 @@ static struct mfd_cell m10bmc_d5005_subdevs[] = {
{ .name = "d5005bmc-sec-update" },
};
+static const struct regmap_range m10bmc_d5005_fw_handshake_regs[] = {
+ regmap_reg_range(M10BMC_N3000_TELEM_START, M10BMC_D5005_TELEM_END),
+};
+
static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
{ .name = "n3000bmc-hwmon" },
{ .name = "n3000bmc-retimer" },
{ .name = "n3000bmc-sec-update" },
};
+static const struct regmap_range m10bmc_n3000_fw_handshake_regs[] = {
+ regmap_reg_range(M10BMC_N3000_TELEM_START, M10BMC_N3000_TELEM_END),
+};
+
static struct mfd_cell m10bmc_n5010_subdevs[] = {
{ .name = "n5010bmc-hwmon" },
};
@@ -129,18 +137,24 @@ static struct mfd_cell m10bmc_n5010_subdevs[] = {
static const struct intel_m10bmc_platform_info m10bmc_spi_n3000 = {
.cells = m10bmc_pacn3000_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_pacn3000_subdevs),
+ .handshake_sys_reg_ranges = m10bmc_n3000_fw_handshake_regs,
+ .handshake_sys_reg_nranges = ARRAY_SIZE(m10bmc_n3000_fw_handshake_regs),
.csr_map = &m10bmc_n3000_csr_map,
};
static const struct intel_m10bmc_platform_info m10bmc_spi_d5005 = {
.cells = m10bmc_d5005_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_d5005_subdevs),
+ .handshake_sys_reg_ranges = m10bmc_d5005_fw_handshake_regs,
+ .handshake_sys_reg_nranges = ARRAY_SIZE(m10bmc_d5005_fw_handshake_regs),
.csr_map = &m10bmc_n3000_csr_map,
};
static const struct intel_m10bmc_platform_info m10bmc_spi_n5010 = {
.cells = m10bmc_n5010_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_n5010_subdevs),
+ .handshake_sys_reg_ranges = m10bmc_n3000_fw_handshake_regs,
+ .handshake_sys_reg_nranges = ARRAY_SIZE(m10bmc_n3000_fw_handshake_regs),
.csr_map = &m10bmc_n3000_csr_map,
};
@@ -166,3 +180,4 @@ MODULE_DESCRIPTION("Intel MAX 10 BMC SPI bus interface");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:intel-m10-bmc");
+MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 282b8fd08009..992855bfda3e 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -172,7 +172,7 @@ static struct i2c_driver chtdc_ti_i2c_driver = {
.pm = pm_sleep_ptr(&chtdc_ti_pm_ops),
.acpi_match_table = chtdc_ti_acpi_ids,
},
- .probe_new = chtdc_ti_probe,
+ .probe = chtdc_ti_probe,
.shutdown = chtdc_ti_shutdown,
};
module_i2c_driver(chtdc_ti_i2c_driver);
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 871776d511e3..7fce3ef7ab45 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -272,7 +272,7 @@ static struct i2c_driver cht_wc_driver = {
.pm = pm_sleep_ptr(&cht_wc_pm_ops),
.acpi_match_table = cht_wc_acpi_ids,
},
- .probe_new = cht_wc_probe,
+ .probe = cht_wc_probe,
.shutdown = cht_wc_shutdown,
.id_table = cht_wc_i2c_id,
};
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index b745ace46e5b..581f81cbaa24 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -263,7 +263,7 @@ static struct i2c_driver crystal_cove_i2c_driver = {
.pm = pm_sleep_ptr(&crystal_cove_pm_ops),
.acpi_match_table = crystal_cove_acpi_match,
},
- .probe_new = crystal_cove_i2c_probe,
+ .probe = crystal_cove_i2c_probe,
.remove = crystal_cove_i2c_remove,
.shutdown = crystal_cove_shutdown,
};
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
index 1895fce25b06..dfe9cb79e6a1 100644
--- a/drivers/mfd/iqs62x.c
+++ b/drivers/mfd/iqs62x.c
@@ -1069,7 +1069,7 @@ static struct i2c_driver iqs62x_i2c_driver = {
.of_match_table = iqs62x_of_match,
.pm = &iqs62x_pm,
},
- .probe_new = iqs62x_probe,
+ .probe = iqs62x_probe,
.remove = iqs62x_remove,
};
module_i2c_driver(iqs62x_i2c_driver);
diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c
index 1c807c0e6d25..61396d824f16 100644
--- a/drivers/mfd/khadas-mcu.c
+++ b/drivers/mfd/khadas-mcu.c
@@ -134,7 +134,7 @@ static struct i2c_driver khadas_mcu_driver = {
.name = "khadas-mcu-core",
.of_match_table = of_match_ptr(khadas_mcu_of_match),
},
- .probe_new = khadas_mcu_probe,
+ .probe = khadas_mcu_probe,
};
module_i2c_driver(khadas_mcu_driver);
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index 946f94f3a3c3..c211183cecb2 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -485,8 +485,6 @@ static int lm3533_device_init(struct lm3533 *lm3533)
lm3533->gpio_hwen = pdata->gpio_hwen;
- dev_set_drvdata(lm3533->dev, lm3533);
-
if (gpio_is_valid(lm3533->gpio_hwen)) {
ret = devm_gpio_request_one(lm3533->dev, lm3533->gpio_hwen,
GPIOF_OUT_INIT_LOW, "lm3533-hwen");
@@ -626,7 +624,7 @@ static struct i2c_driver lm3533_i2c_driver = {
.name = "lm3533",
},
.id_table = lm3533_i2c_ids,
- .probe_new = lm3533_i2c_probe,
+ .probe = lm3533_i2c_probe,
.remove = lm3533_i2c_remove,
};
diff --git a/drivers/mfd/lochnagar-i2c.c b/drivers/mfd/lochnagar-i2c.c
index 3a65d9938902..3c8843117080 100644
--- a/drivers/mfd/lochnagar-i2c.c
+++ b/drivers/mfd/lochnagar-i2c.c
@@ -382,7 +382,7 @@ static struct i2c_driver lochnagar_i2c_driver = {
.of_match_table = of_match_ptr(lochnagar_of_match),
.suppress_bind_attrs = true,
},
- .probe_new = lochnagar_i2c_probe,
+ .probe = lochnagar_i2c_probe,
};
static int __init lochnagar_i2c_init(void)
diff --git a/drivers/mfd/lp3943.c b/drivers/mfd/lp3943.c
index f9f39b53d030..7f749a23dca8 100644
--- a/drivers/mfd/lp3943.c
+++ b/drivers/mfd/lp3943.c
@@ -140,7 +140,7 @@ MODULE_DEVICE_TABLE(of, lp3943_of_match);
#endif
static struct i2c_driver lp3943_driver = {
- .probe_new = lp3943_probe,
+ .probe = lp3943_probe,
.driver = {
.name = "lp3943",
.of_match_table = of_match_ptr(lp3943_of_match),
diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c
index c81c5c9ad748..6639f0fad4ea 100644
--- a/drivers/mfd/lp873x.c
+++ b/drivers/mfd/lp873x.c
@@ -78,7 +78,7 @@ static struct i2c_driver lp873x_driver = {
.name = "lp873x",
.of_match_table = of_lp873x_match_table,
},
- .probe_new = lp873x_probe,
+ .probe = lp873x_probe,
.id_table = lp873x_id_table,
};
module_i2c_driver(lp873x_driver);
diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c
index 568f0f01ea0d..88ce4d7c50a7 100644
--- a/drivers/mfd/lp87565.c
+++ b/drivers/mfd/lp87565.c
@@ -119,7 +119,7 @@ static struct i2c_driver lp87565_driver = {
.name = "lp87565",
.of_match_table = of_lp87565_match_table,
},
- .probe_new = lp87565_probe,
+ .probe = lp87565_probe,
.shutdown = lp87565_shutdown,
.id_table = lp87565_id_table,
};
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index 18583addaae2..f371eeb042e0 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -225,7 +225,7 @@ static struct i2c_driver lp8788_driver = {
.driver = {
.name = "lp8788",
},
- .probe_new = lp8788_probe,
+ .probe = lp8788_probe,
.remove = lp8788_remove,
.id_table = lp8788_ids,
};
diff --git a/drivers/mfd/madera-i2c.c b/drivers/mfd/madera-i2c.c
index 47e65d88feb0..0968aa9733ac 100644
--- a/drivers/mfd/madera-i2c.c
+++ b/drivers/mfd/madera-i2c.c
@@ -139,7 +139,7 @@ static struct i2c_driver madera_i2c_driver = {
.pm = &madera_pm_ops,
.of_match_table = of_match_ptr(madera_of_match),
},
- .probe_new = madera_i2c_probe,
+ .probe = madera_i2c_probe,
.remove = madera_i2c_remove,
.id_table = madera_i2c_id,
};
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 0e3731e9e9b5..25ed8846b7fb 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -518,7 +518,7 @@ static struct i2c_driver max14577_i2c_driver = {
.pm = pm_sleep_ptr(&max14577_pm),
.of_match_table = max14577_dt_match,
},
- .probe_new = max14577_i2c_probe,
+ .probe = max14577_i2c_probe,
.remove = max14577_i2c_remove,
.id_table = max14577_i2c_id,
};
diff --git a/drivers/mfd/max77541.c b/drivers/mfd/max77541.c
new file mode 100644
index 000000000000..e147e949c2b3
--- /dev/null
+++ b/drivers/mfd/max77541.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022 Analog Devices, Inc.
+ * Driver for the MAX77540 and MAX77541
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77541.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+static const struct regmap_config max77541_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct regmap_irq max77541_src_irqs[] = {
+ { .mask = MAX77541_BIT_INT_SRC_TOPSYS },
+ { .mask = MAX77541_BIT_INT_SRC_BUCK },
+};
+
+static const struct regmap_irq_chip max77541_src_irq_chip = {
+ .name = "max77541-src",
+ .status_base = MAX77541_REG_INT_SRC,
+ .mask_base = MAX77541_REG_INT_SRC_M,
+ .num_regs = 1,
+ .irqs = max77541_src_irqs,
+ .num_irqs = ARRAY_SIZE(max77541_src_irqs),
+};
+
+static const struct regmap_irq max77541_topsys_irqs[] = {
+ { .mask = MAX77541_BIT_TOPSYS_INT_TJ_120C },
+ { .mask = MAX77541_BIT_TOPSYS_INT_TJ_140C },
+ { .mask = MAX77541_BIT_TOPSYS_INT_TSHDN },
+ { .mask = MAX77541_BIT_TOPSYS_INT_UVLO },
+ { .mask = MAX77541_BIT_TOPSYS_INT_ALT_SWO },
+ { .mask = MAX77541_BIT_TOPSYS_INT_EXT_FREQ_DET },
+};
+
+static const struct regmap_irq_chip max77541_topsys_irq_chip = {
+ .name = "max77541-topsys",
+ .status_base = MAX77541_REG_TOPSYS_INT,
+ .mask_base = MAX77541_REG_TOPSYS_INT_M,
+ .num_regs = 1,
+ .irqs = max77541_topsys_irqs,
+ .num_irqs = ARRAY_SIZE(max77541_topsys_irqs),
+};
+
+static const struct regmap_irq max77541_buck_irqs[] = {
+ { .mask = MAX77541_BIT_BUCK_INT_M1_POK_FLT },
+ { .mask = MAX77541_BIT_BUCK_INT_M2_POK_FLT },
+ { .mask = MAX77541_BIT_BUCK_INT_M1_SCFLT },
+ { .mask = MAX77541_BIT_BUCK_INT_M2_SCFLT },
+};
+
+static const struct regmap_irq_chip max77541_buck_irq_chip = {
+ .name = "max77541-buck",
+ .status_base = MAX77541_REG_BUCK_INT,
+ .mask_base = MAX77541_REG_BUCK_INT_M,
+ .num_regs = 1,
+ .irqs = max77541_buck_irqs,
+ .num_irqs = ARRAY_SIZE(max77541_buck_irqs),
+};
+
+static const struct regmap_irq max77541_adc_irqs[] = {
+ { .mask = MAX77541_BIT_ADC_INT_CH1_I },
+ { .mask = MAX77541_BIT_ADC_INT_CH2_I },
+ { .mask = MAX77541_BIT_ADC_INT_CH3_I },
+ { .mask = MAX77541_BIT_ADC_INT_CH6_I },
+};
+
+static const struct regmap_irq_chip max77541_adc_irq_chip = {
+ .name = "max77541-adc",
+ .status_base = MAX77541_REG_ADC_INT,
+ .mask_base = MAX77541_REG_ADC_INT_M,
+ .num_regs = 1,
+ .irqs = max77541_adc_irqs,
+ .num_irqs = ARRAY_SIZE(max77541_adc_irqs),
+};
+
+static const struct mfd_cell max77540_devs[] = {
+ MFD_CELL_OF("max77540-regulator", NULL, NULL, 0, 0, NULL),
+};
+
+static const struct mfd_cell max77541_devs[] = {
+ MFD_CELL_OF("max77541-regulator", NULL, NULL, 0, 0, NULL),
+ MFD_CELL_OF("max77541-adc", NULL, NULL, 0, 0, NULL),
+};
+
+static int max77541_pmic_irq_init(struct device *dev)
+{
+ struct max77541 *max77541 = dev_get_drvdata(dev);
+ int irq = max77541->i2c->irq;
+ int ret;
+
+ ret = devm_regmap_add_irq_chip(dev, max77541->regmap, irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77541_src_irq_chip,
+ &max77541->irq_data);
+ if (ret)
+ return ret;
+
+ ret = devm_regmap_add_irq_chip(dev, max77541->regmap, irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77541_topsys_irq_chip,
+ &max77541->irq_topsys);
+ if (ret)
+ return ret;
+
+ ret = devm_regmap_add_irq_chip(dev, max77541->regmap, irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77541_buck_irq_chip,
+ &max77541->irq_buck);
+ if (ret)
+ return ret;
+
+ if (max77541->id == MAX77541) {
+ ret = devm_regmap_add_irq_chip(dev, max77541->regmap, irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77541_adc_irq_chip,
+ &max77541->irq_adc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max77541_pmic_setup(struct device *dev)
+{
+ struct max77541 *max77541 = dev_get_drvdata(dev);
+ const struct mfd_cell *cells;
+ int n_devs;
+ int ret;
+
+ switch (max77541->id) {
+ case MAX77540:
+ cells = max77540_devs;
+ n_devs = ARRAY_SIZE(max77540_devs);
+ break;
+ case MAX77541:
+ cells = max77541_devs;
+ n_devs = ARRAY_SIZE(max77541_devs);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = max77541_pmic_irq_init(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize IRQ\n");
+
+ ret = device_init_wakeup(dev, true);
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to init wakeup\n");
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ cells, n_devs, NULL, 0, NULL);
+}
+
+static int max77541_probe(struct i2c_client *client)
+{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ struct device *dev = &client->dev;
+ struct max77541 *max77541;
+
+ max77541 = devm_kzalloc(dev, sizeof(*max77541), GFP_KERNEL);
+ if (!max77541)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, max77541);
+ max77541->i2c = client;
+
+ max77541->id = (enum max7754x_ids)device_get_match_data(dev);
+ if (!max77541->id)
+ max77541->id = (enum max7754x_ids)id->driver_data;
+
+ if (!max77541->id)
+ return -EINVAL;
+
+ max77541->regmap = devm_regmap_init_i2c(client,
+ &max77541_regmap_config);
+ if (IS_ERR(max77541->regmap))
+ return dev_err_probe(dev, PTR_ERR(max77541->regmap),
+ "Failed to allocate register map\n");
+
+ return max77541_pmic_setup(dev);
+}
+
+static const struct of_device_id max77541_of_id[] = {
+ {
+ .compatible = "adi,max77540",
+ .data = (void *)MAX77540,
+ },
+ {
+ .compatible = "adi,max77541",
+ .data = (void *)MAX77541,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77541_of_id);
+
+static const struct i2c_device_id max77541_id[] = {
+ { "max77540", MAX77540 },
+ { "max77541", MAX77541 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max77541_id);
+
+static struct i2c_driver max77541_driver = {
+ .driver = {
+ .name = "max77541",
+ .of_match_table = max77541_of_id,
+ },
+ .probe = max77541_probe,
+ .id_table = max77541_id,
+};
+module_i2c_driver(max77541_driver);
+
+MODULE_DESCRIPTION("MAX7740/MAX7741 Driver");
+MODULE_AUTHOR("Okan Sahin <okan.sahin@analog.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index cbd2297126f0..5811ed8f4840 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -698,7 +698,7 @@ static struct i2c_driver max77620_driver = {
.name = "max77620",
.pm = pm_sleep_ptr(&max77620_pm_ops),
},
- .probe_new = max77620_probe,
+ .probe = max77620_probe,
.id_table = max77620_id,
};
builtin_i2c_driver(max77620_driver);
diff --git a/drivers/mfd/max77650.c b/drivers/mfd/max77650.c
index 3c07fcdd9d07..75b5dc44e38c 100644
--- a/drivers/mfd/max77650.c
+++ b/drivers/mfd/max77650.c
@@ -222,7 +222,7 @@ static struct i2c_driver max77650_i2c_driver = {
.name = "max77650",
.of_match_table = max77650_of_match,
},
- .probe_new = max77650_i2c_probe,
+ .probe = max77650_i2c_probe,
};
module_i2c_driver(max77650_i2c_driver);
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index f8e863f3fc95..01833086ca7d 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -269,7 +269,7 @@ static struct i2c_driver max77686_i2c_driver = {
.pm = pm_sleep_ptr(&max77686_pm),
.of_match_table = max77686_pmic_dt_match,
},
- .probe_new = max77686_i2c_probe,
+ .probe = max77686_i2c_probe,
};
module_i2c_driver(max77686_i2c_driver);
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 3995e8769f49..1c485a4c3dcf 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -356,7 +356,7 @@ static struct i2c_driver max77693_i2c_driver = {
.pm = &max77693_pm,
.of_match_table = of_match_ptr(max77693_dt_match),
},
- .probe_new = max77693_i2c_probe,
+ .probe = max77693_i2c_probe,
.remove = max77693_i2c_remove,
.id_table = max77693_i2c_id,
};
diff --git a/drivers/mfd/max77714.c b/drivers/mfd/max77714.c
index 143a432ea343..c618605a3fb2 100644
--- a/drivers/mfd/max77714.c
+++ b/drivers/mfd/max77714.c
@@ -143,7 +143,7 @@ static struct i2c_driver max77714_driver = {
.name = "max77714",
.of_match_table = max77714_dt_match,
},
- .probe_new = max77714_probe,
+ .probe = max77714_probe,
};
module_i2c_driver(max77714_driver);
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index 8ff0723808c8..b3689c13a14d 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -207,7 +207,7 @@ static struct i2c_driver max77843_i2c_driver = {
.of_match_table = max77843_dt_match,
.suppress_bind_attrs = true,
},
- .probe_new = max77843_probe,
+ .probe = max77843_probe,
.id_table = max77843_id,
};
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index a69b865c6eac..78b5ee688dec 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -201,8 +201,6 @@ static int max8907_i2c_probe(struct i2c_client *i2c)
}
max8907->dev = &i2c->dev;
- dev_set_drvdata(max8907->dev, max8907);
-
max8907->i2c_gen = i2c;
i2c_set_clientdata(i2c, max8907);
max8907->regmap_gen = devm_regmap_init_i2c(i2c,
@@ -313,7 +311,7 @@ static struct i2c_driver max8907_i2c_driver = {
.name = "max8907",
.of_match_table = of_match_ptr(max8907_of_match),
},
- .probe_new = max8907_i2c_probe,
+ .probe = max8907_i2c_probe,
.remove = max8907_i2c_remove,
.id_table = max8907_i2c_id,
};
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 4057fd15c29e..7608000488f9 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -172,7 +172,6 @@ static int max8925_probe(struct i2c_client *client)
chip->i2c = client;
chip->dev = &client->dev;
i2c_set_clientdata(client, chip);
- dev_set_drvdata(chip->dev, chip);
mutex_init(&chip->io_lock);
chip->rtc = i2c_new_dummy_device(chip->i2c->adapter, RTC_I2C_ADDR);
@@ -240,7 +239,7 @@ static struct i2c_driver max8925_driver = {
.pm = pm_sleep_ptr(&max8925_pm_ops),
.of_match_table = max8925_dt_ids,
},
- .probe_new = max8925_probe,
+ .probe = max8925_probe,
.remove = max8925_remove,
.id_table = max8925_id_table,
};
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 79d551b86150..94c09a5eab32 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -478,7 +478,7 @@ static struct i2c_driver max8997_i2c_driver = {
.suppress_bind_attrs = true,
.of_match_table = of_match_ptr(max8997_pmic_dt_match),
},
- .probe_new = max8997_i2c_probe,
+ .probe = max8997_i2c_probe,
.id_table = max8997_i2c_id,
};
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 122f7b931f5a..33a3ec5464fb 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -348,7 +348,7 @@ static struct i2c_driver max8998_i2c_driver = {
.suppress_bind_attrs = true,
.of_match_table = of_match_ptr(max8998_dt_match),
},
- .probe_new = max8998_i2c_probe,
+ .probe = max8998_i2c_probe,
.id_table = max8998_i2c_id,
};
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index 633b973a5ba7..de59b498c925 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -95,7 +95,7 @@ static struct i2c_driver mc13xxx_i2c_driver = {
.name = "mc13xxx",
.of_match_table = mc13xxx_dt_ids,
},
- .probe_new = mc13xxx_i2c_probe,
+ .probe = mc13xxx_i2c_probe,
.remove = mc13xxx_i2c_remove,
};
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index c2866a11c1d2..662604ea97f2 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1240,7 +1240,7 @@ static struct i2c_driver menelaus_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
},
- .probe_new = menelaus_probe,
+ .probe = menelaus_probe,
.remove = menelaus_remove,
.id_table = menelaus_id,
};
diff --git a/drivers/mfd/menf21bmc.c b/drivers/mfd/menf21bmc.c
index 9092fac46e35..1d36095155e0 100644
--- a/drivers/mfd/menf21bmc.c
+++ b/drivers/mfd/menf21bmc.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(i2c, menf21bmc_id_table);
static struct i2c_driver menf21bmc_driver = {
.driver.name = "menf21bmc",
.id_table = menf21bmc_id_table,
- .probe_new = menf21bmc_probe,
+ .probe = menf21bmc_probe,
};
module_i2c_driver(menf21bmc_driver);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 695d50b3bac6..0ed7c0d7784e 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -102,7 +102,6 @@ static int mfd_match_of_node_to_dev(struct platform_device *pdev,
{
#if IS_ENABLED(CONFIG_OF)
struct mfd_of_node_entry *of_entry;
- const __be32 *reg;
u64 of_node_addr;
/* Skip if OF node has previously been allocated to a device */
@@ -115,13 +114,10 @@ static int mfd_match_of_node_to_dev(struct platform_device *pdev,
goto allocate_of_node;
/* We only care about each node's first defined address */
- reg = of_get_address(np, 0, NULL, NULL);
- if (!reg)
+ if (of_property_read_reg(np, 0, &of_node_addr, NULL))
/* OF node does not contain a 'reg' property to match to */
return -EAGAIN;
- of_node_addr = of_read_number(reg, of_n_addr_cells(np));
-
if (cell->of_reg != of_node_addr)
/* No match */
return -EAGAIN;
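The mfd-core change above replaces the two-step of_get_address()/of_read_number() lookup with of_property_read_reg(), which reads the first "reg" entry of a node straight into a u64. A minimal sketch of the same call used elsewhere, with a hypothetical helper name:

/*
 * Sketch of the of_property_read_reg() pattern adopted above: read the
 * first "reg" address of a node and compare it against an expected value.
 */
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/types.h>

static bool example_node_matches_reg(struct device_node *np, u64 wanted)
{
	u64 addr;

	/* Returns a negative errno if the node has no usable "reg" property. */
	if (of_property_read_reg(np, 0, &addr, NULL))
		return false;

	return addr == wanted;
}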
diff --git a/drivers/mfd/mp2629.c b/drivers/mfd/mp2629.c
index 16840ec5fd1c..717b704c3c6b 100644
--- a/drivers/mfd/mp2629.c
+++ b/drivers/mfd/mp2629.c
@@ -70,7 +70,7 @@ static struct i2c_driver mp2629_driver = {
.name = "mp2629",
.of_match_table = mp2629_of_match,
},
- .probe_new = mp2629_probe,
+ .probe = mp2629_probe,
};
module_i2c_driver(mp2629_driver);
diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
index d3b32eb79837..2685efa5c9e2 100644
--- a/drivers/mfd/mt6360-core.c
+++ b/drivers/mfd/mt6360-core.c
@@ -623,7 +623,7 @@ static struct i2c_driver mt6360_driver = {
.pm = &mt6360_pm_ops,
.of_match_table = of_match_ptr(mt6360_of_id),
},
- .probe_new = mt6360_probe,
+ .probe = mt6360_probe,
};
module_i2c_driver(mt6360_driver);
diff --git a/drivers/mfd/mt6370.c b/drivers/mfd/mt6370.c
index cf19cce2fdc0..c126ccb25d66 100644
--- a/drivers/mfd/mt6370.c
+++ b/drivers/mfd/mt6370.c
@@ -303,7 +303,7 @@ static struct i2c_driver mt6370_driver = {
.name = "mt6370",
.of_match_table = mt6370_match_table,
},
- .probe_new = mt6370_probe,
+ .probe = mt6370_probe,
};
module_i2c_driver(mt6370_driver);
diff --git a/drivers/mfd/ntxec.c b/drivers/mfd/ntxec.c
index b02785b10d48..4416cd37e539 100644
--- a/drivers/mfd/ntxec.c
+++ b/drivers/mfd/ntxec.c
@@ -260,7 +260,7 @@ static struct i2c_driver ntxec_driver = {
.name = "ntxec",
.of_match_table = of_ntxec_match_table,
},
- .probe_new = ntxec_probe,
+ .probe = ntxec_probe,
.remove = ntxec_remove,
};
module_i2c_driver(ntxec_driver);
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index b8383c6cba3f..a36f12402987 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -725,7 +725,7 @@ static struct i2c_driver palmas_i2c_driver = {
.name = "palmas",
.of_match_table = of_palmas_match_tbl,
},
- .probe_new = palmas_i2c_probe,
+ .probe = palmas_i2c_probe,
.remove = palmas_i2c_remove,
.id_table = palmas_i2c_id,
};
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 0e4fc99e9f49..014a68711b18 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -282,7 +282,7 @@ static struct i2c_driver pcf50633_driver = {
.pm = pm_sleep_ptr(&pcf50633_pm),
},
.id_table = pcf50633_id_table,
- .probe_new = pcf50633_probe,
+ .probe = pcf50633_probe,
.remove = pcf50633_remove,
};
diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
index 837246aab4ac..94a8cca1d955 100644
--- a/drivers/mfd/qcom-pm8008.c
+++ b/drivers/mfd/qcom-pm8008.c
@@ -199,15 +199,15 @@ static const struct of_device_id pm8008_match[] = {
{ .compatible = "qcom,pm8008", },
{ },
};
+MODULE_DEVICE_TABLE(of, pm8008_match);
static struct i2c_driver pm8008_mfd_driver = {
.driver = {
.name = "pm8008",
.of_match_table = pm8008_match,
},
- .probe_new = pm8008_probe,
+ .probe = pm8008_probe,
};
module_i2c_driver(pm8008_mfd_driver);
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:qcom-pm8008");
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index 621ea61fa7c6..97f52b671520 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -8,9 +8,9 @@
* based on code
* Copyright (C) 2011 RICOH COMPANY,LTD
*/
+#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/i2c.h>
#include <linux/mfd/rc5t583.h>
enum int_type {
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index df83cc399315..5e81f011363f 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -288,7 +288,7 @@ static struct i2c_driver rc5t583_i2c_driver = {
.driver = {
.name = "rc5t583",
},
- .probe_new = rc5t583_i2c_probe,
+ .probe = rc5t583_i2c_probe,
.id_table = rc5t583_i2c_id,
};
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index d71483859e2e..b50cfa7f4b8f 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -318,7 +318,7 @@ static struct i2c_driver retu_driver = {
.name = "retu-mfd",
.of_match_table = retu_of_match,
},
- .probe_new = retu_probe,
+ .probe = retu_probe,
.remove = retu_remove,
.id_table = retu_id,
};
diff --git a/drivers/mfd/rk8xx-i2c.c b/drivers/mfd/rk8xx-i2c.c
index 2822bfa8a04a..1a98feea97e2 100644
--- a/drivers/mfd/rk8xx-i2c.c
+++ b/drivers/mfd/rk8xx-i2c.c
@@ -173,7 +173,7 @@ static struct i2c_driver rk8xx_i2c_driver = {
.of_match_table = rk8xx_i2c_of_match,
.pm = &rk8xx_i2c_pm_ops,
},
- .probe_new = rk8xx_i2c_probe,
+ .probe = rk8xx_i2c_probe,
.shutdown = rk8xx_i2c_shutdown,
};
module_i2c_driver(rk8xx_i2c_driver);
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 2f59230749cd..333fef8729a5 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -280,7 +280,7 @@ static struct i2c_driver rn5t618_i2c_driver = {
.of_match_table = of_match_ptr(rn5t618_of_match),
.pm = &rn5t618_i2c_dev_pm_ops,
},
- .probe_new = rn5t618_i2c_probe,
+ .probe = rn5t618_i2c_probe,
.remove = rn5t618_i2c_remove,
};
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index 7eb920633ee9..93d80a79b901 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -564,7 +564,7 @@ static struct i2c_driver bd71828_drv = {
.name = "rohm-bd71828",
.of_match_table = bd71828_of_match,
},
- .probe_new = &bd71828_i2c_probe,
+ .probe = bd71828_i2c_probe,
};
module_i2c_driver(bd71828_drv);
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 378eb1a692e4..0b58ecc78334 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -208,7 +208,7 @@ static struct i2c_driver bd718xx_i2c_driver = {
.name = "rohm-bd718x7",
.of_match_table = bd718xx_of_match,
},
- .probe_new = bd718xx_i2c_probe,
+ .probe = bd718xx_i2c_probe,
};
static int __init bd718xx_i2c_init(void)
diff --git a/drivers/mfd/rohm-bd9576.c b/drivers/mfd/rohm-bd9576.c
index 6491e385d980..645673322ec0 100644
--- a/drivers/mfd/rohm-bd9576.c
+++ b/drivers/mfd/rohm-bd9576.c
@@ -178,7 +178,7 @@ static struct i2c_driver bd957x_drv = {
.name = "rohm-bd957x",
.of_match_table = bd957x_of_match,
},
- .probe_new = &bd957x_i2c_probe,
+ .probe = bd957x_i2c_probe,
};
module_i2c_driver(bd957x_drv);
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
index 807c32101460..26972a5aff45 100644
--- a/drivers/mfd/rsmu_i2c.c
+++ b/drivers/mfd/rsmu_i2c.c
@@ -279,7 +279,7 @@ static struct i2c_driver rsmu_i2c_driver = {
.name = "rsmu-i2c",
.of_match_table = of_match_ptr(rsmu_i2c_of_match),
},
- .probe_new = rsmu_i2c_probe,
+ .probe = rsmu_i2c_probe,
.remove = rsmu_i2c_remove,
.id_table = rsmu_i2c_id,
};
diff --git a/drivers/mfd/rt4831.c b/drivers/mfd/rt4831.c
index c6d34dc2b520..f8d6dc55b558 100644
--- a/drivers/mfd/rt4831.c
+++ b/drivers/mfd/rt4831.c
@@ -109,7 +109,7 @@ static struct i2c_driver rt4831_driver = {
.name = "rt4831",
.of_match_table = rt4831_of_match,
},
- .probe_new = rt4831_probe,
+ .probe = rt4831_probe,
.remove = rt4831_remove,
};
module_i2c_driver(rt4831_driver);
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index a5e520fe50a1..67b0a228db24 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -41,9 +41,6 @@ static const struct mfd_cell rt5033_devs[] = {
.name = "rt5033-charger",
.of_compatible = "richtek,rt5033-charger",
}, {
- .name = "rt5033-battery",
- .of_compatible = "richtek,rt5033-battery",
- }, {
.name = "rt5033-led",
.of_compatible = "richtek,rt5033-led",
},
@@ -58,7 +55,7 @@ static const struct regmap_config rt5033_regmap_config = {
static int rt5033_i2c_probe(struct i2c_client *i2c)
{
struct rt5033_dev *rt5033;
- unsigned int dev_id;
+ unsigned int dev_id, chip_rev;
int ret;
rt5033 = devm_kzalloc(&i2c->dev, sizeof(*rt5033), GFP_KERNEL);
@@ -81,7 +78,8 @@ static int rt5033_i2c_probe(struct i2c_client *i2c)
dev_err(&i2c->dev, "Device not found\n");
return -ENODEV;
}
- dev_info(&i2c->dev, "Device found Device ID: %04x\n", dev_id);
+ chip_rev = dev_id & RT5033_CHIP_REV_MASK;
+ dev_info(&i2c->dev, "Device found (rev. %d)\n", chip_rev);
ret = regmap_add_irq_chip(rt5033->regmap, rt5033->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
@@ -122,7 +120,7 @@ static struct i2c_driver rt5033_driver = {
.name = "rt5033",
.of_match_table = rt5033_dt_match,
},
- .probe_new = rt5033_i2c_probe,
+ .probe = rt5033_i2c_probe,
.id_table = rt5033_i2c_id,
};
module_i2c_driver(rt5033_driver);
diff --git a/drivers/mfd/rt5120.c b/drivers/mfd/rt5120.c
index 829b7a0a0781..58d9a124d795 100644
--- a/drivers/mfd/rt5120.c
+++ b/drivers/mfd/rt5120.c
@@ -114,7 +114,7 @@ static struct i2c_driver rt5120_driver = {
.name = "rt5120",
.of_match_table = rt5120_device_match_table,
},
- .probe_new = rt5120_probe,
+ .probe = rt5120_probe,
};
module_i2c_driver(rt5120_driver);
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index c2d0ed496959..d2f631901886 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -450,7 +450,7 @@ static struct i2c_driver sec_pmic_driver = {
.pm = pm_sleep_ptr(&sec_pmic_pm_ops),
.of_match_table = sec_dt_match,
},
- .probe_new = sec_pmic_probe,
+ .probe = sec_pmic_probe,
.shutdown = sec_pmic_shutdown,
};
module_i2c_driver(sec_pmic_driver);
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index 22131cf85e3f..899c0b5ea3aa 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -866,7 +866,7 @@ static struct i2c_driver si476x_core_driver = {
.driver = {
.name = "si476x-core",
},
- .probe_new = si476x_core_probe,
+ .probe = si476x_core_probe,
.remove = si476x_core_remove,
.id_table = si476x_id,
};
diff --git a/drivers/mfd/simple-mfd-i2c.c b/drivers/mfd/simple-mfd-i2c.c
index 20782b4dd172..6eda79533208 100644
--- a/drivers/mfd/simple-mfd-i2c.c
+++ b/drivers/mfd/simple-mfd-i2c.c
@@ -72,28 +72,28 @@ static const struct simple_mfd_data silergy_sy7636a = {
.mfd_cell_size = ARRAY_SIZE(sy7636a_cells),
};
-static const struct mfd_cell max597x_cells[] = {
- { .name = "max597x-regulator", },
- { .name = "max597x-iio", },
- { .name = "max597x-led", },
+static const struct mfd_cell max5970_cells[] = {
+ { .name = "max5970-regulator", },
+ { .name = "max5970-iio", },
+ { .name = "max5970-led", },
};
-static const struct simple_mfd_data maxim_max597x = {
- .mfd_cell = max597x_cells,
- .mfd_cell_size = ARRAY_SIZE(max597x_cells),
+static const struct simple_mfd_data maxim_max5970 = {
+ .mfd_cell = max5970_cells,
+ .mfd_cell_size = ARRAY_SIZE(max5970_cells),
};
static const struct of_device_id simple_mfd_i2c_of_match[] = {
{ .compatible = "kontron,sl28cpld" },
{ .compatible = "silergy,sy7636a", .data = &silergy_sy7636a},
- { .compatible = "maxim,max5970", .data = &maxim_max597x},
- { .compatible = "maxim,max5978", .data = &maxim_max597x},
+ { .compatible = "maxim,max5970", .data = &maxim_max5970},
+ { .compatible = "maxim,max5978", .data = &maxim_max5970},
{}
};
MODULE_DEVICE_TABLE(of, simple_mfd_i2c_of_match);
static struct i2c_driver simple_mfd_i2c_driver = {
- .probe_new = simple_mfd_i2c_probe,
+ .probe = simple_mfd_i2c_probe,
.driver = {
.name = "simple-mfd-i2c",
.of_match_table = simple_mfd_i2c_of_match,
diff --git a/drivers/mfd/sky81452.c b/drivers/mfd/sky81452.c
index 2515ecae1d3f..771b62a5c70f 100644
--- a/drivers/mfd/sky81452.c
+++ b/drivers/mfd/sky81452.c
@@ -77,7 +77,7 @@ static struct i2c_driver sky81452_driver = {
.name = "sky81452",
.of_match_table = of_match_ptr(sky81452_of_match),
},
- .probe_new = sky81452_probe,
+ .probe = sky81452_probe,
.id_table = sky81452_ids,
};
diff --git a/drivers/mfd/smpro-core.c b/drivers/mfd/smpro-core.c
index d7729cf70378..59d31590c69b 100644
--- a/drivers/mfd/smpro-core.c
+++ b/drivers/mfd/smpro-core.c
@@ -125,7 +125,7 @@ static const struct of_device_id smpro_core_of_match[] = {
MODULE_DEVICE_TABLE(of, smpro_core_of_match);
static struct i2c_driver smpro_core_driver = {
- .probe_new = smpro_core_probe,
+ .probe = smpro_core_probe,
.driver = {
.name = "smpro-core",
.of_match_table = smpro_core_of_match,
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index e281971ba54e..c02cbd9c2f5d 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -330,9 +330,8 @@ static int stmfx_chip_init(struct i2c_client *client)
stmfx->vdd = devm_regulator_get_optional(&client->dev, "vdd");
ret = PTR_ERR_OR_ZERO(stmfx->vdd);
if (ret) {
- if (ret == -ENODEV)
- stmfx->vdd = NULL;
- else
+ stmfx->vdd = NULL;
+ if (ret != -ENODEV)
return dev_err_probe(&client->dev, ret, "Failed to get VDD regulator\n");
}
@@ -387,7 +386,7 @@ static int stmfx_chip_init(struct i2c_client *client)
err:
if (stmfx->vdd)
- return regulator_disable(stmfx->vdd);
+ regulator_disable(stmfx->vdd);
return ret;
}
@@ -553,7 +552,7 @@ static struct i2c_driver stmfx_driver = {
.of_match_table = stmfx_of_match,
.pm = pm_sleep_ptr(&stmfx_dev_pm_ops),
},
- .probe_new = stmfx_probe,
+ .probe = stmfx_probe,
.remove = stmfx_remove,
};
module_i2c_driver(stmfx_driver);
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 7998e0db1e15..1d7b401776d1 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -118,7 +118,7 @@ static struct i2c_driver stmpe_i2c_driver = {
.pm = pm_sleep_ptr(&stmpe_dev_pm_ops),
.of_match_table = stmpe_of_match,
},
- .probe_new = stmpe_i2c_probe,
+ .probe = stmpe_i2c_probe,
.remove = stmpe_i2c_remove,
.id_table = stmpe_i2c_id,
};
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index a92301dfc712..9c3cf58457a7 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1485,9 +1485,9 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
void stmpe_remove(struct stmpe *stmpe)
{
- if (!IS_ERR(stmpe->vio))
+ if (!IS_ERR(stmpe->vio) && regulator_is_enabled(stmpe->vio))
regulator_disable(stmpe->vio);
- if (!IS_ERR(stmpe->vcc))
+ if (!IS_ERR(stmpe->vcc) && regulator_is_enabled(stmpe->vcc))
regulator_disable(stmpe->vcc);
__stmpe_disable(stmpe, STMPE_BLOCK_ADC);
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index 8db1530d9bac..3cc7492f828f 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -7,6 +7,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/stpmic1.h>
#include <linux/module.h>
+#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -19,7 +20,7 @@
static const struct regmap_range stpmic1_readable_ranges[] = {
regmap_reg_range(TURN_ON_SR, VERSION_SR),
- regmap_reg_range(SWOFF_PWRCTRL_CR, LDO6_STDBY_CR),
+ regmap_reg_range(MAIN_CR, LDO6_STDBY_CR),
regmap_reg_range(BST_SW_CR, BST_SW_CR),
regmap_reg_range(INT_PENDING_R1, INT_PENDING_R4),
regmap_reg_range(INT_CLEAR_R1, INT_CLEAR_R4),
@@ -30,7 +31,7 @@ static const struct regmap_range stpmic1_readable_ranges[] = {
};
static const struct regmap_range stpmic1_writeable_ranges[] = {
- regmap_reg_range(SWOFF_PWRCTRL_CR, LDO6_STDBY_CR),
+ regmap_reg_range(MAIN_CR, LDO6_STDBY_CR),
regmap_reg_range(BST_SW_CR, BST_SW_CR),
regmap_reg_range(INT_CLEAR_R1, INT_CLEAR_R4),
regmap_reg_range(INT_SET_MASK_R1, INT_SET_MASK_R4),
@@ -117,6 +118,16 @@ static const struct regmap_irq_chip stpmic1_regmap_irq_chip = {
.num_irqs = ARRAY_SIZE(stpmic1_irqs),
};
+static int stpmic1_power_off(struct sys_off_data *data)
+{
+ struct stpmic1 *ddata = data->cb_data;
+
+ regmap_update_bits(ddata->regmap, MAIN_CR,
+ SOFTWARE_SWITCH_OFF, SOFTWARE_SWITCH_OFF);
+
+ return NOTIFY_DONE;
+}
+
static int stpmic1_probe(struct i2c_client *i2c)
{
struct stpmic1 *ddata;
@@ -159,6 +170,16 @@ static int stpmic1_probe(struct i2c_client *i2c)
return ret;
}
+ ret = devm_register_sys_off_handler(ddata->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ stpmic1_power_off,
+ ddata);
+ if (ret) {
+ dev_err(ddata->dev, "failed to register sys-off handler: %d\n", ret);
+ return ret;
+ }
+
return devm_of_platform_populate(dev);
}
@@ -201,7 +222,7 @@ static struct i2c_driver stpmic1_driver = {
.of_match_table = of_match_ptr(stpmic1_of_match),
.pm = pm_sleep_ptr(&stpmic1_pm),
},
- .probe_new = stpmic1_probe,
+ .probe = stpmic1_probe,
};
module_i2c_driver(stpmic1_driver);
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index 2a8fc9d1c806..f35c3af680dd 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -239,7 +239,7 @@ static struct i2c_driver stw481x_driver = {
.name = "stw481x",
.of_match_table = stw481x_match,
},
- .probe_new = stw481x_probe,
+ .probe = stw481x_probe,
.id_table = stw481x_id,
};
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index cbfe19d1b145..16df64e3c0be 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -485,7 +485,7 @@ static struct i2c_driver tc3589x_driver = {
.pm = pm_sleep_ptr(&tc3589x_dev_pm_ops),
.of_match_table = of_match_ptr(tc3589x_match),
},
- .probe_new = tc3589x_probe,
+ .probe = tc3589x_probe,
.remove = tc3589x_remove,
.id_table = tc3589x_id,
};
diff --git a/drivers/mfd/ti-lmu.c b/drivers/mfd/ti-lmu.c
index 9921320be255..4f06adad7b5e 100644
--- a/drivers/mfd/ti-lmu.c
+++ b/drivers/mfd/ti-lmu.c
@@ -217,7 +217,7 @@ static const struct i2c_device_id ti_lmu_ids[] = {
MODULE_DEVICE_TABLE(i2c, ti_lmu_ids);
static struct i2c_driver ti_lmu_driver = {
- .probe_new = ti_lmu_probe,
+ .probe = ti_lmu_probe,
.driver = {
.name = "ti-lmu",
.of_match_table = ti_lmu_of_match,
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index a66cb911998d..5601f6d0d874 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -209,7 +209,7 @@ static struct i2c_driver tps6105x_driver = {
.name = "tps6105x",
.of_match_table = tps6105x_of_match,
},
- .probe_new = tps6105x_probe,
+ .probe = tps6105x_probe,
.remove = tps6105x_remove,
.id_table = tps6105x_id,
};
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index faea4ff44c6f..2b9105295f30 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -664,7 +664,7 @@ static struct i2c_driver tps65010_driver = {
.driver = {
.name = "tps65010",
},
- .probe_new = tps65010_probe,
+ .probe = tps65010_probe,
.remove = tps65010_remove,
.id_table = tps65010_id,
};
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 500b594de316..9716bf703c7a 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -122,7 +122,7 @@ static struct i2c_driver tps6507x_i2c_driver = {
.name = "tps6507x",
.of_match_table = of_match_ptr(tps6507x_of_match),
},
- .probe_new = tps6507x_i2c_probe,
+ .probe = tps6507x_i2c_probe,
.id_table = tps6507x_i2c_id,
};
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index 9494c1d71b86..6a21000aad4a 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -129,7 +129,7 @@ static struct i2c_driver tps65086_driver = {
.name = "tps65086",
.of_match_table = tps65086_of_match_table,
},
- .probe_new = tps65086_probe,
+ .probe = tps65086_probe,
.remove = tps65086_remove,
.id_table = tps65086_id_table,
};
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index af718a9c58b3..a35ad70755fb 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -236,7 +236,7 @@ static struct i2c_driver tps65090_driver = {
.suppress_bind_attrs = true,
.of_match_table = of_match_ptr(tps65090_of_match),
},
- .probe_new = tps65090_i2c_probe,
+ .probe = tps65090_i2c_probe,
.id_table = tps65090_id_table,
};
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index eebd60601b01..60599291b315 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -402,7 +402,7 @@ static struct i2c_driver tps65217_driver = {
.of_match_table = tps65217_of_match,
},
.id_table = tps65217_id_table,
- .probe_new = tps65217_probe,
+ .probe = tps65217_probe,
.remove = tps65217_remove,
};
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index ea69dcef91ec..619bf7adb20c 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -347,7 +347,7 @@ static struct i2c_driver tps65218_driver = {
.name = "tps65218",
.of_match_table = of_tps65218_match_table,
},
- .probe_new = tps65218_probe,
+ .probe = tps65218_probe,
.id_table = tps65218_id_table,
};
diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
index 0e402fda206b..0e0c42e4fdfc 100644
--- a/drivers/mfd/tps65219.c
+++ b/drivers/mfd/tps65219.c
@@ -25,13 +25,21 @@ static int tps65219_cold_reset(struct tps65219 *tps)
TPS65219_MFP_COLD_RESET_I2C_CTRL_MASK);
}
-static int tps65219_restart(struct notifier_block *this,
- unsigned long reboot_mode, void *cmd)
+static int tps65219_soft_shutdown(struct tps65219 *tps)
{
- struct tps65219 *tps;
+ return regmap_update_bits(tps->regmap, TPS65219_REG_MFP_CTRL,
+ TPS65219_MFP_I2C_OFF_REQ_MASK,
+ TPS65219_MFP_I2C_OFF_REQ_MASK);
+}
- tps = container_of(this, struct tps65219, nb);
+static int tps65219_power_off_handler(struct sys_off_data *data)
+{
+ tps65219_soft_shutdown(data->cb_data);
+ return NOTIFY_DONE;
+}
+static int tps65219_restart(struct tps65219 *tps, unsigned long reboot_mode)
+{
if (reboot_mode == REBOOT_WARM)
tps65219_warm_reset(tps);
else
@@ -40,10 +48,11 @@ static int tps65219_restart(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct notifier_block pmic_rst_restart_nb = {
- .notifier_call = tps65219_restart,
- .priority = 200,
-};
+static int tps65219_restart_handler(struct sys_off_data *data)
+{
+ tps65219_restart(data->cb_data, data->mode);
+ return NOTIFY_DONE;
+}
static const struct resource tps65219_pwrbutton_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS65219_INT_PB_FALLING_EDGE_DETECT, "falling"),
@@ -106,7 +115,7 @@ static const struct mfd_cell tps65219_cells[] = {
.resources = tps65219_regulator_resources,
.num_resources = ARRAY_SIZE(tps65219_regulator_resources),
},
- { .name = "tps65219-gpios", },
+ { .name = "tps65219-gpio", },
};
static const struct mfd_cell tps65219_pwrbutton_cell = {
@@ -269,13 +278,22 @@ static int tps65219_probe(struct i2c_client *client)
}
}
- tps->nb = pmic_rst_restart_nb;
- ret = register_restart_handler(&tps->nb);
+ ret = devm_register_restart_handler(tps->dev,
+ tps65219_restart_handler,
+ tps);
+
if (ret) {
dev_err(tps->dev, "cannot register restart handler, %d\n", ret);
return ret;
}
+ ret = devm_register_power_off_handler(tps->dev,
+ tps65219_power_off_handler,
+ tps);
+ if (ret) {
+ dev_err(tps->dev, "failed to register power-off handler: %d\n", ret);
+ return ret;
+ }
return 0;
}
@@ -290,7 +308,7 @@ static struct i2c_driver tps65219_driver = {
.name = "tps65219",
.of_match_table = of_tps65219_match_table,
},
- .probe_new = tps65219_probe,
+ .probe = tps65219_probe,
};
module_i2c_driver(tps65219_driver);
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 90e23232b6b0..55675ceedcd3 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -619,7 +619,7 @@ static struct i2c_driver tps6586x_driver = {
.of_match_table = of_match_ptr(tps6586x_of_match),
.pm = &tps6586x_pm_ops,
},
- .probe_new = tps6586x_i2c_probe,
+ .probe = tps6586x_i2c_probe,
.remove = tps6586x_i2c_remove,
.id_table = tps6586x_id_table,
};
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 821c0277a2ed..41408df1712f 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -535,7 +535,7 @@ static struct i2c_driver tps65910_i2c_driver = {
.name = "tps65910",
.of_match_table = of_match_ptr(tps65910_of_match),
},
- .probe_new = tps65910_i2c_probe,
+ .probe = tps65910_i2c_probe,
.id_table = tps65910_i2c_id,
};
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 1bf945966bf7..3c5ac781b6c1 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -60,7 +60,7 @@ static struct i2c_driver tps65912_i2c_driver = {
.name = "tps65912",
.of_match_table = tps65912_i2c_of_match_table,
},
- .probe_new = tps65912_i2c_probe,
+ .probe = tps65912_i2c_probe,
.remove = tps65912_i2c_remove,
.id_table = tps65912_i2c_id_table,
};
diff --git a/drivers/mfd/tps6594-i2c.c b/drivers/mfd/tps6594-i2c.c
index 449d5c61bc9f..899c88c0fe77 100644
--- a/drivers/mfd/tps6594-i2c.c
+++ b/drivers/mfd/tps6594-i2c.c
@@ -222,7 +222,7 @@ static int tps6594_i2c_probe(struct i2c_client *client)
match = of_match_device(tps6594_i2c_of_match_table, dev);
if (!match)
- return dev_err_probe(dev, PTR_ERR(match), "Failed to find matching chip ID\n");
+ return dev_err_probe(dev, -EINVAL, "Failed to find matching chip ID\n");
tps->chip_id = (unsigned long)match->data;
crc8_populate_msb(tps6594_i2c_crc_table, TPS6594_CRC8_POLYNOMIAL);
@@ -235,7 +235,7 @@ static struct i2c_driver tps6594_i2c_driver = {
.name = "tps6594",
.of_match_table = tps6594_i2c_of_match_table,
},
- .probe_new = tps6594_i2c_probe,
+ .probe = tps6594_i2c_probe,
};
module_i2c_driver(tps6594_i2c_driver);
diff --git a/drivers/mfd/tps6594-spi.c b/drivers/mfd/tps6594-spi.c
index a938a191744f..f4b4f37f957f 100644
--- a/drivers/mfd/tps6594-spi.c
+++ b/drivers/mfd/tps6594-spi.c
@@ -107,7 +107,7 @@ static int tps6594_spi_probe(struct spi_device *spi)
match = of_match_device(tps6594_spi_of_match_table, dev);
if (!match)
- return dev_err_probe(dev, PTR_ERR(match), "Failed to find matching chip ID\n");
+ return dev_err_probe(dev, -EINVAL, "Failed to find matching chip ID\n");
tps->chip_id = (unsigned long)match->data;
crc8_populate_msb(tps6594_spi_crc_table, TPS6594_CRC8_POLYNOMIAL);
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index e801b7ce010f..ce01a87f8dc3 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -890,7 +890,7 @@ static struct i2c_driver twl_driver = {
.driver.name = DRIVER_NAME,
.driver.pm = &twl_dev_pm_ops,
.id_table = twl_ids,
- .probe_new = twl_probe,
+ .probe = twl_probe,
.remove = twl_remove,
};
builtin_i2c_driver(twl_driver);
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index e982119bbefa..d85675a4d9a8 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -608,7 +608,7 @@ static const struct regmap_config twl6040_regmap_config = {
.volatile_reg = twl6040_volatile_reg,
.writeable_reg = twl6040_writeable_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.use_single_read = true,
.use_single_write = true,
};
@@ -829,7 +829,7 @@ static struct i2c_driver twl6040_driver = {
.driver = {
.name = "twl6040",
},
- .probe_new = twl6040_probe,
+ .probe = twl6040_probe,
.remove = twl6040_remove,
.id_table = twl6040_i2c_id,
};
diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
index 07e884087f2c..6b942d5270c1 100644
--- a/drivers/mfd/wcd934x.c
+++ b/drivers/mfd/wcd934x.c
@@ -227,10 +227,9 @@ static int wcd934x_slim_probe(struct slim_device *sdev)
"Failed to get IRQ\n");
ddata->extclk = devm_clk_get(dev, "extclk");
- if (IS_ERR(ddata->extclk)) {
- dev_err(dev, "Failed to get extclk");
- return PTR_ERR(ddata->extclk);
- }
+ if (IS_ERR(ddata->extclk))
+ return dev_err_probe(dev, PTR_ERR(ddata->extclk),
+ "Failed to get extclk");
ddata->supplies[0].supply = "vdd-buck";
ddata->supplies[1].supply = "vdd-buck-sido";
@@ -239,16 +238,12 @@ static int wcd934x_slim_probe(struct slim_device *sdev)
ddata->supplies[4].supply = "vdd-io";
ret = regulator_bulk_get(dev, WCD934X_MAX_SUPPLY, ddata->supplies);
- if (ret) {
- dev_err(dev, "Failed to get supplies: err = %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get supplies\n");
ret = regulator_bulk_enable(WCD934X_MAX_SUPPLY, ddata->supplies);
- if (ret) {
- dev_err(dev, "Failed to enable supplies: err = %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable supplies\n");
/*
* For WCD934X, it takes about 600us for the Vout_A and
@@ -258,8 +253,9 @@ static int wcd934x_slim_probe(struct slim_device *sdev)
usleep_range(600, 650);
reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(reset_gpio)) {
- return dev_err_probe(dev, PTR_ERR(reset_gpio),
- "Failed to get reset gpio: err = %ld\n", PTR_ERR(reset_gpio));
+ ret = dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Failed to get reset gpio\n");
+ goto err_disable_regulators;
}
msleep(20);
gpiod_set_value(reset_gpio, 1);
@@ -269,6 +265,10 @@ static int wcd934x_slim_probe(struct slim_device *sdev)
dev_set_drvdata(dev, ddata);
return 0;
+
+err_disable_regulators:
+ regulator_bulk_disable(WCD934X_MAX_SUPPLY, ddata->supplies);
+ return ret;
}
static void wcd934x_slim_remove(struct slim_device *sdev)
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index a5d6128fc67d..e2a7fccaed01 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -232,7 +232,7 @@ static struct i2c_driver wl1273_core_driver = {
.driver = {
.name = WL1273_FM_DRIVER_NAME,
},
- .probe_new = wl1273_core_probe,
+ .probe = wl1273_core_probe,
.id_table = wl1273_driver_id_table,
};
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index d2f444d2ae78..e86b6a4896a6 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -1430,7 +1430,7 @@ struct regmap_config wm831x_regmap_config = {
.reg_bits = 16,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = WM831X_DBE_CHECK_DATA,
.readable_reg = wm831x_reg_readable,
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 9dbe96e2d46a..997837f13180 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -102,7 +102,7 @@ static struct i2c_driver wm831x_i2c_driver = {
.of_match_table = of_match_ptr(wm831x_of_match),
.suppress_bind_attrs = true,
},
- .probe_new = wm831x_i2c_probe,
+ .probe = wm831x_i2c_probe,
.id_table = wm831x_i2c_id,
};
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 1fa1dfbc9e31..c2a7d7069975 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -52,7 +52,7 @@ static struct i2c_driver wm8350_i2c_driver = {
.name = "wm8350",
.suppress_bind_attrs = true,
},
- .probe_new = wm8350_i2c_probe,
+ .probe = wm8350_i2c_probe,
.id_table = wm8350_i2c_id,
};
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 5e1599ac9abc..75483c9be0c4 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -54,8 +54,6 @@ static int wm8400_init(struct wm8400 *wm8400,
unsigned int reg;
int ret;
- dev_set_drvdata(wm8400->dev, wm8400);
-
/* Check that this is actually a WM8400 */
ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &reg);
if (ret != 0) {
@@ -145,7 +143,7 @@ static struct i2c_driver wm8400_i2c_driver = {
.driver = {
.name = "WM8400",
},
- .probe_new = wm8400_i2c_probe,
+ .probe = wm8400_i2c_probe,
.id_table = wm8400_i2c_id,
};
#endif
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index c419ab0c0eae..1e4f1694f065 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -320,8 +320,6 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
if (ret != 0)
return ret;
- dev_set_drvdata(wm8994->dev, wm8994);
-
/* Add the on-chip regulators first for bootstrapping */
ret = mfd_add_devices(wm8994->dev, 0,
wm8994_regulator_devs,
@@ -672,7 +670,7 @@ static struct i2c_driver wm8994_i2c_driver = {
.pm = pm_ptr(&wm8994_pm_ops),
.of_match_table = wm8994_of_match,
},
- .probe_new = wm8994_i2c_probe,
+ .probe = wm8994_i2c_probe,
.remove = wm8994_i2c_remove,
.id_table = wm8994_i2c_id,
};
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 433aa4197785..75e427f124b2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -538,6 +538,29 @@ config TMR_INJECT
Say N here unless you know what you are doing.
+config TPS6594_ESM
+ tristate "TI TPS6594 Error Signal Monitor support"
+ depends on MFD_TPS6594
+ default MFD_TPS6594
+ help
+ Support ESM (Error Signal Monitor) on TPS6594 PMIC devices.
+ ESM is typically used to reboot the board when an error condition occurs.
+
+ This driver can also be built as a module. If so, the module
+ will be called tps6594-esm.
+
+config TPS6594_PFSM
+ tristate "TI TPS6594 Pre-configurable Finite State Machine support"
+ depends on MFD_TPS6594
+ default MFD_TPS6594
+ help
+ Support PFSM (Pre-configurable Finite State Machine) on TPS6594 PMIC devices.
+ These devices integrate a finite state machine engine, which manages the state
+ of the device during operating state transitions.
+
+ This driver can also be built as a module. If so, the module
+ will be called tps6594-pfsm.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 56de43943cd5..f2a4d1ff65d4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -65,3 +65,5 @@ obj-$(CONFIG_GP_PCI1XXXX) += mchp_pci1xxxx/
obj-$(CONFIG_VCPU_STALL_DETECTOR) += vcpu_stall_detector.o
obj-$(CONFIG_TMR_MANAGER) += xilinx_tmr_manager.o
obj-$(CONFIG_TMR_INJECT) += xilinx_tmr_inject.o
+obj-$(CONFIG_TPS6594_ESM) += tps6594-esm.o
+obj-$(CONFIG_TPS6594_PFSM) += tps6594-pfsm.o
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 3856d5c04c5f..469478f7a1d3 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -106,7 +106,7 @@ static struct i2c_driver ad_dpot_i2c_driver = {
.driver = {
.name = "ad_dpot",
},
- .probe_new = ad_dpot_i2c_probe,
+ .probe = ad_dpot_i2c_probe,
.remove = ad_dpot_i2c_remove,
.id_table = ad_dpot_id,
};
diff --git a/drivers/misc/altera-stapl/Makefile b/drivers/misc/altera-stapl/Makefile
index dd0f8189666b..90f18e7bf9b0 100644
--- a/drivers/misc/altera-stapl/Makefile
+++ b/drivers/misc/altera-stapl/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-altera-stapl-objs = altera-lpt.o altera-jtag.o altera-comp.o altera.o
+altera-stapl-y = altera-jtag.o altera-comp.o altera.o
+altera-stapl-$(CONFIG_HAS_IOPORT) += altera-lpt.o
obj-$(CONFIG_ALTERA_STAPL) += altera-stapl.o
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index a58b7cb81d98..587427b73914 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -2407,6 +2407,10 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
astate->config = config;
if (!astate->config->jtag_io) {
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ retval = -ENODEV;
+ goto free_state;
+ }
dprintk("%s: using byteblaster!\n", __func__);
astate->config->jtag_io = netup_jtag_io_lpt;
}
@@ -2481,7 +2485,7 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
} else if (exec_result)
printk(KERN_ERR "%s: error %d\n", __func__, exec_result);
-
+free_state:
kfree(astate);
free_value:
kfree(value);
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 0526c55d5cd5..693f0e539f37 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -296,7 +296,7 @@ static struct i2c_driver apds9802als_driver = {
.name = DRIVER_NAME,
.pm = APDS9802ALS_PM_OPS,
},
- .probe_new = apds9802als_probe,
+ .probe = apds9802als_probe,
.remove = apds9802als_remove,
.id_table = apds9802als_id,
};
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 0024503ea6db..92b92be91d60 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1267,11 +1267,11 @@ static const struct dev_pm_ops apds990x_pm_ops = {
};
static struct i2c_driver apds990x_driver = {
- .driver = {
+ .driver = {
.name = "apds990x",
.pm = &apds990x_pm_ops,
},
- .probe_new = apds990x_probe,
+ .probe = apds990x_probe,
.remove = apds990x_remove,
.id_table = apds990x_id,
};
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index bedbe0efb330..1629b62fd052 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1374,11 +1374,11 @@ static const struct dev_pm_ops bh1770_pm_ops = {
};
static struct i2c_driver bh1770_driver = {
- .driver = {
+ .driver = {
.name = "bh1770glc",
.pm = &bh1770_pm_ops,
},
- .probe_new = bh1770_probe,
+ .probe = bh1770_probe,
.remove = bh1770_remove,
.id_table = bh1770_id,
};
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index d517eed32971..21fc5bc85c5c 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -250,7 +250,7 @@ static struct i2c_driver ds1682_driver = {
.name = "ds1682",
.of_match_table = ds1682_of_match,
},
- .probe_new = ds1682_probe,
+ .probe = ds1682_probe,
.remove = ds1682_remove,
.id_table = ds1682_id,
};
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 938c4f41b98c..dbbf7db4ff2f 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -761,7 +761,8 @@ static int at24_probe(struct i2c_client *client)
pm_runtime_disable(dev);
if (!pm_runtime_status_suspended(dev))
regulator_disable(at24->vcc_reg);
- return PTR_ERR(at24->nvmem);
+ return dev_err_probe(dev, PTR_ERR(at24->nvmem),
+ "failed to register nvmem\n");
}
/*
@@ -832,7 +833,7 @@ static struct i2c_driver at24_driver = {
.of_match_table = at24_of_match,
.acpi_match_table = ACPI_PTR(at24_acpi_ids),
},
- .probe_new = at24_probe,
+ .probe = at24_probe,
.remove = at24_remove,
.id_table = at24_ids,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index c8c6deb7ed89..a1acd77130f2 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -234,7 +234,7 @@ static struct i2c_driver ee1004_driver = {
.name = "ee1004",
.dev_groups = ee1004_groups,
},
- .probe_new = ee1004_probe,
+ .probe = ee1004_probe,
.remove = ee1004_remove,
.id_table = ee1004_ids,
};
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 32611100d5cd..ccb7c2f7ee2f 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -196,7 +196,7 @@ static struct i2c_driver eeprom_driver = {
.driver = {
.name = "eeprom",
},
- .probe_new = eeprom_probe,
+ .probe = eeprom_probe,
.remove = eeprom_remove,
.id_table = eeprom_id,
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 7075d0b37881..740c06382b83 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -1556,7 +1556,7 @@ static struct i2c_driver idt_driver = {
.name = IDT_NAME,
.of_match_table = idt_of_match,
},
- .probe_new = idt_probe,
+ .probe = idt_probe,
.remove = idt_remove,
.id_table = idt_ids,
};
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 79cf8afcef2e..cb6b1efeafe0 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -192,7 +192,7 @@ static struct i2c_driver max6875_driver = {
.driver = {
.name = "max6875",
},
- .probe_new = max6875_probe,
+ .probe = max6875_probe,
.remove = max6875_remove,
.id_table = max6875_id,
};
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 30d4d0476248..9666d28037e1 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1437,7 +1437,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
if (init.attrs)
- sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, args);
@@ -2225,6 +2225,9 @@ static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ct
fdev->miscdev.fops = &fastrpc_fops;
fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
domain, is_secured ? "-secure" : "");
+ if (!fdev->miscdev.name)
+ return -ENOMEM;
+
err = misc_register(&fdev->miscdev);
if (!err) {
if (is_secured)
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 8967940ecd1e..759eaeb64307 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -131,7 +131,7 @@ static struct i2c_driver hmc6352_driver = {
.driver = {
.name = "hmc6352",
},
- .probe_new = hmc6352_probe,
+ .probe = hmc6352_probe,
.remove = hmc6352_remove,
.id_table = hmc6352_id,
};
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 12108a7b9b40..ee6296b98078 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -105,7 +105,7 @@ static struct i2c_driver ics932s401_driver = {
.driver = {
.name = "ics932s401",
},
- .probe_new = ics932s401_probe,
+ .probe = ics932s401_probe,
.remove = ics932s401_remove,
.id_table = ics932s401_id,
.detect = ics932s401_detect,
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index 147b58f7968d..ebf0635aee64 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -459,7 +459,7 @@ static struct i2c_driver isl29003_driver = {
.name = ISL29003_DRV_NAME,
.pm = ISL29003_PM_OPS,
},
- .probe_new = isl29003_probe,
+ .probe = isl29003_probe,
.remove = isl29003_remove,
.id_table = isl29003_id,
};
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 3be02093368c..c5976fa8c825 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -214,7 +214,7 @@ static struct i2c_driver isl29020_driver = {
.name = "isl29020",
.pm = ISL29020_PM_OPS,
},
- .probe_new = isl29020_probe,
+ .probe = isl29020_probe,
.remove = isl29020_remove,
.id_table = isl29020_id,
};
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 7071412d6bf6..3882e97e96a7 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -262,7 +262,7 @@ static struct i2c_driver lis3lv02d_i2c_driver = {
.pm = &lis3_pm_ops,
.of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids),
},
- .probe_new = lis3lv02d_i2c_probe,
+ .probe = lis3lv02d_i2c_probe,
.remove = lis3lv02d_i2c_remove,
.id_table = lis3lv02d_id,
};
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index b4712ff196b4..0772e4a4757e 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -79,7 +79,7 @@ static struct crashpoint crashpoints[] = {
CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
CRASHPOINT("INT_HW_IRQ_EN", "handle_irq_event"),
CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
- CRASHPOINT("FS_DEVRW", "ll_rw_block"),
+ CRASHPOINT("FS_SUBMIT_BH", "submit_bh"),
CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
CRASHPOINT("TIMERADD", "hrtimer_start"),
CRASHPOINT("SCSI_QUEUE_RQ", "scsi_queue_rq"),
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 31e3c74ca1f1..b8b716faf192 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -108,7 +108,7 @@ struct mkhi_fw_ver {
static int mei_osver(struct mei_cl_device *cldev)
{
const size_t size = MKHI_OSVER_BUF_LEN;
- char buf[MKHI_OSVER_BUF_LEN];
+ u8 buf[MKHI_OSVER_BUF_LEN];
struct mkhi_msg *req;
struct mkhi_fwcaps *fwcaps;
struct mei_os_ver *os_ver;
@@ -137,7 +137,7 @@ static int mei_osver(struct mei_cl_device *cldev)
sizeof(struct mkhi_fw_ver_block) * (__num))
static int mei_fwver(struct mei_cl_device *cldev)
{
- char buf[MKHI_FWVER_BUF_LEN];
+ u8 buf[MKHI_FWVER_BUF_LEN];
struct mkhi_msg req;
struct mkhi_msg *rsp;
struct mkhi_fw_ver *fwver;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 5d7a68674d9b..33ec6424dfee 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -1046,9 +1046,6 @@ static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
const struct mei_cl_device_id *found_id;
- if (!cldev)
- return 0;
-
if (!cldev->do_match)
return 0;
@@ -1079,9 +1076,6 @@ static int mei_cl_device_probe(struct device *dev)
cldev = to_mei_cl_device(dev);
cldrv = to_mei_cl_driver(dev->driver);
- if (!cldev)
- return 0;
-
if (!cldrv || !cldrv->probe)
return -ENODEV;
@@ -1276,9 +1270,6 @@ static void mei_cl_bus_dev_release(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
- if (!cldev)
- return;
-
mei_cl_flush_queues(cldev->cl, NULL);
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
diff --git a/drivers/misc/smpro-errmon.c b/drivers/misc/smpro-errmon.c
index a1f0b2c77fac..c12035a46585 100644
--- a/drivers/misc/smpro-errmon.c
+++ b/drivers/misc/smpro-errmon.c
@@ -6,7 +6,6 @@
*
*/
-#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/misc/tps6594-esm.c b/drivers/misc/tps6594-esm.c
new file mode 100644
index 000000000000..b488f704f104
--- /dev/null
+++ b/drivers/misc/tps6594-esm.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ESM (Error Signal Monitor) driver for TI TPS6594/TPS6593/LP8764 PMICs
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/tps6594.h>
+
+static irqreturn_t tps6594_esm_isr(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ int i;
+
+ for (i = 0 ; i < pdev->num_resources ; i++) {
+ if (irq == platform_get_irq_byname(pdev, pdev->resource[i].name)) {
+ dev_err(pdev->dev.parent, "%s error detected\n", pdev->resource[i].name);
+ return IRQ_HANDLED;
+ }
+ }
+
+ return IRQ_NONE;
+}
+
+static int tps6594_esm_probe(struct platform_device *pdev)
+{
+ struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ int irq;
+ int ret;
+ int i;
+
+ for (i = 0 ; i < pdev->num_resources ; i++) {
+ irq = platform_get_irq_byname(pdev, pdev->resource[i].name);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get %s irq\n",
+ pdev->resource[i].name);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ tps6594_esm_isr, IRQF_ONESHOT,
+ pdev->resource[i].name, pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+ }
+
+ ret = regmap_set_bits(tps->regmap, TPS6594_REG_ESM_SOC_MODE_CFG,
+ TPS6594_BIT_ESM_SOC_EN | TPS6594_BIT_ESM_SOC_ENDRV);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to configure ESM\n");
+
+ ret = regmap_set_bits(tps->regmap, TPS6594_REG_ESM_SOC_START_REG,
+ TPS6594_BIT_ESM_SOC_START);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to start ESM\n");
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ return 0;
+}
+
+static int tps6594_esm_remove(struct platform_device *pdev)
+{
+ struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = regmap_clear_bits(tps->regmap, TPS6594_REG_ESM_SOC_START_REG,
+ TPS6594_BIT_ESM_SOC_START);
+ if (ret) {
+ dev_err(dev, "Failed to stop ESM\n");
+ goto out;
+ }
+
+ ret = regmap_clear_bits(tps->regmap, TPS6594_REG_ESM_SOC_MODE_CFG,
+ TPS6594_BIT_ESM_SOC_EN | TPS6594_BIT_ESM_SOC_ENDRV);
+ if (ret)
+ dev_err(dev, "Failed to unconfigure ESM\n");
+
+out:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int tps6594_esm_suspend(struct device *dev)
+{
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ int ret;
+
+ ret = regmap_clear_bits(tps->regmap, TPS6594_REG_ESM_SOC_START_REG,
+ TPS6594_BIT_ESM_SOC_START);
+
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
+
+static int tps6594_esm_resume(struct device *dev)
+{
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+
+ pm_runtime_get_sync(dev);
+
+ return regmap_set_bits(tps->regmap, TPS6594_REG_ESM_SOC_START_REG,
+ TPS6594_BIT_ESM_SOC_START);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(tps6594_esm_pm_ops, tps6594_esm_suspend, tps6594_esm_resume);
+
+static struct platform_driver tps6594_esm_driver = {
+ .driver = {
+ .name = "tps6594-esm",
+ .pm = pm_sleep_ptr(&tps6594_esm_pm_ops),
+ },
+ .probe = tps6594_esm_probe,
+ .remove = tps6594_esm_remove,
+};
+
+module_platform_driver(tps6594_esm_driver);
+
+MODULE_ALIAS("platform:tps6594-esm");
+MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
+MODULE_DESCRIPTION("TPS6594 Error Signal Monitor Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c
new file mode 100644
index 000000000000..5223d1580807
--- /dev/null
+++ b/drivers/misc/tps6594-pfsm.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PFSM (Pre-configurable Finite State Machine) driver for TI TPS6594/TPS6593/LP8764 PMICs
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/tps6594.h>
+
+#include <linux/tps6594_pfsm.h>
+
+#define TPS6594_STARTUP_DEST_MCU_ONLY_VAL 2
+#define TPS6594_STARTUP_DEST_ACTIVE_VAL 3
+#define TPS6594_STARTUP_DEST_SHIFT 5
+#define TPS6594_STARTUP_DEST_MCU_ONLY (TPS6594_STARTUP_DEST_MCU_ONLY_VAL \
+ << TPS6594_STARTUP_DEST_SHIFT)
+#define TPS6594_STARTUP_DEST_ACTIVE (TPS6594_STARTUP_DEST_ACTIVE_VAL \
+ << TPS6594_STARTUP_DEST_SHIFT)
+
+/*
+ * To update the PMIC firmware, the user must be able to access
+ * page 0 (user registers) and page 1 (NVM control and configuration).
+ */
+#define TPS6594_PMIC_MAX_POS 0x200
+
+#define TPS6594_FILE_TO_PFSM(f) container_of((f)->private_data, struct tps6594_pfsm, miscdev)
+
+/**
+ * struct tps6594_pfsm - device private data structure
+ *
+ * @miscdev: misc device infos
+ * @regmap: regmap for accessing the device registers
+ */
+struct tps6594_pfsm {
+ struct miscdevice miscdev;
+ struct regmap *regmap;
+};
+
+static ssize_t tps6594_pfsm_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
+ loff_t pos = *ppos;
+ unsigned int val;
+ int ret;
+ int i;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= TPS6594_PMIC_MAX_POS)
+ return 0;
+ if (count > TPS6594_PMIC_MAX_POS - pos)
+ count = TPS6594_PMIC_MAX_POS - pos;
+
+ for (i = 0 ; i < count ; i++) {
+ ret = regmap_read(pfsm->regmap, pos + i, &val);
+ if (ret)
+ return ret;
+
+ if (put_user(val, buf + i))
+ return -EFAULT;
+ }
+
+ *ppos = pos + count;
+
+ return count;
+}
+
+static ssize_t tps6594_pfsm_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
+ loff_t pos = *ppos;
+ char val;
+ int ret;
+ int i;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= TPS6594_PMIC_MAX_POS || !count)
+ return 0;
+ if (count > TPS6594_PMIC_MAX_POS - pos)
+ count = TPS6594_PMIC_MAX_POS - pos;
+
+ for (i = 0 ; i < count ; i++) {
+ if (get_user(val, buf + i))
+ return -EFAULT;
+
+ ret = regmap_write(pfsm->regmap, pos + i, val);
+ if (ret)
+ return ret;
+ }
+
+ *ppos = pos + count;
+
+ return count;
+}
+
+static int tps6594_pfsm_configure_ret_trig(struct regmap *regmap, u8 gpio_ret, u8 ddr_ret)
+{
+ int ret;
+
+ if (gpio_ret)
+ ret = regmap_set_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
+ TPS6594_BIT_TRIGGER_I2C(5) | TPS6594_BIT_TRIGGER_I2C(6));
+ else
+ ret = regmap_clear_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
+ TPS6594_BIT_TRIGGER_I2C(5) | TPS6594_BIT_TRIGGER_I2C(6));
+ if (ret)
+ return ret;
+
+ if (ddr_ret)
+ ret = regmap_set_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
+ TPS6594_BIT_TRIGGER_I2C(7));
+ else
+ ret = regmap_clear_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
+ TPS6594_BIT_TRIGGER_I2C(7));
+
+ return ret;
+}
+
+static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
+ struct pmic_state_opt state_opt;
+ void __user *argp = (void __user *)arg;
+ int ret = -ENOIOCTLCMD;
+
+ switch (cmd) {
+ case PMIC_GOTO_STANDBY:
+ /* Disable LP mode */
+ ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
+ TPS6594_BIT_LP_STANDBY_SEL);
+ if (ret)
+ return ret;
+
+ /* Force trigger */
+ ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
+ TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0));
+ break;
+ case PMIC_GOTO_LP_STANDBY:
+ /* Enable LP mode */
+ ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
+ TPS6594_BIT_LP_STANDBY_SEL);
+ if (ret)
+ return ret;
+
+ /* Force trigger */
+ ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
+ TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0));
+ break;
+ case PMIC_UPDATE_PGM:
+ /* Force trigger */
+ ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
+ TPS6594_BIT_TRIGGER_I2C(3), TPS6594_BIT_TRIGGER_I2C(3));
+ break;
+ case PMIC_SET_ACTIVE_STATE:
+ /* Modify NSLEEP1-2 bits */
+ ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
+ TPS6594_BIT_NSLEEP1B | TPS6594_BIT_NSLEEP2B);
+ break;
+ case PMIC_SET_MCU_ONLY_STATE:
+ if (copy_from_user(&state_opt, argp, sizeof(state_opt)))
+ return -EFAULT;
+
+ /* Configure retention triggers */
+ ret = tps6594_pfsm_configure_ret_trig(pfsm->regmap, state_opt.gpio_retention,
+ state_opt.ddr_retention);
+ if (ret)
+ return ret;
+
+ /* Modify NSLEEP1-2 bits */
+ ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
+ TPS6594_BIT_NSLEEP1B);
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
+ TPS6594_BIT_NSLEEP2B);
+ break;
+ case PMIC_SET_RETENTION_STATE:
+ if (copy_from_user(&state_opt, argp, sizeof(state_opt)))
+ return -EFAULT;
+
+ /* Configure wake-up destination */
+ if (state_opt.mcu_only_startup_dest)
+ ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
+ TPS6594_MASK_STARTUP_DEST,
+ TPS6594_STARTUP_DEST_MCU_ONLY);
+ else
+ ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
+ TPS6594_MASK_STARTUP_DEST,
+ TPS6594_STARTUP_DEST_ACTIVE);
+ if (ret)
+ return ret;
+
+ /* Configure retention triggers */
+ ret = tps6594_pfsm_configure_ret_trig(pfsm->regmap, state_opt.gpio_retention,
+ state_opt.ddr_retention);
+ if (ret)
+ return ret;
+
+ /* Modify NSLEEP1-2 bits */
+ ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
+ TPS6594_BIT_NSLEEP2B);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations tps6594_pfsm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = generic_file_llseek,
+ .read = tps6594_pfsm_read,
+ .write = tps6594_pfsm_write,
+ .unlocked_ioctl = tps6594_pfsm_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+static irqreturn_t tps6594_pfsm_isr(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ int i;
+
+ for (i = 0 ; i < pdev->num_resources ; i++) {
+ if (irq == platform_get_irq_byname(pdev, pdev->resource[i].name)) {
+ dev_err(pdev->dev.parent, "%s event detected\n", pdev->resource[i].name);
+ return IRQ_HANDLED;
+ }
+ }
+
+ return IRQ_NONE;
+}
+
+static int tps6594_pfsm_probe(struct platform_device *pdev)
+{
+ struct tps6594_pfsm *pfsm;
+ struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ int irq;
+ int ret;
+ int i;
+
+ pfsm = devm_kzalloc(dev, sizeof(struct tps6594_pfsm), GFP_KERNEL);
+ if (!pfsm)
+ return -ENOMEM;
+
+ pfsm->regmap = tps->regmap;
+
+ pfsm->miscdev.minor = MISC_DYNAMIC_MINOR;
+ pfsm->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "pfsm-%ld-0x%02x",
+ tps->chip_id, tps->reg);
+ pfsm->miscdev.fops = &tps6594_pfsm_fops;
+ pfsm->miscdev.parent = dev->parent;
+
+ for (i = 0 ; i < pdev->num_resources ; i++) {
+ irq = platform_get_irq_byname(pdev, pdev->resource[i].name);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get %s irq\n",
+ pdev->resource[i].name);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ tps6594_pfsm_isr, IRQF_ONESHOT,
+ pdev->resource[i].name, pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+ }
+
+ platform_set_drvdata(pdev, pfsm);
+
+ return misc_register(&pfsm->miscdev);
+}
+
+static int tps6594_pfsm_remove(struct platform_device *pdev)
+{
+ struct tps6594_pfsm *pfsm = platform_get_drvdata(pdev);
+
+ misc_deregister(&pfsm->miscdev);
+
+ return 0;
+}
+
+static struct platform_driver tps6594_pfsm_driver = {
+ .driver = {
+ .name = "tps6594-pfsm",
+ },
+ .probe = tps6594_pfsm_probe,
+ .remove = tps6594_pfsm_remove,
+};
+
+module_platform_driver(tps6594_pfsm_driver);
+
+MODULE_ALIAS("platform:tps6594-pfsm");
+MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
+MODULE_DESCRIPTION("TPS6594 Pre-configurable Finite State Machine Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 6c62b94e0acd..a3bc2823143e 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -437,7 +437,7 @@ static struct i2c_driver tsl2550_driver = {
.of_match_table = tsl2550_of_match,
.pm = TSL2550_PM_OPS,
},
- .probe_new = tsl2550_probe,
+ .probe = tsl2550_probe,
.remove = tsl2550_remove,
.id_table = tsl2550_id,
};
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 346bd7cf2e94..930c252753a0 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -166,8 +166,8 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
init_waitqueue_head(&q->wait);
filep->private_data = q;
- uacce->inode = inode;
q->state = UACCE_Q_INIT;
+ q->mapping = filep->f_mapping;
mutex_init(&q->mutex);
list_add(&q->list, &uacce->queues);
mutex_unlock(&uacce->mutex);
@@ -200,12 +200,15 @@ static int uacce_fops_release(struct inode *inode, struct file *filep)
static void uacce_vma_close(struct vm_area_struct *vma)
{
struct uacce_queue *q = vma->vm_private_data;
- struct uacce_qfile_region *qfr = NULL;
- if (vma->vm_pgoff < UACCE_MAX_REGION)
- qfr = q->qfrs[vma->vm_pgoff];
+ if (vma->vm_pgoff < UACCE_MAX_REGION) {
+ struct uacce_qfile_region *qfr = q->qfrs[vma->vm_pgoff];
- kfree(qfr);
+ mutex_lock(&q->mutex);
+ q->qfrs[vma->vm_pgoff] = NULL;
+ mutex_unlock(&q->mutex);
+ kfree(qfr);
+ }
}
static const struct vm_operations_struct uacce_vm_ops = {
@@ -574,12 +577,6 @@ void uacce_remove(struct uacce_device *uacce)
if (!uacce)
return;
- /*
- * unmap remaining mapping from user space, preventing user still
- * access the mmaped area while parent device is already removed
- */
- if (uacce->inode)
- unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
/*
* uacce_fops_open() may be running concurrently, even after we remove
@@ -597,6 +594,12 @@ void uacce_remove(struct uacce_device *uacce)
uacce_put_queue(q);
mutex_unlock(&q->mutex);
uacce_unbind_queue(q);
+
+ /*
+ * unmap remaining mapping from user space, preventing user still
+ * access the mmaped area while parent device is already removed
+ */
+ unmap_mapping_range(q->mapping, 0, 0, 1);
}
/* disable sva now since no opened queues */
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index cb9506f9cbd0..270ff4c5971a 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -855,16 +855,6 @@ static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
return 0;
}
-static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
-{
- return 0;
-}
-
-static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
-{
- return 0;
-}
-
static int xsdfec_start(struct xsdfec_dev *xsdfec)
{
u32 regread;
@@ -1030,8 +1020,6 @@ static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
static const struct file_operations xsdfec_fops = {
.owner = THIS_MODULE,
- .open = xsdfec_dev_open,
- .release = xsdfec_dev_release,
.unlocked_ioctl = xsdfec_dev_ioctl,
.poll = xsdfec_poll,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ec4108a3e5b9..3d3e0ca52614 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2199,8 +2199,10 @@ int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
}
EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
-static void __mmc_rescan(struct mmc_host *host)
+void mmc_rescan(struct work_struct *work)
{
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, detect.work);
int i;
if (host->rescan_disable)
@@ -2272,14 +2274,6 @@ static void __mmc_rescan(struct mmc_host *host)
mmc_schedule_delayed_work(&host->detect, HZ);
}
-void mmc_rescan(struct work_struct *work)
-{
- struct mmc_host *host =
- container_of(work, struct mmc_host, detect.work);
-
- __mmc_rescan(host);
-}
-
void mmc_start_host(struct mmc_host *host)
{
host->f_init = max(min(freqs[0], host->f_max), host->f_min);
@@ -2292,8 +2286,7 @@ void mmc_start_host(struct mmc_host *host)
}
mmc_gpiod_request_cd_irq(host);
- host->detect_change = 1;
- __mmc_rescan(host);
+ _mmc_detect_change(host, 0, false);
}
void __mmc_stop_host(struct mmc_host *host)
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index e5c571fd232c..80f015cf6e54 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -47,7 +47,7 @@ config MUX_GPIO
config MUX_MMIO
tristate "MMIO/Regmap register bitfield-controlled Multiplexer"
- depends on OF || COMPILE_TEST
+ depends on OF
help
MMIO/Regmap register bitfield-controlled Multiplexer controller.
diff --git a/drivers/mux/adg792a.c b/drivers/mux/adg792a.c
index e8fc2fc1ab09..4da5aecb9fc6 100644
--- a/drivers/mux/adg792a.c
+++ b/drivers/mux/adg792a.c
@@ -143,7 +143,7 @@ static struct i2c_driver adg792a_driver = {
.name = "adg792a",
.of_match_table = of_match_ptr(adg792a_of_match),
},
- .probe_new = adg792a_probe,
+ .probe = adg792a_probe,
.id_table = adg792a_id,
};
module_i2c_driver(adg792a_driver);
diff --git a/drivers/mux/mmio.c b/drivers/mux/mmio.c
index 44a7a0e885b8..245bc07eee4b 100644
--- a/drivers/mux/mmio.c
+++ b/drivers/mux/mmio.c
@@ -131,7 +131,7 @@ static int mux_mmio_probe(struct platform_device *pdev)
static struct platform_driver mux_mmio_driver = {
.driver = {
.name = "mmio-mux",
- .of_match_table = of_match_ptr(mux_mmio_dt_ids),
+ .of_match_table = mux_mmio_dt_ids,
},
.probe = mux_mmio_probe,
};
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 1bad1866ae46..99265667538c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -196,13 +196,10 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum,
void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event)
{
struct arcnet_local *lp = netdev_priv(dev);
- unsigned long led_delay = 350;
- unsigned long tx_delay = 50;
switch (event) {
case ARCNET_LED_EVENT_RECON:
- led_trigger_blink_oneshot(lp->recon_led_trig,
- &led_delay, &led_delay, 0);
+ led_trigger_blink_oneshot(lp->recon_led_trig, 350, 350, 0);
break;
case ARCNET_LED_EVENT_OPEN:
led_trigger_event(lp->tx_led_trig, LED_OFF);
@@ -213,8 +210,7 @@ void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event)
led_trigger_event(lp->recon_led_trig, LED_OFF);
break;
case ARCNET_LED_EVENT_TX:
- led_trigger_blink_oneshot(lp->tx_led_trig,
- &tx_delay, &tx_delay, 0);
+ led_trigger_blink_oneshot(lp->tx_led_trig, 50, 50, 0);
break;
}
}
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 80861ac090ae..70c0e2b1936b 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1725,6 +1725,18 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
u32 tstamp_hi;
u64 tstamp;
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_L2:
+ if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2))
+ return false;
+ break;
+ case PTP_CLASS_IPV4:
+ case PTP_CLASS_IPV6:
+ if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4))
+ return false;
+ break;
+ }
+
/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
* for RX timestamping. Then free it, and poll for its copy through
* MMIO in the CPU port module, and inject that into the stack from
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index fb1549a5fe32..dee35ba924ad 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -252,6 +252,7 @@ struct sja1105_private {
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
unsigned long hwts_tx_en;
+ unsigned long hwts_rx_en;
const struct sja1105_info *info;
size_t max_xfer_len;
struct spi_device *spidev;
@@ -289,7 +290,6 @@ struct sja1105_spi_message {
/* From sja1105_main.c */
enum sja1105_reset_reason {
SJA1105_VLAN_FILTERING = 0,
- SJA1105_RX_HWTSTAMPING,
SJA1105_AGEING_TIME,
SJA1105_SCHEDULING,
SJA1105_BEST_EFFORT_POLICING,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index a55a6436fc05..3529a565b4aa 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -866,12 +866,12 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.hostprio = 7,
.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
- .incl_srcpt1 = false,
- .send_meta1 = false,
+ .incl_srcpt1 = true,
+ .send_meta1 = true,
.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
- .incl_srcpt0 = false,
- .send_meta0 = false,
+ .incl_srcpt0 = true,
+ .send_meta0 = true,
/* Default to an invalid value */
.mirr_port = priv->ds->num_ports,
/* No TTEthernet */
@@ -2215,7 +2215,6 @@ static int sja1105_reload_cbs(struct sja1105_private *priv)
static const char * const sja1105_reset_reasons[] = {
[SJA1105_VLAN_FILTERING] = "VLAN filtering",
- [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
[SJA1105_AGEING_TIME] = "Ageing time",
[SJA1105_SCHEDULING] = "Time-aware scheduling",
[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
@@ -2405,11 +2404,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
general_params->tpid = tpid;
/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
general_params->tpid2 = tpid2;
- /* When VLAN filtering is on, we need to at least be able to
- * decode management traffic through the "backup plan".
- */
- general_params->incl_srcpt1 = enabled;
- general_params->incl_srcpt0 = enabled;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 30fb2cc40164..a7d41e781398 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -58,35 +58,10 @@ enum sja1105_ptp_clk_mode {
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-/* Must be called only while the RX timestamping state of the tagger
- * is turned off
- */
-static int sja1105_change_rxtstamping(struct sja1105_private *priv,
- bool on)
-{
- struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- struct sja1105_general_params_entry *general_params;
- struct sja1105_table *table;
-
- table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
- general_params = table->entries;
- general_params->send_meta1 = on;
- general_params->send_meta0 = on;
-
- ptp_cancel_worker_sync(ptp_data->clock);
- skb_queue_purge(&ptp_data->skb_txtstamp_queue);
- skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
-
- return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
-}
-
int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
- struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
- bool rx_on;
- int rc;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
@@ -104,26 +79,13 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- rx_on = false;
+ priv->hwts_rx_en &= ~BIT(port);
break;
default:
- rx_on = true;
+ priv->hwts_rx_en |= BIT(port);
break;
}
- if (rx_on != tagger_data->rxtstamp_get_state(ds)) {
- tagger_data->rxtstamp_set_state(ds, false);
-
- rc = sja1105_change_rxtstamping(priv, rx_on);
- if (rc < 0) {
- dev_err(ds->dev,
- "Failed to change RX timestamping: %d\n", rc);
- return rc;
- }
- if (rx_on)
- tagger_data->rxtstamp_set_state(ds, true);
- }
-
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
return -EFAULT;
return 0;
@@ -131,7 +93,6 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
- struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
@@ -140,7 +101,7 @@ int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
config.tx_type = HWTSTAMP_TX_ON;
else
config.tx_type = HWTSTAMP_TX_OFF;
- if (tagger_data->rxtstamp_get_state(ds))
+ if (priv->hwts_rx_en & BIT(port))
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
config.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -413,11 +374,10 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
- struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- if (!tagger_data->rxtstamp_get_state(ds))
+ if (!(priv->hwts_rx_en & BIT(port)))
return false;
/* We need to read the full PTP clock to reconstruct the Rx
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index ae55167ce0a6..ef1a4a7c47b2 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1025,17 +1025,17 @@ static int vsc73xx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
struct vsc73xx *vsc = ds->priv;
return vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
- VSC73XX_MAXLEN, new_mtu);
+ VSC73XX_MAXLEN, new_mtu + ETH_HLEN + ETH_FCS_LEN);
}
/* According to application not "VSC7398 Jumbo Frames" setting
- * up the MTU to 9.6 KB does not affect the performance on standard
+ * up the frame size to 9.6 KB does not affect the performance on standard
* frames. It is clear from the application note that
* "9.6 kilobytes" == 9600 bytes.
*/
static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port)
{
- return 9600;
+ return 9600 - ETH_HLEN - ETH_FCS_LEN;
}
static const struct dsa_switch_ops vsc73xx_ds_ops = {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5e68a6a4b2af..5ef073a79ce9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -225,6 +225,7 @@ MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
+MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c63d3ec9d328..763d613adbcc 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1816,7 +1816,14 @@ static int __ibmvnic_open(struct net_device *netdev)
if (prev_state == VNIC_CLOSED)
enable_irq(adapter->tx_scrq[i]->irq);
enable_scrq_irq(adapter, adapter->tx_scrq[i]);
- netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
+ /* netdev_tx_reset_queue will reset dql stats. During NON_FATAL
+ * resets, don't reset the stats because there could be batched
+ * skb's waiting to be sent. If we reset dql stats, we risk
+ * num_completed being greater than num_queued. This will cause
+ * a BUG_ON in dql_completed().
+ */
+ if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
}
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index bd77152bb8d7..592037f4e55b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -169,6 +169,9 @@ void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return;
cgx_write(cgx_dev, lmac_id, offset, val);
}
@@ -176,6 +179,10 @@ u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return 0;
+
return cgx_read(cgx_dev, lmac_id, offset);
}
@@ -530,14 +537,15 @@ static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
- u8 lmac_type;
+ struct lmac *lmac;
u64 cfg;
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
- lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
- if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (lmac->lmac_type == LMAC_MODE_SGMII ||
+ lmac->lmac_type == LMAC_MODE_QSGMII) {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
if (enable)
cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
@@ -1556,6 +1564,23 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
+int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Resetting PFC related CSRs */
+ cfg = 0xff;
+ cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg);
+
+ if (pf_req_flr)
+ cgx_lmac_internal_loopback(cgxd, lmac_id, false);
+ return 0;
+}
+
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
int cnt, bool req_free)
{
@@ -1675,6 +1700,7 @@ static int cgx_lmac_init(struct cgx *cgx)
cgx->lmac_idmap[lmac->lmac_id] = lmac;
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
+ lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
}
return cgx_lmac_verify_fwi_version(cgx);
@@ -1771,6 +1797,7 @@ static struct mac_ops cgx_mac_ops = {
.mac_tx_enable = cgx_lmac_tx_enable,
.pfc_config = cgx_lmac_pfc_config,
.mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
+ .mac_reset = cgx_lmac_reset,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 5a20d93004c7..574114179688 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -35,6 +35,7 @@
#define CGXX_CMRX_INT_ENA_W1S 0x058
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
+#define CGXX_CMRX_RX_LOGL_XON 0x100
#define CGXX_CMRX_RX_LMACS 0x128
#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
@@ -181,4 +182,5 @@ int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
u8 *rx_pause);
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
int pfvf_idx);
+int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 39aaf0e4467d..0b4cba03f2e8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -24,6 +24,7 @@
* @cgx: parent cgx port
* @mcast_filters_count: Number of multicast filters installed
* @lmac_id: lmac port id
+ * @lmac_type: lmac type like SGMII/XAUI
* @cmd_pend: flag set before new command is started
* flag cleared after command response is received
* @name: lmac port name
@@ -43,6 +44,7 @@ struct lmac {
struct cgx *cgx;
u8 mcast_filters_count;
u8 lmac_id;
+ u8 lmac_type;
bool cmd_pend;
char *name;
};
@@ -125,6 +127,7 @@ struct mac_ops {
int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
+ int (*mac_reset)(void *cgxd, int lmac_id, u8 pf_req_flr);
/* FEC stats */
int (*get_fec_stats)(void *cgxd, int lmac_id,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index de0d88dd10d6..b4fcb20c3f4f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -37,6 +37,7 @@ static struct mac_ops rpm_mac_ops = {
.mac_tx_enable = rpm_lmac_tx_enable,
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+ .mac_reset = rpm_lmac_reset,
};
static struct mac_ops rpm2_mac_ops = {
@@ -47,7 +48,7 @@ static struct mac_ops rpm2_mac_ops = {
.int_set_reg = RPM2_CMRX_SW_INT_ENA_W1S,
.irq_offset = 1,
.int_ena_bit = BIT_ULL(0),
- .lmac_fwi = RPM_LMAC_FWI,
+ .lmac_fwi = RPM2_LMAC_FWI,
.non_contiguous_serdes_lane = true,
.rx_stats_cnt = 43,
.tx_stats_cnt = 34,
@@ -68,6 +69,7 @@ static struct mac_ops rpm2_mac_ops = {
.mac_tx_enable = rpm_lmac_tx_enable,
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+ .mac_reset = rpm_lmac_reset,
};
bool is_dev_rpm2(void *rpmd)
@@ -537,14 +539,15 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
- u8 lmac_type;
+ struct lmac *lmac;
u64 cfg;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
- lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
- if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (lmac->lmac_type == LMAC_MODE_QSGMII ||
+ lmac->lmac_type == LMAC_MODE_SGMII) {
dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
return 0;
}
@@ -713,3 +716,24 @@ int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
return 0;
}
+
+int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr)
+{
+ u64 rx_logl_xon, cfg;
+ rpm_t *rpm = rpmd;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ /* Resetting PFC related CSRs */
+ rx_logl_xon = is_dev_rpm2(rpm) ? RPM2_CMRX_RX_LOGL_XON :
+ RPMX_CMRX_RX_LOGL_XON;
+ cfg = 0xff;
+
+ rpm_write(rpm, lmac_id, rx_logl_xon, cfg);
+
+ if (pf_req_flr)
+ rpm_lmac_internal_loopback(rpm, lmac_id, false);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 22147b4c2137..b79cfbc6f877 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -74,6 +74,7 @@
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
#define RPM_DEFAULT_PAUSE_TIME 0x7FF
+#define RPMX_CMRX_RX_LOGL_XON 0x4100
#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
@@ -94,7 +95,8 @@
/* CN10KB CSR Declaration */
#define RPM2_CMRX_SW_INT 0x1b0
-#define RPM2_CMRX_SW_INT_ENA_W1S 0x1b8
+#define RPM2_CMRX_SW_INT_ENA_W1S 0x1c8
+#define RPM2_LMAC_FWI 0x12
#define RPM2_CMR_CHAN_MSK_OR 0x3120
#define RPM2_CMR_RX_OVR_BP_EN BIT_ULL(2)
#define RPM2_CMR_RX_OVR_BP_BP BIT_ULL(1)
@@ -131,4 +133,5 @@ int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
int rpm2_get_nr_lmacs(void *rpmd);
bool is_dev_rpm2(void *rpmd);
int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 0069e60afa3b..8dbc35c481f6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2629,6 +2629,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* Since LF is detached use LF number as -1.
*/
rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+ rvu_mac_reset(rvu, pcifunc);
mutex_unlock(&rvu->flr_lock);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index b5a7ee63508c..e8e65fd7888d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -23,6 +23,7 @@
#define PCI_DEVID_OCTEONTX2_LBK 0xA061
/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_98XX 0xB100
#define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
@@ -686,6 +687,16 @@ static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
return rvu->hw->cpt_chan_base + chan;
}
+static inline bool is_rvu_supports_nix1(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_98XX)
+ return true;
+
+ return false;
+}
+
/* Function Prototypes
* RVU
*/
@@ -884,6 +895,7 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
u16 pfc_en);
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+void rvu_mac_reset(struct rvu *rvu, u16 pcifunc);
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 83b342fa8d75..095b2cc4a699 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -114,7 +114,7 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
pfvf->nix_blkaddr = BLKADDR_NIX0;
- if (p2x == CMR_P2X_SEL_NIX1)
+ if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
pfvf->nix_blkaddr = BLKADDR_NIX1;
}
@@ -763,7 +763,7 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
cgxd = rvu_cgx_pdata(cgx_id, rvu);
mac_ops = get_mac_ops(cgxd);
- mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, true);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
/* If PTP is enabled then inform NPC that packets to be
* parsed by this PF will have their data shifted by 8 bytes
* and if PTP is disabled then no shift is required
@@ -1250,3 +1250,21 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
return err;
}
+
+void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ struct cgx *cgxd;
+ u8 cgx, lmac;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
+ dev_err(rvu->dev, "Failed to reset MAC\n");
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 6b56eadd736e..6b98c3287b49 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -417,6 +417,7 @@ static int mlxsw_m_linecards_init(struct mlxsw_m *mlxsw_m)
err_kmalloc_array:
for (i--; i >= 0; i--)
kfree(mlxsw_m->line_cards[i]);
+ kfree(mlxsw_m->line_cards);
err_kcalloc:
kfree(mlxsw_m->ports);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 445ba7fe3c40..b32adf277a22 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -10794,8 +10794,8 @@ static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
int err;
router->lb_crif = mlxsw_sp_crif_alloc(NULL);
- if (IS_ERR(router->lb_crif))
- return PTR_ERR(router->lb_crif);
+ if (!router->lb_crif)
+ return -ENOMEM;
/* Create a generic loopback RIF associated with the main table
* (default VRF). Any table can be used, but the main table exists
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 5b0e8b0e0c89..a36f6369f132 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -144,6 +144,18 @@ static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
!(data & HW_CFG_LRST_), 100000, 10000000);
}
+static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
+ int offset, u32 bit_mask,
+ int target_value, int udelay_min,
+ int udelay_max, int count)
+{
+ u32 data;
+
+ return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
+ target_value == !!(data & bit_mask),
+ udelay_max, udelay_min * count);
+}
+
static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
int offset, u32 bit_mask,
int target_value, int usleep_min,
@@ -736,8 +748,8 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
u32 dp_sel;
int i;
- if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
- 1, 40, 100, 100))
+ if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
+ 1, 40, 100, 100))
return -EIO;
dp_sel = lan743x_csr_read(adapter, DP_SEL);
dp_sel &= ~DP_SEL_MASK_;
@@ -748,8 +760,9 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
lan743x_csr_write(adapter, DP_ADDR, addr + i);
lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
- if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
- 1, 40, 100, 100))
+ if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
+ DP_SEL_DPRDY_,
+ 1, 40, 100, 100))
return -EIO;
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1f5f00b30441..2fa833d041ba 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -2925,7 +2925,6 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
mutex_init(&ocelot->tas_lock);
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 2180ae94c744..cb32234a5bf1 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -439,8 +439,12 @@ static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
bool l2, bool l4)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
+ ocelot_port->trap_proto &= ~(OCELOT_PROTO_PTP_L2 |
+ OCELOT_PROTO_PTP_L4);
+
if (l2)
err = ocelot_l2_ptp_trap_add(ocelot, port);
else
@@ -464,6 +468,11 @@ static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
if (err)
return err;
+ if (l2)
+ ocelot_port->trap_proto |= OCELOT_PROTO_PTP_L2;
+ if (l4)
+ ocelot_port->trap_proto |= OCELOT_PROTO_PTP_L4;
+
return 0;
err_ipv6:
@@ -474,10 +483,38 @@ err_ipv4:
return err;
}
+static int ocelot_traps_to_ptp_rx_filter(unsigned int proto)
+{
+ if ((proto & OCELOT_PROTO_PTP_L2) && (proto & OCELOT_PROTO_PTP_L4))
+ return HWTSTAMP_FILTER_PTP_V2_EVENT;
+ else if (proto & OCELOT_PROTO_PTP_L2)
+ return HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (proto & OCELOT_PROTO_PTP_L4)
+ return HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+
+ return HWTSTAMP_FILTER_NONE;
+}
+
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
- return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
- sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct hwtstamp_config cfg = {};
+
+ switch (ocelot_port->ptp_cmd) {
+ case IFH_REW_OP_TWO_STEP_PTP:
+ cfg.tx_type = HWTSTAMP_TX_ON;
+ break;
+ case IFH_REW_OP_ORIGIN_PTP:
+ cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ break;
+ default:
+ cfg.tx_type = HWTSTAMP_TX_OFF;
+ break;
+ }
+
+ cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(ocelot_hwstamp_get);
@@ -509,8 +546,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
return -ERANGE;
}
- mutex_lock(&ocelot->ptp_lock);
-
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
@@ -531,28 +566,14 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
l4 = true;
break;
default:
- mutex_unlock(&ocelot->ptp_lock);
return -ERANGE;
}
err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
- if (err) {
- mutex_unlock(&ocelot->ptp_lock);
+ if (err)
return err;
- }
- if (l2 && l4)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- else if (l2)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (l4)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- else
- cfg.rx_filter = HWTSTAMP_FILTER_NONE;
-
- /* Commit back the result & save it */
- memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
- mutex_unlock(&ocelot->ptp_lock);
+ cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -824,11 +845,6 @@ int ocelot_init_timestamp(struct ocelot *ocelot,
ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
- /* There is no device reconfiguration, PTP Rx stamping is always
- * enabled.
- */
- ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-
return 0;
}
EXPORT_SYMBOL(ocelot_init_timestamp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 49f2f081ebb5..6b1fb5708434 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -53,6 +53,8 @@
#include "crypto/crypto.h"
#include "crypto/fw.h"
+static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr);
+
/**
* nfp_net_get_fw_version() - Read and parse the FW version
* @fw_ver: Output fw_version structure to read to
@@ -1084,6 +1086,9 @@ static int nfp_net_netdev_close(struct net_device *netdev)
/* Step 2: Tell NFP
*/
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
+ __dev_mc_unsync(netdev, nfp_net_mc_unsync);
+
nfp_net_clear_config_and_disable(nn);
nfp_port_configure(netdev, false);
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index b82dad50a5b1..3cd750820fdd 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -626,6 +626,9 @@ static struct devlink_port *ef100_set_devlink_port(struct efx_nic *efx, u32 idx)
u32 id;
int rc;
+ if (!efx->mae)
+ return NULL;
+
if (efx_mae_lookup_mport(efx, idx, &id)) {
/* This should not happen. */
if (idx == MAE_MPORT_DESC_VF_IDX_NULL)
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 85dbe7f73e31..536bd6564f8b 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -493,7 +493,7 @@ static int __init ntb_netdev_init_module(void)
return 0;
}
-module_init(ntb_netdev_init_module);
+late_initcall(ntb_netdev_init_module);
static void __exit ntb_netdev_exit_module(void)
{
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 0fe78826c8fa..32183f24e63f 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -24,6 +24,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/rcupdate.h>
+#include <linux/security.h>
#include <linux/spinlock.h>
#include <net/sock.h>
@@ -128,6 +129,23 @@ static void del_chan(struct pppox_sock *sock)
spin_unlock(&chan_lock);
}
+static struct rtable *pptp_route_output(struct pppox_sock *po,
+ struct flowi4 *fl4)
+{
+ struct sock *sk = &po->sk;
+ struct net *net;
+
+ net = sock_net(sk);
+ flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 0,
+ RT_SCOPE_UNIVERSE, IPPROTO_GRE, 0,
+ po->proto.pptp.dst_addr.sin_addr.s_addr,
+ po->proto.pptp.src_addr.sin_addr.s_addr,
+ 0, 0, sock_net_uid(net, sk));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
+
+ return ip_route_output_flow(net, fl4, sk);
+}
+
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
struct sock *sk = (struct sock *) chan->private;
@@ -151,11 +169,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
- rt = ip_route_output_ports(net, &fl4, NULL,
- opt->dst_addr.sin_addr.s_addr,
- opt->src_addr.sin_addr.s_addr,
- 0, 0, IPPROTO_GRE,
- RT_TOS(0), sk->sk_bound_dev_if);
+ rt = pptp_route_output(po, &fl4);
if (IS_ERR(rt))
goto tx_error;
@@ -438,12 +452,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.private = sk;
po->chan.ops = &pptp_chan_ops;
- rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
- opt->dst_addr.sin_addr.s_addr,
- opt->src_addr.sin_addr.s_addr,
- 0, 0,
- IPPROTO_GRE, RT_CONN_FLAGS(sk),
- sk->sk_bound_dev_if);
+ rt = pptp_route_output(po, &fl4);
if (IS_ERR(rt)) {
error = -EHOSTUNREACH;
goto end;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 80849d115e5d..c00a89b24df9 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -876,6 +876,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* U-blox LARA-R6 01B */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1313, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
/* U-blox LARA-L6 */
USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1343, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index 43c8c84e7ea8..6d1bd9f52d02 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -546,6 +546,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]);
u8 public_key[NOISE_PUBLIC_KEY_LEN];
struct wg_peer *peer, *temp;
+ bool send_staged_packets;
if (!crypto_memneq(wg->static_identity.static_private,
private_key, NOISE_PUBLIC_KEY_LEN))
@@ -564,14 +565,17 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
}
down_write(&wg->static_identity.lock);
- wg_noise_set_static_identity_private_key(&wg->static_identity,
- private_key);
- list_for_each_entry_safe(peer, temp, &wg->peer_list,
- peer_list) {
+ send_staged_packets = !wg->static_identity.has_identity && netif_running(wg->dev);
+ wg_noise_set_static_identity_private_key(&wg->static_identity, private_key);
+ send_staged_packets = send_staged_packets && wg->static_identity.has_identity;
+
+ wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
+ list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
wg_noise_precompute_static_static(peer);
wg_noise_expire_current_peer_keypairs(peer);
+ if (send_staged_packets)
+ wg_packet_send_staged_packets(peer);
}
- wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
up_write(&wg->static_identity.lock);
}
skip_set_private_key:
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 8084e7408c0a..26d235d15235 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -28,6 +28,7 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
int ret;
memset(queue, 0, sizeof(*queue));
+ queue->last_cpu = -1;
ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
if (ret)
return ret;
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 125284b346a7..1ea4f874e367 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -117,20 +117,17 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
return cpu;
}
-/* This function is racy, in the sense that next is unlocked, so it could return
- * the same CPU twice. A race-free version of this would be to instead store an
- * atomic sequence number, do an increment-and-return, and then iterate through
- * every possible CPU until we get to that index -- choose_cpu. However that's
- * a bit slower, and it doesn't seem like this potential race actually
- * introduces any performance loss, so we live with it.
+/* This function is racy, in the sense that it's called while last_cpu is
+ * unlocked, so it could return the same CPU twice. Adding locking or using
+ * atomic sequence numbers is slower though, and the consequences of racing are
+ * harmless, so live with it.
*/
-static inline int wg_cpumask_next_online(int *next)
+static inline int wg_cpumask_next_online(int *last_cpu)
{
- int cpu = *next;
-
- while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
- cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
- *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+ int cpu = cpumask_next(*last_cpu, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(cpu_online_mask);
+ *last_cpu = cpu;
return cpu;
}
@@ -159,7 +156,7 @@ static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
static inline int wg_queue_enqueue_per_device_and_peer(
struct crypt_queue *device_queue, struct prev_queue *peer_queue,
- struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+ struct sk_buff *skb, struct workqueue_struct *wq)
{
int cpu;
@@ -173,7 +170,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
/* Then we queue it up in the device queue, which consumes the
* packet as soon as it can.
*/
- cpu = wg_cpumask_next_online(next_cpu);
+ cpu = wg_cpumask_next_online(&device_queue->last_cpu);
if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
return -EPIPE;
queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 7135d51d2d87..0b3f0c843550 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -524,7 +524,7 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
goto err;
ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
- wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
+ wg->packet_crypt_wq);
if (unlikely(ret == -EPIPE))
wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
if (likely(!ret || ret == -EPIPE)) {
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 5368f7c35b4b..95c853b59e1d 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -318,7 +318,7 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
goto err;
ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
- wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
+ wg->packet_crypt_wq);
if (unlikely(ret == -EPIPE))
wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
err:
diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c
index 53d8a57a0dfa..968bdb4df0b3 100644
--- a/drivers/net/wireguard/timers.c
+++ b/drivers/net/wireguard/timers.c
@@ -234,10 +234,10 @@ void wg_timers_init(struct wg_peer *peer)
void wg_timers_stop(struct wg_peer *peer)
{
- del_timer_sync(&peer->timer_retransmit_handshake);
- del_timer_sync(&peer->timer_send_keepalive);
- del_timer_sync(&peer->timer_new_handshake);
- del_timer_sync(&peer->timer_zero_key_material);
- del_timer_sync(&peer->timer_persistent_keepalive);
+ timer_delete_sync(&peer->timer_retransmit_handshake);
+ timer_delete_sync(&peer->timer_send_keepalive);
+ timer_delete_sync(&peer->timer_new_handshake);
+ timer_delete_sync(&peer->timer_zero_key_material);
+ timer_delete_sync(&peer->timer_persistent_keepalive);
flush_work(&peer->clear_peer_work);
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3d79b35eb577..c8d20cddf658 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -689,7 +689,7 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
prev_pending_idx = pending_idx;
txp = &queue->pending_tx_info[pending_idx].req;
- page = virt_to_page(idx_to_kaddr(queue, pending_idx));
+ page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 04550b1f984c..4940b6301d83 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -941,7 +941,7 @@ static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
ndev->debugfs_dir =
debugfs_create_dir(pci_name(ndev->ntb.pdev),
debugfs_dir);
- if (!ndev->debugfs_dir)
+ if (IS_ERR(ndev->debugfs_dir))
ndev->debugfs_info = NULL;
else
ndev->debugfs_info =
@@ -1194,7 +1194,6 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
return 0;
err_dma_mask:
- pci_clear_master(pdev);
pci_release_regions(pdev);
err_pci_regions:
pci_disable_device(pdev);
@@ -1209,7 +1208,6 @@ static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
pci_iounmap(pdev, ndev->self_mmio);
- pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -1338,12 +1336,17 @@ static struct pci_driver amd_ntb_pci_driver = {
static int __init amd_ntb_pci_driver_init(void)
{
+ int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
if (debugfs_initialized())
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- return pci_register_driver(&amd_ntb_pci_driver);
+ ret = pci_register_driver(&amd_ntb_pci_driver);
+ if (ret)
+ debugfs_remove_recursive(debugfs_dir);
+
+ return ret;
}
module_init(amd_ntb_pci_driver_init);
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
index 3ece49cb18ff..b640aa0bf45e 100644
--- a/drivers/ntb/hw/epf/ntb_hw_epf.c
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -591,7 +591,7 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Cannot set DMA mask\n");
- goto err_dma_mask;
+ goto err_pci_regions;
}
dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
@@ -599,14 +599,14 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0);
if (!ndev->ctrl_reg) {
ret = -EIO;
- goto err_dma_mask;
+ goto err_pci_regions;
}
if (ndev->peer_spad_reg_bar) {
ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
if (!ndev->peer_spad_reg) {
ret = -EIO;
- goto err_dma_mask;
+ goto err_pci_regions;
}
} else {
spad_sz = 4 * readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
@@ -617,14 +617,11 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
if (!ndev->db_reg) {
ret = -EIO;
- goto err_dma_mask;
+ goto err_pci_regions;
}
return 0;
-err_dma_mask:
- pci_clear_master(pdev);
-
err_pci_regions:
pci_disable_device(pdev);
@@ -642,7 +639,6 @@ static void ntb_epf_deinit_pci(struct ntb_epf_dev *ndev)
pci_iounmap(pdev, ndev->peer_spad_reg);
pci_iounmap(pdev, ndev->db_reg);
- pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 0ed6f809ff2e..48823b53ede3 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2651,20 +2651,18 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
}
/*
- * Enable the device advanced error reporting. It's not critical to
+ * The PCI core enables device error reporting. It's not critical to
* have AER disabled in the kernel.
+ *
+ * Clean up nonfatal error status before getting to init.
*/
- ret = pci_enable_pcie_error_reporting(pdev);
- if (ret != 0)
- dev_warn(&pdev->dev, "PCIe AER capability disabled\n");
- else /* Cleanup nonfatal error status before getting to init */
- pci_aer_clear_nonfatal_status(pdev);
+ pci_aer_clear_nonfatal_status(pdev);
/* First enable the PCI device */
ret = pcim_enable_device(pdev);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to enable PCIe device\n");
- goto err_disable_aer;
+ return ret;
}
/*
@@ -2692,8 +2690,6 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
err_clear_master:
pci_clear_master(pdev);
-err_disable_aer:
- (void)pci_disable_pcie_error_reporting(pdev);
return ret;
}
@@ -2714,9 +2710,6 @@ static void idt_deinit_pci(struct idt_ntb_dev *ndev)
/* Clear the bus master disabling the Request TLPs translation */
pci_clear_master(pdev);
- /* Disable the AER capability */
- (void)pci_disable_pcie_error_reporting(pdev);
-
dev_dbg(&pdev->dev, "NT-function PCIe interface cleared");
}
@@ -2891,6 +2884,7 @@ static struct pci_driver idt_pci_driver = {
static int __init idt_pci_driver_init(void)
{
+ int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
/* Create the top DebugFS directory if the FS is initialized */
@@ -2898,7 +2892,11 @@ static int __init idt_pci_driver_init(void)
dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
/* Register the NTB hardware driver to handle the PCI device */
- return pci_register_driver(&idt_pci_driver);
+ ret = pci_register_driver(&idt_pci_driver);
+ if (ret)
+ debugfs_remove_recursive(dbgfs_topdir);
+
+ return ret;
}
module_init(idt_pci_driver_init);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 84772013812b..9ab836d0d4f1 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -1791,7 +1791,6 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
err_mmio:
err_dma_mask:
- pci_clear_master(pdev);
pci_release_regions(pdev);
err_pci_regions:
pci_disable_device(pdev);
@@ -1808,7 +1807,6 @@ static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
pci_iounmap(pdev, ndev->peer_mmio);
pci_iounmap(pdev, ndev->self_mmio);
- pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -2064,12 +2062,17 @@ static struct pci_driver intel_ntb_pci_driver = {
static int __init intel_ntb_pci_driver_init(void)
{
+ int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
if (debugfs_initialized())
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- return pci_register_driver(&intel_ntb_pci_driver);
+ ret = pci_register_driver(&intel_ntb_pci_driver);
+ if (ret)
+ debugfs_remove_recursive(debugfs_dir);
+
+ return ret;
}
module_init(intel_ntb_pci_driver_init);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index a9b97ebc71ac..2abd2235bbca 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -410,7 +410,7 @@ int ntb_transport_register_client_dev(char *device_name)
rc = device_register(dev);
if (rc) {
- kfree(client_dev);
+ put_device(dev);
goto err;
}
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index 5ee0afa621a9..eeeb4b1c97d2 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -998,6 +998,8 @@ static int tool_init_mws(struct tool_ctx *tc)
tc->peers[pidx].outmws =
devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmw_cnt,
sizeof(*tc->peers[pidx].outmws), GFP_KERNEL);
+ if (tc->peers[pidx].outmws == NULL)
+ return -ENOMEM;
for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) {
tc->peers[pidx].outmws[widx].pidx = pidx;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 954dbc105fc8..5852fe290523 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -25,7 +25,7 @@
int nvdimm_major;
static int nvdimm_bus_major;
-struct class *nd_class;
+static struct class *nd_class;
static DEFINE_IDA(nd_ida);
static int to_nd_device_type(const struct device *dev)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 957f7c3d17ba..1273873582be 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -349,8 +349,8 @@ static ssize_t available_slots_show(struct device *dev,
}
static DEVICE_ATTR_RO(available_slots);
-ssize_t security_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t security_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ceea55f621cc..46e094e56159 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -260,7 +260,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
long actual_nr;
if (mode != DAX_RECOVERY_WRITE)
- return -EIO;
+ return -EHWPOISON;
/*
* The recovery stride is set to the kernel page size because
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 5e4f8848dce0..20f46c230885 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -12,7 +12,7 @@ static const char * const nvme_ops[] = {
[nvme_cmd_read] = "Read",
[nvme_cmd_write_uncor] = "Write Uncorrectable",
[nvme_cmd_compare] = "Compare",
- [nvme_cmd_write_zeroes] = "Write Zeros",
+ [nvme_cmd_write_zeroes] = "Write Zeroes",
[nvme_cmd_dsm] = "Dataset Management",
[nvme_cmd_verify] = "Verify",
[nvme_cmd_resv_register] = "Reservation Register",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 98bfb3d9c22a..47d7ba2827ff 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1134,8 +1134,11 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
mutex_unlock(&ctrl->scan_lock);
}
if (effects & NVME_CMD_EFFECTS_CCC) {
- dev_info(ctrl->device,
+ if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
+ &ctrl->flags)) {
+ dev_info(ctrl->device,
"controller capabilities changed, reset may be required to take effect.\n");
+ }
}
if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
nvme_queue_scan(ctrl);
@@ -3177,6 +3180,7 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
return ret;
}
+ clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
ctrl->identified = true;
return 0;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 2130ad65b58c..5c3250f36ce7 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -505,7 +505,6 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
{
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- void *cookie = READ_ONCE(ioucmd->cookie);
req->bio = pdu->bio;
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
@@ -518,10 +517,12 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
* For iopoll, complete it directly.
* Otherwise, move the completion to task work.
*/
- if (cookie != NULL && blk_rq_is_poll(req))
+ if (blk_rq_is_poll(req)) {
+ WRITE_ONCE(ioucmd->cookie, NULL);
nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
- else
+ } else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+ }
return RQ_END_IO_FREE;
}
@@ -531,7 +532,6 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
{
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- void *cookie = READ_ONCE(ioucmd->cookie);
req->bio = pdu->bio;
pdu->req = req;
@@ -540,10 +540,12 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
* For iopoll, complete it directly.
* Otherwise, move the completion to task work.
*/
- if (cookie != NULL && blk_rq_is_poll(req))
+ if (blk_rq_is_poll(req)) {
+ WRITE_ONCE(ioucmd->cookie, NULL);
nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
- else
+ } else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
+ }
return RQ_END_IO_NONE;
}
@@ -599,7 +601,6 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (issue_flags & IO_URING_F_IOPOLL)
rq_flags |= REQ_POLLED;
-retry:
req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -613,17 +614,11 @@ retry:
return ret;
}
- if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
- if (unlikely(!req->bio)) {
- /* we can't poll this, so alloc regular req instead */
- blk_mq_free_request(req);
- rq_flags &= ~REQ_POLLED;
- goto retry;
- } else {
- WRITE_ONCE(ioucmd->cookie, req->bio);
- req->bio->bi_opf |= REQ_POLLED;
- }
+ if (blk_rq_is_poll(req)) {
+ ioucmd->flags |= IORING_URING_CMD_POLLED;
+ WRITE_ONCE(ioucmd->cookie, req);
}
+
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
pdu->meta_len = d.metadata_len;
@@ -785,18 +780,16 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob,
unsigned int poll_flags)
{
- struct bio *bio;
+ struct request *req;
int ret = 0;
- struct nvme_ns *ns;
- struct request_queue *q;
+
+ if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
+ return 0;
rcu_read_lock();
- bio = READ_ONCE(ioucmd->cookie);
- ns = container_of(file_inode(ioucmd->file)->i_cdev,
- struct nvme_ns, cdev);
- q = ns->queue;
- if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
- ret = bio_poll(bio, iob, poll_flags);
+ req = READ_ONCE(ioucmd->cookie);
+ if (req && blk_rq_is_poll(req))
+ ret = blk_rq_poll(req, iob, poll_flags);
rcu_read_unlock();
return ret;
}
@@ -890,31 +883,6 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
-
-int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
- struct io_comp_batch *iob,
- unsigned int poll_flags)
-{
- struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
- struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
- int srcu_idx = srcu_read_lock(&head->srcu);
- struct nvme_ns *ns = nvme_find_path(head);
- struct bio *bio;
- int ret = 0;
- struct request_queue *q;
-
- if (ns) {
- rcu_read_lock();
- bio = READ_ONCE(ioucmd->cookie);
- q = ns->queue;
- if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
- && bio->bi_bdev)
- ret = bio_poll(bio, iob, poll_flags);
- rcu_read_unlock();
- }
- srcu_read_unlock(&head->srcu, srcu_idx);
- return ret;
-}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 98001eebd275..0a88d7bdc5e3 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -106,6 +106,14 @@ void nvme_failover_req(struct request *req)
bio->bi_opf &= ~REQ_POLLED;
bio->bi_cookie = BLK_QC_T_NONE;
}
+ /*
+ * The alternate request queue that we may end up submitting
+ * the bio to may be frozen temporarily, in which case REQ_NOWAIT
+ * will fail the I/O immediately with EAGAIN to the issuer.
+ * We are not in the issuer's context, which cannot block, so clear
+ * the flag to avoid spurious EAGAIN I/O failures.
+ */
+ bio->bi_opf &= ~REQ_NOWAIT;
}
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
@@ -470,7 +478,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
.unlocked_ioctl = nvme_ns_head_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.uring_cmd = nvme_ns_head_chr_uring_cmd,
- .uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
+ .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
};
static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a69e1efb3299..f35647c470af 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -250,6 +250,7 @@ enum nvme_ctrl_flags {
NVME_CTRL_STARTED_ONCE = 2,
NVME_CTRL_STOPPED = 3,
NVME_CTRL_SKIP_ID_CNS_CS = 4,
+ NVME_CTRL_DIRTY_CAPABILITY = 5,
};
struct nvme_ctrl {
@@ -856,8 +857,6 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob, unsigned int poll_flags);
-int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
- struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 48c60f7fda0b..72725729cb6c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2690,7 +2690,8 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.state != NVME_CTRL_RESETTING) {
dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
dev->ctrl.state);
- return;
+ result = -ENODEV;
+ goto out;
}
/*
@@ -2777,7 +2778,9 @@ static void nvme_reset_work(struct work_struct *work)
result);
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
nvme_dev_disable(dev, true);
+ nvme_sync_queues(&dev->ctrl);
nvme_mark_namespaces_dead(&dev->ctrl);
+ nvme_unquiesce_io_queues(&dev->ctrl);
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3e7dd6f91832..9ce417cd32a7 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1014,7 +1014,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
msg.msg_flags |= MSG_MORE;
if (!sendpage_ok(page))
- msg.msg_flags &= ~MSG_SPLICE_PAGES,
+ msg.msg_flags &= ~MSG_SPLICE_PAGES;
bvec_set_page(&bvec, page, len, offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6cf723bc664e..8cfd60f3b564 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -79,8 +79,8 @@ struct nvmet_ns {
struct completion disable_done;
mempool_t *bvec_pool;
- int use_p2pmem;
struct pci_dev *p2p_dev;
+ int use_p2pmem;
int pi_type;
int metadata_size;
u8 csi;
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index b291b27048c7..da9befa3d6c4 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -55,6 +55,7 @@ config NVMEM_BRCM_NVRAM
tristate "Broadcom's NVRAM support"
depends on ARCH_BCM_5301X || COMPILE_TEST
depends on HAS_IOMEM
+ select GENERIC_NET_UTILS
help
This driver provides support for Broadcom's NVRAM that can be accessed
using I/O mapping.
@@ -82,6 +83,15 @@ config NVMEM_IMX_OCOTP
This driver can also be built as a module. If so, the module
will be called nvmem-imx-ocotp.
+config NVMEM_IMX_OCOTP_ELE
+ tristate "i.MX On-Chip OTP Controller support"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ help
+ This is a driver for the On-Chip OTP Controller (OCOTP)
+ available on i.MX SoCs that have an EdgeLock Enclave (ELE).
+
config NVMEM_IMX_OCOTP_SCU
tristate "i.MX8 SCU On-Chip OTP Controller support"
depends on IMX_SCU
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index f82431ec8aef..cc23ce4ffb1f 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -18,6 +18,8 @@ obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o
nvmem-imx-iim-y := imx-iim.o
obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
nvmem-imx-ocotp-y := imx-ocotp.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP_ELE) += nvmem-imx-ocotp-ele.o
+nvmem-imx-ocotp-ele-y := imx-ocotp-ele.o
obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o
nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
obj-$(CONFIG_NVMEM_JZ4780_EFUSE) += nvmem_jz4780_efuse.o
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 39aa27942f28..4567c597c87f 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -4,6 +4,8 @@
*/
#include <linux/bcm47xx_nvram.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -42,6 +44,25 @@ static int brcm_nvram_read(void *context, unsigned int offset, void *val,
return 0;
}
+static int brcm_nvram_read_post_process_macaddr(void *context, const char *id, int index,
+ unsigned int offset, void *buf, size_t bytes)
+{
+ u8 mac[ETH_ALEN];
+
+ if (bytes != 3 * ETH_ALEN - 1)
+ return -EINVAL;
+
+ if (!mac_pton(buf, mac))
+ return -EINVAL;
+
+ if (index)
+ eth_addr_add(mac, index);
+
+ ether_addr_copy(buf, mac);
+
+ return 0;
+}
+
static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
size_t len)
{
@@ -75,6 +96,13 @@ static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
priv->cells[idx].offset = value - (char *)data;
priv->cells[idx].bytes = strlen(value);
priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+ if (!strcmp(var, "et0macaddr") ||
+ !strcmp(var, "et1macaddr") ||
+ !strcmp(var, "et2macaddr")) {
+ priv->cells[idx].raw_len = strlen(value);
+ priv->cells[idx].bytes = ETH_ALEN;
+ priv->cells[idx].read_post_process = brcm_nvram_read_post_process_macaddr;
+ }
}
return 0;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 342cd380b420..3f8c7718412b 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -696,7 +696,7 @@ static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
return 0;
}
-static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
struct nvmem_layout *layout = nvmem->layout;
struct device *dev = &nvmem->dev;
@@ -704,7 +704,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
const __be32 *addr;
int len, ret;
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node(np, child) {
struct nvmem_cell_info info = {0};
addr = of_get_property(child, "reg", &len);
@@ -742,6 +742,28 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
return 0;
}
+static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
+{
+ return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
+}
+
+static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
+{
+ struct device_node *layout_np;
+ int err = 0;
+
+ layout_np = of_nvmem_layout_get_container(nvmem);
+ if (!layout_np)
+ return 0;
+
+ if (of_device_is_compatible(layout_np, "fixed-layout"))
+ err = nvmem_add_cells_from_dt(nvmem, layout_np);
+
+ of_node_put(layout_np);
+
+ return err;
+}
+
int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
{
layout->owner = owner;
@@ -972,7 +994,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
- rval = nvmem_add_cells_from_of(nvmem);
+ rval = nvmem_add_cells_from_legacy_of(nvmem);
if (rval)
goto err_remove_cells;
@@ -982,6 +1004,10 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
+ rval = nvmem_add_cells_from_fixed_layout(nvmem);
+ if (rval)
+ goto err_remove_cells;
+
rval = nvmem_add_cells_from_layout(nvmem);
if (rval)
goto err_remove_cells;
diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
new file mode 100644
index 000000000000..f1cbbc9afeb8
--- /dev/null
+++ b/drivers/nvmem/imx-ocotp-ele.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * i.MX9 OCOTP fusebox driver
+ *
+ * Copyright 2023 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+enum fuse_type {
+ FUSE_FSB = 1,
+ FUSE_ELE = 2,
+ FUSE_INVALID = -1
+};
+
+struct ocotp_map_entry {
+ u32 start; /* start word */
+ u32 num; /* num words */
+ enum fuse_type type;
+};
+
+struct ocotp_devtype_data {
+ u32 reg_off;
+ char *name;
+ u32 size;
+ u32 num_entry;
+ u32 flag;
+ nvmem_reg_read_t reg_read;
+ struct ocotp_map_entry entry[];
+};
+
+struct imx_ocotp_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct nvmem_config config;
+ struct mutex lock;
+ const struct ocotp_devtype_data *data;
+};
+
+static enum fuse_type imx_ocotp_fuse_type(void *context, u32 index)
+{
+ struct imx_ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ u32 start, end;
+ int i;
+
+ for (i = 0; i < data->num_entry; i++) {
+ start = data->entry[i].start;
+ end = data->entry[i].start + data->entry[i].num;
+
+ if (index >= start && index < end)
+ return data->entry[i].type;
+ }
+
+ return FUSE_INVALID;
+}
+
+static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, size_t bytes)
+{
+ struct imx_ocotp_priv *priv = context;
+ void __iomem *reg = priv->base + priv->data->reg_off;
+ u32 count, index, num_bytes;
+ enum fuse_type type;
+ u32 *buf;
+ void *p;
+ int i;
+
+ index = offset;
+ num_bytes = round_up(bytes, 4);
+ count = num_bytes >> 2;
+
+ if (count > ((priv->data->size >> 2) - index))
+ count = (priv->data->size >> 2) - index;
+
+ p = kzalloc(num_bytes, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ mutex_lock(&priv->lock);
+
+ buf = p;
+
+ for (i = index; i < (index + count); i++) {
+ type = imx_ocotp_fuse_type(context, i);
+ if (type == FUSE_INVALID || type == FUSE_ELE) {
+ *buf++ = 0;
+ continue;
+ }
+
+ *buf++ = readl_relaxed(reg + (i << 2));
+ }
+
+ memcpy(val, (u8 *)p, bytes);
+
+ mutex_unlock(&priv->lock);
+
+ kfree(p);
+
+ return 0;
+}
+
+static int imx_ele_ocotp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_ocotp_priv *priv;
+ struct nvmem_device *nvmem;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->config.dev = dev;
+ priv->config.name = "ELE-OCOTP";
+ priv->config.id = NVMEM_DEVID_AUTO;
+ priv->config.owner = THIS_MODULE;
+ priv->config.size = priv->data->size;
+ priv->config.reg_read = priv->data->reg_read;
+ priv->config.word_size = 4;
+ priv->config.stride = 1;
+ priv->config.priv = priv;
+ priv->config.read_only = true;
+ mutex_init(&priv->lock);
+
+ nvmem = devm_nvmem_register(dev, &priv->config);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ return 0;
+}
+
+static const struct ocotp_devtype_data imx93_ocotp_data = {
+ .reg_off = 0x8000,
+ .reg_read = imx_ocotp_reg_read,
+ .size = 2048,
+ .num_entry = 6,
+ .entry = {
+ { 0, 52, FUSE_FSB },
+ { 63, 1, FUSE_ELE },
+ { 128, 16, FUSE_ELE },
+ { 182, 1, FUSE_ELE },
+ { 188, 1, FUSE_ELE },
+ { 312, 200, FUSE_FSB }
+ },
+};
+
+static const struct of_device_id imx_ele_ocotp_dt_ids[] = {
+ { .compatible = "fsl,imx93-ocotp", .data = &imx93_ocotp_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx_ele_ocotp_dt_ids);
+
+static struct platform_driver imx_ele_ocotp_driver = {
+ .driver = {
+ .name = "imx_ele_ocotp",
+ .of_match_table = imx_ele_ocotp_dt_ids,
+ },
+ .probe = imx_ele_ocotp_probe,
+};
+module_platform_driver(imx_ele_ocotp_driver);
+
+MODULE_DESCRIPTION("i.MX OCOTP/ELE driver");
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index ac0edb6398f1..ab556c011f3e 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -97,7 +97,6 @@ struct ocotp_params {
unsigned int bank_address_words;
void (*set_timing)(struct ocotp_priv *priv);
struct ocotp_ctrl_reg ctrl;
- bool reverse_mac_address;
};
static int imx_ocotp_wait_for_busy(struct ocotp_priv *priv, u32 flags)
@@ -545,7 +544,6 @@ static const struct ocotp_params imx8mq_params = {
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
- .reverse_mac_address = true,
};
static const struct ocotp_params imx8mm_params = {
@@ -553,7 +551,6 @@ static const struct ocotp_params imx8mm_params = {
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
- .reverse_mac_address = true,
};
static const struct ocotp_params imx8mn_params = {
@@ -561,7 +558,6 @@ static const struct ocotp_params imx8mn_params = {
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
- .reverse_mac_address = true,
};
static const struct ocotp_params imx8mp_params = {
@@ -569,7 +565,6 @@ static const struct ocotp_params imx8mp_params = {
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_8MP,
- .reverse_mac_address = true,
};
static const struct of_device_id imx_ocotp_dt_ids[] = {
@@ -596,7 +591,7 @@ static void imx_ocotp_fixup_cell_info(struct nvmem_device *nvmem,
cell->read_post_process = imx_ocotp_cell_pp;
}
-struct nvmem_layout imx_ocotp_layout = {
+static struct nvmem_layout imx_ocotp_layout = {
.fixup_cell_info = imx_ocotp_fixup_cell_info,
};
@@ -624,8 +619,7 @@ static int imx_ocotp_probe(struct platform_device *pdev)
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
- if (priv->params->reverse_mac_address)
- imx_ocotp_nvmem_config.layout = &imx_ocotp_layout;
+ imx_ocotp_nvmem_config.layout = &imx_ocotp_layout;
priv->config = &imx_ocotp_nvmem_config;
diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
index 80cb187f1481..752d0bf4445e 100644
--- a/drivers/nvmem/rmem.c
+++ b/drivers/nvmem/rmem.c
@@ -71,6 +71,7 @@ static int rmem_probe(struct platform_device *pdev)
config.dev = dev;
config.priv = priv;
config.name = "rmem";
+ config.id = NVMEM_DEVID_AUTO;
config.size = mem->size;
config.reg_read = rmem_read;
diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
index 9f53bcce2f87..cb9aa5428350 100644
--- a/drivers/nvmem/rockchip-otp.c
+++ b/drivers/nvmem/rockchip-otp.c
@@ -54,21 +54,32 @@
#define OTPC_TIMEOUT 10000
+/* RK3588 Register */
+#define RK3588_OTPC_AUTO_CTRL 0x04
+#define RK3588_OTPC_AUTO_EN 0x08
+#define RK3588_OTPC_INT_ST 0x84
+#define RK3588_OTPC_DOUT0 0x20
+#define RK3588_NO_SECURE_OFFSET 0x300
+#define RK3588_NBYTES 4
+#define RK3588_BURST_NUM 1
+#define RK3588_BURST_SHIFT 8
+#define RK3588_ADDR_SHIFT 16
+#define RK3588_AUTO_EN BIT(0)
+#define RK3588_RD_DONE BIT(1)
+
+struct rockchip_data {
+ int size;
+ const char * const *clks;
+ int num_clks;
+ nvmem_reg_read_t reg_read;
+};
+
struct rockchip_otp {
struct device *dev;
void __iomem *base;
- struct clk_bulk_data *clks;
- int num_clks;
+ struct clk_bulk_data *clks;
struct reset_control *rst;
-};
-
-/* list of required clocks */
-static const char * const rockchip_otp_clocks[] = {
- "otp", "apb_pclk", "phy",
-};
-
-struct rockchip_data {
- int size;
+ const struct rockchip_data *data;
};
static int rockchip_otp_reset(struct rockchip_otp *otp)
@@ -92,18 +103,19 @@ static int rockchip_otp_reset(struct rockchip_otp *otp)
return 0;
}
-static int rockchip_otp_wait_status(struct rockchip_otp *otp, u32 flag)
+static int rockchip_otp_wait_status(struct rockchip_otp *otp,
+ unsigned int reg, u32 flag)
{
u32 status = 0;
int ret;
- ret = readl_poll_timeout_atomic(otp->base + OTPC_INT_STATUS, status,
+ ret = readl_poll_timeout_atomic(otp->base + reg, status,
(status & flag), 1, OTPC_TIMEOUT);
if (ret)
return ret;
/* clean int status */
- writel(flag, otp->base + OTPC_INT_STATUS);
+ writel(flag, otp->base + reg);
return 0;
}
@@ -125,36 +137,30 @@ static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL);
- ret = rockchip_otp_wait_status(otp, OTPC_SBPI_DONE);
+ ret = rockchip_otp_wait_status(otp, OTPC_INT_STATUS, OTPC_SBPI_DONE);
if (ret < 0)
dev_err(otp->dev, "timeout during ecc_enable\n");
return ret;
}
-static int rockchip_otp_read(void *context, unsigned int offset,
- void *val, size_t bytes)
+static int px30_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct rockchip_otp *otp = context;
u8 *buf = val;
- int ret = 0;
-
- ret = clk_bulk_prepare_enable(otp->num_clks, otp->clks);
- if (ret < 0) {
- dev_err(otp->dev, "failed to prepare/enable clks\n");
- return ret;
- }
+ int ret;
ret = rockchip_otp_reset(otp);
if (ret) {
dev_err(otp->dev, "failed to reset otp phy\n");
- goto disable_clks;
+ return ret;
}
ret = rockchip_otp_ecc_enable(otp, false);
if (ret < 0) {
dev_err(otp->dev, "rockchip_otp_ecc_enable err\n");
- goto disable_clks;
+ return ret;
}
writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
@@ -164,7 +170,7 @@ static int rockchip_otp_read(void *context, unsigned int offset,
otp->base + OTPC_USER_ADDR);
writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK,
otp->base + OTPC_USER_ENABLE);
- ret = rockchip_otp_wait_status(otp, OTPC_USER_DONE);
+ ret = rockchip_otp_wait_status(otp, OTPC_INT_STATUS, OTPC_USER_DONE);
if (ret < 0) {
dev_err(otp->dev, "timeout during read setup\n");
goto read_end;
@@ -174,8 +180,74 @@ static int rockchip_otp_read(void *context, unsigned int offset,
read_end:
writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
-disable_clks:
- clk_bulk_disable_unprepare(otp->num_clks, otp->clks);
+
+ return ret;
+}
+
+static int rk3588_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_otp *otp = context;
+ unsigned int addr_start, addr_end, addr_len;
+ int ret, i = 0;
+ u32 data;
+ u8 *buf;
+
+ addr_start = round_down(offset, RK3588_NBYTES) / RK3588_NBYTES;
+ addr_end = round_up(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES;
+ addr_len = addr_end - addr_start;
+ addr_start += RK3588_NO_SECURE_OFFSET;
+
+ buf = kzalloc(array_size(addr_len, RK3588_NBYTES), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (addr_len--) {
+ writel((addr_start << RK3588_ADDR_SHIFT) |
+ (RK3588_BURST_NUM << RK3588_BURST_SHIFT),
+ otp->base + RK3588_OTPC_AUTO_CTRL);
+ writel(RK3588_AUTO_EN, otp->base + RK3588_OTPC_AUTO_EN);
+
+ ret = rockchip_otp_wait_status(otp, RK3588_OTPC_INT_ST,
+ RK3588_RD_DONE);
+ if (ret < 0) {
+ dev_err(otp->dev, "timeout during read setup\n");
+ goto read_end;
+ }
+
+ data = readl(otp->base + RK3588_OTPC_DOUT0);
+ memcpy(&buf[i], &data, RK3588_NBYTES);
+
+ i += RK3588_NBYTES;
+ addr_start++;
+ }
+
+ memcpy(val, buf + offset % RK3588_NBYTES, bytes);
+
+read_end:
+ kfree(buf);
+
+ return ret;
+}
+
+static int rockchip_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_otp *otp = context;
+ int ret;
+
+ if (!otp->data || !otp->data->reg_read)
+ return -EINVAL;
+
+ ret = clk_bulk_prepare_enable(otp->data->num_clks, otp->clks);
+ if (ret < 0) {
+ dev_err(otp->dev, "failed to prepare/enable clks\n");
+ return ret;
+ }
+
+ ret = otp->data->reg_read(context, offset, val, bytes);
+
+ clk_bulk_disable_unprepare(otp->data->num_clks, otp->clks);
return ret;
}
@@ -189,18 +261,40 @@ static struct nvmem_config otp_config = {
.reg_read = rockchip_otp_read,
};
+static const char * const px30_otp_clocks[] = {
+ "otp", "apb_pclk", "phy",
+};
+
static const struct rockchip_data px30_data = {
.size = 0x40,
+ .clks = px30_otp_clocks,
+ .num_clks = ARRAY_SIZE(px30_otp_clocks),
+ .reg_read = px30_otp_read,
+};
+
+static const char * const rk3588_otp_clocks[] = {
+ "otp", "apb_pclk", "phy", "arb",
+};
+
+static const struct rockchip_data rk3588_data = {
+ .size = 0x400,
+ .clks = rk3588_otp_clocks,
+ .num_clks = ARRAY_SIZE(rk3588_otp_clocks),
+ .reg_read = rk3588_otp_read,
};
static const struct of_device_id rockchip_otp_match[] = {
{
.compatible = "rockchip,px30-otp",
- .data = (void *)&px30_data,
+ .data = &px30_data,
},
{
.compatible = "rockchip,rk3308-otp",
- .data = (void *)&px30_data,
+ .data = &px30_data,
+ },
+ {
+ .compatible = "rockchip,rk3588-otp",
+ .data = &rk3588_data,
},
{ /* sentinel */ },
};
@@ -215,44 +309,47 @@ static int rockchip_otp_probe(struct platform_device *pdev)
int ret, i;
data = of_device_get_match_data(dev);
- if (!data) {
- dev_err(dev, "failed to get match data\n");
- return -EINVAL;
- }
+ if (!data)
+ return dev_err_probe(dev, -EINVAL, "failed to get match data\n");
otp = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_otp),
GFP_KERNEL);
if (!otp)
return -ENOMEM;
+ otp->data = data;
otp->dev = dev;
otp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(otp->base))
- return PTR_ERR(otp->base);
+ return dev_err_probe(dev, PTR_ERR(otp->base),
+ "failed to ioremap resource\n");
- otp->num_clks = ARRAY_SIZE(rockchip_otp_clocks);
- otp->clks = devm_kcalloc(dev, otp->num_clks,
- sizeof(*otp->clks), GFP_KERNEL);
+ otp->clks = devm_kcalloc(dev, data->num_clks, sizeof(*otp->clks),
+ GFP_KERNEL);
if (!otp->clks)
return -ENOMEM;
- for (i = 0; i < otp->num_clks; ++i)
- otp->clks[i].id = rockchip_otp_clocks[i];
+ for (i = 0; i < data->num_clks; ++i)
+ otp->clks[i].id = data->clks[i];
- ret = devm_clk_bulk_get(dev, otp->num_clks, otp->clks);
+ ret = devm_clk_bulk_get(dev, data->num_clks, otp->clks);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
- otp->rst = devm_reset_control_get(dev, "phy");
+ otp->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(otp->rst))
- return PTR_ERR(otp->rst);
+ return dev_err_probe(dev, PTR_ERR(otp->rst),
+ "failed to get resets\n");
otp_config.size = data->size;
otp_config.priv = otp;
otp_config.dev = dev;
- nvmem = devm_nvmem_register(dev, &otp_config);
- return PTR_ERR_OR_ZERO(nvmem);
+ nvmem = devm_nvmem_register(dev, &otp_config);
+ if (IS_ERR(nvmem))
+ return dev_err_probe(dev, PTR_ERR(nvmem),
+ "failed to register nvmem device\n");
+ return 0;
}
static struct platform_driver rockchip_otp_driver = {
diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
index 52b928a7a6d5..f85350b17d67 100644
--- a/drivers/nvmem/sunplus-ocotp.c
+++ b/drivers/nvmem/sunplus-ocotp.c
@@ -192,9 +192,11 @@ static int sp_ocotp_probe(struct platform_device *pdev)
sp_ocotp_nvmem_config.dev = dev;
nvmem = devm_nvmem_register(dev, &sp_ocotp_nvmem_config);
- if (IS_ERR(nvmem))
- return dev_err_probe(&pdev->dev, PTR_ERR(nvmem),
+ if (IS_ERR(nvmem)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(nvmem),
"register nvmem device fail\n");
+ goto err;
+ }
platform_set_drvdata(pdev, nvmem);
@@ -203,6 +205,9 @@ static int sp_ocotp_probe(struct platform_device *pdev)
(int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE);
return 0;
+err:
+ clk_unprepare(otp->clk);
+ return ret;
}
static const struct of_device_id sp_ocotp_dt_ids[] = {
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index e28d7b133e11..f49bb9a26d05 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -76,6 +76,6 @@ static struct platform_driver zynqmp_nvmem_driver = {
module_platform_driver(zynqmp_nvmem_driver);
-MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>, Nava kishore Manne <navam@xilinx.com>");
+MODULE_AUTHOR("Michal Simek <michal.simek@amd.com>, Nava kishore Manne <nava.kishore.manne@amd.com>");
MODULE_DESCRIPTION("ZynqMP NVMEM driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 954c94865cf5..3f46e499d615 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -29,9 +29,6 @@
*/
LIST_HEAD(opp_tables);
-/* OPP tables with uninitialized required OPPs */
-LIST_HEAD(lazy_opp_tables);
-
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
@@ -230,17 +227,25 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
unsigned int index)
{
+ struct opp_table *opp_table = opp->opp_table;
+
if (IS_ERR_OR_NULL(opp) || !opp->available ||
- index >= opp->opp_table->required_opp_count) {
+ index >= opp_table->required_opp_count) {
pr_err("%s: Invalid parameters\n", __func__);
return 0;
}
/* required-opps not fully initialized yet */
- if (lazy_linking_pending(opp->opp_table))
+ if (lazy_linking_pending(opp_table))
return 0;
- return opp->required_opps[index]->pstate;
+ /* The required OPP table must belong to a genpd */
+ if (unlikely(!opp_table->required_opp_tables[index]->is_genpd)) {
+ pr_err("%s: Performance state is only valid for genpds.\n", __func__);
+ return 0;
+ }
+
+ return opp->required_opps[index]->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);
@@ -938,7 +943,7 @@ static int _set_opp_bw(const struct opp_table *opp_table,
static int _set_performance_state(struct device *dev, struct device *pd_dev,
struct dev_pm_opp *opp, int i)
{
- unsigned int pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
+ unsigned int pstate = likely(opp) ? opp->required_opps[i]->level : 0;
int ret;
if (!pd_dev)
@@ -1091,7 +1096,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
/* Return early if nothing to do */
if (!forced && old_opp == opp && opp_table->enabled) {
- dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
+ dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
return 0;
}
@@ -1358,7 +1363,10 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
return opp_table;
remove_opp_dev:
+ _of_clear_opp_table(opp_table);
_remove_opp_dev(opp_dev, opp_table);
+ mutex_destroy(&opp_table->genpd_virt_dev_lock);
+ mutex_destroy(&opp_table->lock);
err:
kfree(opp_table);
return ERR_PTR(ret);
@@ -1522,16 +1530,8 @@ static void _opp_table_kref_release(struct kref *kref)
WARN_ON(!list_empty(&opp_table->opp_list));
- list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
- /*
- * The OPP table is getting removed, drop the performance state
- * constraints.
- */
- if (opp_table->genpd_performance_state)
- dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);
-
+ list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
_remove_opp_dev(opp_dev, opp_table);
- }
mutex_destroy(&opp_table->genpd_virt_dev_lock);
mutex_destroy(&opp_table->lock);
@@ -2704,6 +2704,12 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
if (!src_table || !src_table->required_opp_count)
return pstate;
+ /* Both OPP tables must belong to genpds */
+ if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) {
+ pr_err("%s: Performance state is only valid for genpds.\n", __func__);
+ return -EINVAL;
+ }
+
/* required-opps not fully initialized yet */
if (lazy_linking_pending(src_table))
return -EBUSY;
@@ -2722,8 +2728,8 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
mutex_lock(&src_table->lock);
list_for_each_entry(opp, &src_table->opp_list, node) {
- if (opp->pstate == pstate) {
- dest_pstate = opp->required_opps[i]->pstate;
+ if (opp->level == pstate) {
+ dest_pstate = opp->required_opps[i]->level;
goto unlock;
}
}
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 2c7fb683441e..17543c0aa5b6 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -152,7 +152,6 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic);
debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
- debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
debugfs_create_u32("level", S_IRUGO, d, &opp->level);
debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
&opp->clock_latency_ns);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 8246e9b7afe7..ada4963c7cfa 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -21,6 +21,9 @@
#include "opp.h"
+/* OPP tables with uninitialized required OPPs, protected by opp_table_lock */
+static LIST_HEAD(lazy_opp_tables);
+
/*
* Returns opp descriptor node for a device node, caller must
* do of_node_put().
@@ -145,7 +148,10 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table)
opp_table->required_opp_count = 0;
opp_table->required_opp_tables = NULL;
+
+ mutex_lock(&opp_table_lock);
list_del(&opp_table->lazy);
+ mutex_unlock(&opp_table_lock);
}
/*
@@ -194,8 +200,15 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
}
/* Let's do the linking later on */
- if (lazy)
+ if (lazy) {
+ /*
+ * The opp_table_lock is not held while allocating the table; take
+ * it now to avoid corrupting the lazy_opp_tables list.
+ */
+ mutex_lock(&opp_table_lock);
list_add(&opp_table->lazy, &lazy_opp_tables);
+ mutex_unlock(&opp_table_lock);
+ }
else
_update_set_required_opps(opp_table);
@@ -500,11 +513,7 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev,
for (i = 0; i < num_paths; i++) {
paths[i] = of_icc_get_by_index(dev, i);
if (IS_ERR(paths[i])) {
- ret = PTR_ERR(paths[i]);
- if (ret != -EPROBE_DEFER) {
- dev_err(dev, "%s: Unable to get path%d: %d\n",
- __func__, i, ret);
- }
+ ret = dev_err_probe(dev, PTR_ERR(paths[i]), "%s: Unable to get path%d\n", __func__, i);
goto err;
}
}
@@ -932,9 +941,6 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
if (ret)
goto free_required_opps;
- if (opp_table->is_genpd)
- new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
-
ret = _opp_add(dev, new_opp, opp_table);
if (ret) {
/* Don't return error for duplicate OPPs */
@@ -1021,14 +1027,6 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
goto remove_static_opp;
}
- list_for_each_entry(opp, &opp_table->opp_list, node) {
- /* Any non-zero performance state would enable the feature */
- if (opp->pstate) {
- opp_table->genpd_performance_state = true;
- break;
- }
- }
-
lazy_link_required_opp_table(opp_table);
return 0;
@@ -1387,9 +1385,15 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
goto put_required_np;
}
+ /* The OPP table must belong to a genpd */
+ if (unlikely(!opp_table->is_genpd)) {
+ pr_err("%s: Performance state is only valid for genpds.\n", __func__);
+ goto put_required_np;
+ }
+
opp = _find_opp_of_np(opp_table, required_np);
if (opp) {
- pstate = opp->pstate;
+ pstate = opp->level;
dev_pm_opp_put(opp);
}
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 2a057c42ddf4..8a5ea38f3a3d 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -26,7 +26,7 @@ struct regulator;
/* Lock to allow exclusive modification to the device and opp lists */
extern struct mutex opp_table_lock;
-extern struct list_head opp_tables, lazy_opp_tables;
+extern struct list_head opp_tables;
/* OPP Config flags */
#define OPP_CONFIG_CLK BIT(0)
@@ -78,7 +78,6 @@ struct opp_config_data {
* @turbo: true if turbo (boost) OPP
* @suspend: true if suspend OPP
* @removed: flag indicating that OPP's reference is dropped by OPP core.
- * @pstate: Device's power domain's performance state.
* @rates: Frequencies in hertz
* @level: Performance level
* @supplies: Power supplies voltage/current values
@@ -101,7 +100,6 @@ struct dev_pm_opp {
bool turbo;
bool suspend;
bool removed;
- unsigned int pstate;
unsigned long *rates;
unsigned int level;
@@ -182,7 +180,6 @@ enum opp_table_access {
* @paths: Interconnect path handles
* @path_count: Number of interconnect paths
* @enabled: Set to true if the device's resources are enabled/configured.
- * @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
* @set_required_opps: Helper responsible to set required OPPs.
* @dentry: debugfs dentry pointer of the real device directory (not links).
@@ -233,7 +230,6 @@ struct opp_table {
struct icc_path **paths;
unsigned int path_count;
bool enabled;
- bool genpd_performance_state;
bool is_genpd;
int (*set_required_opps)(struct device *dev,
struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index a66386043aa6..9bf652bd002c 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -97,7 +97,6 @@
#define DBG_RUN_SG(x...)
#endif
-#define CCIO_INLINE inline
#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_U32(addr) __raw_readl(addr)
@@ -330,7 +329,8 @@ static int ioc_count;
/**
* ccio_alloc_range - Allocate pages in the ioc's resource map.
* @ioc: The I/O Controller.
- * @pages_needed: The requested number of pages to be mapped into the
+ * @dev: The PCI device.
+ * @size: The requested number of bytes to be mapped into the
* I/O Pdir...
*
* This function searches the resource map of the ioc to locate a range
@@ -552,7 +552,7 @@ static u32 hint_lookup[] = {
* (Load Coherence Index) instruction. The 8 bits used for the virtual
* index are bits 12:19 of the value returned by LCI.
*/
-static void CCIO_INLINE
+static void
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hints)
{
@@ -623,7 +623,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
*
* FIXME: Can we change the byte_cnt to pages_mapped?
*/
-static CCIO_INLINE void
+static void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
u32 chain_size = 1 << ioc->chainid_shift;
@@ -656,7 +656,7 @@ ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
*
* FIXME: Can we change byte_cnt to pages_mapped?
*/
-static CCIO_INLINE void
+static void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32)CCIO_IOVP(iova);
@@ -795,9 +795,10 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
/**
* ccio_unmap_page - Unmap an address range from the IOMMU.
* @dev: The PCI device.
- * @addr: The start address of the DMA region.
+ * @iova: The start address of the DMA region.
* @size: The length of the DMA region.
* @direction: The direction of the DMA transaction (to/from device).
+ * @attrs: attributes
*/
static void
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
@@ -838,6 +839,8 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
* @dev: The PCI device.
* @size: The length of the DMA region.
* @dma_handle: The DMA address handed back to the device (not the cpu).
+ * @flag: allocation flags
+ * @attrs: attributes
*
* This function implements the pci_alloc_consistent function.
*/
@@ -872,6 +875,7 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
* @size: The length of the DMA region.
* @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
* @dma_handle: The device address returned from the ccio_alloc_consistent.
+ * @attrs: attributes
*
* This function implements the pci_free_consistent function.
*/
@@ -901,6 +905,7 @@ ccio_free(struct device *dev, size_t size, void *cpu_addr,
* @sglist: The scatter/gather list to be mapped in the IOMMU.
* @nents: The number of entries in the scatter/gather list.
* @direction: The direction of the DMA transaction (to/from device).
+ * @attrs: attributes
*
* This function implements the pci_map_sg function.
*/
@@ -980,6 +985,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
* @sglist: The scatter/gather list to be unmapped from the IOMMU.
* @nents: The number of entries in the scatter/gather list.
* @direction: The direction of the DMA transaction (to/from device).
+ * @attrs: attributes
*
* This function implements the pci_unmap_sg function.
*/
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index e33036281327..f89f9fb4c84b 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -924,8 +924,6 @@ static const char cujo_vers[][4] = {
"2.0"
};
-void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp);
-
/*
** Determine if dino should claim this chip (return 0) or not (return 1).
** If so, initialize the chip appropriately (card-mode vs bridge mode).
@@ -1086,9 +1084,8 @@ static struct parisc_driver dino_driver __refdata = {
* This is the only routine which is NOT static.
* Must be called exactly once before pci_init().
*/
-int __init dino_init(void)
+void __init dino_init(void)
{
register_parisc_driver(&dino_driver);
- return 0;
}
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index f96e5eaee87e..45e487388c6e 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -400,7 +400,7 @@ static struct parisc_driver eisa_driver __refdata = {
.probe = eisa_probe,
};
-void __init eisa_init(void)
+void __init parisc_eisa_init(void)
{
register_parisc_driver(&eisa_driver);
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index afc6e66ddc31..702bfd64e6e1 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -404,7 +404,7 @@ static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int
static void
lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
{
- int error = 0;
+ int error __maybe_unused = 0;
u32 arb_mask = 0;
u32 error_config = 0;
u32 status_control = 0;
@@ -1018,7 +1018,7 @@ static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
unsigned long bytecnt;
- long io_count;
+ long io_count __maybe_unused;
long status; /* PDC return status */
long pa_count;
pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; /* PA_VIEW */
@@ -1164,10 +1164,6 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
#endif /* CONFIG_64BIT */
-extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
-extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
-
-
static void
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 2a18f7ba2398..633266447e2f 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -509,6 +509,8 @@ static struct pdcspath_entry *pdcspath_entries[] = {
/**
* pdcs_size_read - Stable Storage size output.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
*/
static ssize_t pdcs_size_read(struct kobject *kobj,
@@ -528,6 +530,8 @@ static ssize_t pdcs_size_read(struct kobject *kobj,
/**
* pdcs_auto_read - Stable Storage autoboot/search flag output.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
* @knob: The PF_AUTOBOOT or PF_AUTOSEARCH flag
*/
@@ -554,6 +558,8 @@ static ssize_t pdcs_auto_read(struct kobject *kobj,
/**
* pdcs_autoboot_read - Stable Storage autoboot flag output.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
*/
static ssize_t pdcs_autoboot_read(struct kobject *kobj,
@@ -564,6 +570,8 @@ static ssize_t pdcs_autoboot_read(struct kobject *kobj,
/**
* pdcs_autosearch_read - Stable Storage autoboot flag output.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
*/
static ssize_t pdcs_autosearch_read(struct kobject *kobj,
@@ -574,6 +582,8 @@ static ssize_t pdcs_autosearch_read(struct kobject *kobj,
/**
* pdcs_timer_read - Stable Storage timer count output (in seconds).
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
*
* The value of the timer field corresponds to a number of seconds in powers of 2.
@@ -601,6 +611,8 @@ static ssize_t pdcs_timer_read(struct kobject *kobj,
/**
* pdcs_osid_read - Stable Storage OS ID register output.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
*/
static ssize_t pdcs_osid_read(struct kobject *kobj,
@@ -619,6 +631,8 @@ static ssize_t pdcs_osid_read(struct kobject *kobj,
/**
* pdcs_osdep1_read - Stable Storage OS-Dependent data area 1 output.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
*
* This can hold 16 bytes of OS-Dependent data.
@@ -645,6 +659,8 @@ static ssize_t pdcs_osdep1_read(struct kobject *kobj,
/**
* pdcs_diagnostic_read - Stable Storage Diagnostic register output.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
*
* I have NFC how to interpret the content of that register ;-).
@@ -669,6 +685,8 @@ static ssize_t pdcs_diagnostic_read(struct kobject *kobj,
/**
* pdcs_fastsize_read - Stable Storage FastSize register output.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
*
* This register holds the amount of system RAM to be tested during boot sequence.
@@ -697,6 +715,8 @@ static ssize_t pdcs_fastsize_read(struct kobject *kobj,
/**
* pdcs_osdep2_read - Stable Storage OS-Dependent data area 2 output.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The output buffer to write to.
*
* This can hold pdcs_size - 224 bytes of OS-Dependent data, when available.
@@ -729,6 +749,8 @@ static ssize_t pdcs_osdep2_read(struct kobject *kobj,
/**
* pdcs_auto_write - This function handles autoboot/search flag modifying.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
* @knob: The PF_AUTOBOOT or PF_AUTOSEARCH flag
@@ -801,6 +823,8 @@ parse_error:
/**
* pdcs_autoboot_write - This function handles autoboot flag modifying.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
@@ -817,6 +841,8 @@ static ssize_t pdcs_autoboot_write(struct kobject *kobj,
/**
* pdcs_autosearch_write - This function handles autosearch flag modifying.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
@@ -833,6 +859,8 @@ static ssize_t pdcs_autosearch_write(struct kobject *kobj,
/**
* pdcs_osdep1_write - Stable Storage OS-Dependent data area 1 input.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
@@ -870,6 +898,8 @@ static ssize_t pdcs_osdep1_write(struct kobject *kobj,
/**
* pdcs_osdep2_write - Stable Storage OS-Dependent data area 2 input.
+ * @kobj: The kobject used to share data with userspace.
+ * @attr: The kobject attributes.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
@@ -1023,7 +1053,7 @@ pdcs_unregister_pathentries(void)
static int __init
pdc_stable_init(void)
{
- int rc = 0, error = 0;
+ int rc = 0, error;
u32 result;
/* find the size of the stable storage */
@@ -1052,6 +1082,10 @@ pdc_stable_init(void)
/* Don't forget the root entries */
error = sysfs_create_group(stable_kobj, &pdcs_attr_group);
+ if (error) {
+ rc = -ENOMEM;
+ goto fail_ksetreg;
+ }
/* register the paths kset as a child of the stable kset */
paths_kset = kset_create_and_add("paths", NULL, stable_kobj);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index ecd870087a3d..bf3405f4289e 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -95,8 +95,6 @@
#define DBG_RES(x...)
#endif
-#define SBA_INLINE __inline__
-
#define DEFAULT_DMA_HINT_REG 0
struct sba_device *sba_list;
@@ -332,13 +330,14 @@ static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
/**
* sba_search_bitmap - find free space in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
+ * @dev: device to query the bitmap for
* @bits_wanted: number of entries we need.
*
* Find consecutive free bits in resource bitmap.
* Each bit represents one entry in the IO Pdir.
* Cool perf optimization: search for log2(size) bits at a time.
*/
-static SBA_INLINE unsigned long
+static unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
unsigned long bits_wanted)
{
@@ -431,6 +430,7 @@ sba_search_bitmap(struct ioc *ioc, struct device *dev,
/**
* sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
+ * @dev: device for which pages should be allocated
* @size: number of bytes to create a mapping for
*
* Given a size, find consecutive unmarked and then mark those bits in the
@@ -490,7 +490,7 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
*
* clear bits in the ioc's resource map
*/
-static SBA_INLINE void
+static void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
unsigned long iovp = SBA_IOVP(ioc, iova);
@@ -568,7 +568,7 @@ typedef unsigned long space_t;
* IOMMU uses little endian for the pdir.
*/
-static void SBA_INLINE
+static void
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hint)
{
@@ -609,7 +609,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
* must be a power of 2. The "Cool perf optimization" in the
* allocation routine helps keep that true.
*/
-static SBA_INLINE void
+static void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32) SBA_IOVP(ioc,iova);
@@ -793,6 +793,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
* @direction: R/W or both.
+ * @attrs: attributes
*
* See Documentation/core-api/dma-api-howto.rst
*/
@@ -872,6 +873,8 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
* @hwdev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
+ * @gfp: allocation flags
+ * @attrs: attributes
*
* See Documentation/core-api/dma-api-howto.rst
*/
@@ -902,7 +905,8 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
* @hwdev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @vaddr: virtual address IOVA of "consistent" buffer.
- * @dma_handler: IO virtual address of "consistent" buffer.
+ * @dma_handle: IO virtual address of "consistent" buffer.
+ * @attrs: attributes
*
* See Documentation/core-api/dma-api-howto.rst
*/
@@ -938,6 +942,7 @@ int dump_run_sg = 0;
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @direction: R/W or both.
+ * @attrs: attributes
*
* See Documentation/core-api/dma-api-howto.rst
*/
@@ -946,7 +951,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
- int coalesced, filled = 0;
+ int filled = 0;
unsigned long flags;
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
@@ -985,7 +990,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
- coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
+ iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
/*
** Program the I/O Pdir
@@ -1022,6 +1027,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @direction: R/W or both.
+ * @attrs: attributes
*
* See Documentation/core-api/dma-api-howto.rst
*/
@@ -1229,8 +1235,6 @@ struct ibase_data_struct {
static int setup_ibase_imask_callback(struct device *dev, void *data)
{
- /* lba_set_iregs() is in drivers/parisc/lba_pci.c */
- extern void lba_set_iregs(struct parisc_device *, u32, u32);
struct parisc_device *lba = to_parisc_device(dev);
struct ibase_data_struct *ibd = data;
int rope_num = (lba->hpa.start >> 13) & 0xf;
@@ -1992,7 +1996,7 @@ void __init sba_init(void)
/**
* sba_get_iommu - Assign the iommu pointer for the pci bus controller.
- * @dev: The parisc device.
+ * @pci_hba: The parisc device.
*
* Returns the appropriate IOMMU data for the given parisc PCI controller.
* This is cached and used later for PCI DMA Mapping.
@@ -2012,7 +2016,7 @@ void * sba_get_iommu(struct parisc_device *pci_hba)
/**
* sba_directed_lmmio - return first directed LMMIO range routed to rope
- * @pa_dev: The parisc device.
+ * @pci_hba: The parisc device.
* @r: resource PCI host controller wants start/end fields assigned.
*
* For the given parisc PCI controller, determine if any direct ranges
@@ -2054,7 +2058,7 @@ void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
/**
* sba_distributed_lmmio - return portion of distributed LMMIO range
- * @pa_dev: The parisc device.
+ * @pci_hba: The parisc device.
* @r: resource PCI host controller wants start/end fields assigned.
*
* For the given parisc PCI controller, return portion of distributed LMMIO
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 5561362224e2..631c193fe42c 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -42,7 +42,8 @@ if PARPORT
config PARPORT_PC
tristate "PC-style hardware"
- depends on ARCH_MIGHT_HAVE_PC_PARPORT || (PCI && !S390)
+ depends on ARCH_MIGHT_HAVE_PC_PARPORT || PCI
+ depends on HAS_IOPORT
help
You should say Y here if you have a PC-style parallel port. All
IBM PC compatible computers and some Alphas have PC-style
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index df092229e97d..c87848cd8686 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -834,7 +834,7 @@ static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
return platform_get_irq_byname_optional(pdev, name);
}
-static struct dw_edma_core_ops dw_pcie_edma_ops = {
+static struct dw_edma_plat_ops dw_pcie_edma_ops = {
.irq_vector = dw_pcie_edma_irq_vector,
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 0f5c8f8be847..c8b423c3c26e 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -1285,6 +1285,7 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_register_dev:
+ put_device(&ndev->ntb.dev);
return -EINVAL;
}
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 44c16508ef14..e72419d7e72e 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -5,7 +5,6 @@
menuconfig PCCARD
tristate "PCCard (PCMCIA/CardBus) support"
- depends on !UML
help
Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
computer. These are credit-card size devices such as network cards,
@@ -113,7 +112,7 @@ config YENTA_TOSHIBA
config PD6729
tristate "Cirrus PD6729 compatible bridge support"
- depends on PCMCIA && PCI
+ depends on PCMCIA && PCI && HAS_IOPORT
select PCCARD_NONSTATIC
help
This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge
@@ -121,7 +120,7 @@ config PD6729
config I82092
tristate "i82092 compatible bridge support"
- depends on PCMCIA && PCI
+ depends on PCMCIA && PCI && HAS_IOPORT
select PCCARD_NONSTATIC
help
This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device,
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 471e0c5815f3..bf9d070a4496 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
q = p->next;
kfree(p);
}
+
+ kfree(data);
}
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 4c07d71cab0b..f4572a5cca72 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -221,4 +221,17 @@ source "drivers/perf/arm_cspmu/Kconfig"
source "drivers/perf/amlogic/Kconfig"
+config CXL_PMU
+ tristate "CXL Performance Monitoring Unit"
+ depends on CXL_BUS
+ help
+ Support performance monitoring as defined in CXL rev 3.0
+ section 13.2: Performance Monitoring. CXL components may have
+ one or more CXL Performance Monitoring Units (CPMUs).
+
+ Say 'y/m' to enable a driver that will attach to performance
+	  monitoring units and provide standard perf-based interfaces.
+
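+	  Once a CPMU is probed, its events are usable from standard perf
+	  tooling, e.g. (illustrative device name):
+	  perf stat -a -e cxl_pmu_mem0.0/clock_ticks/
+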
+ If unsure say 'm'.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 5cfe8954c250..16b3ec4db916 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o
obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/
obj-$(CONFIG_MESON_DDR_PMU) += amlogic/
+obj-$(CONFIG_CXL_PMU) += cxl_pmu.o
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
new file mode 100644
index 000000000000..0a8f597e695b
--- /dev/null
+++ b/drivers/perf/cxl_pmu.c
@@ -0,0 +1,990 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright(c) 2023 Huawei
+ *
+ * The CXL 3.0 specification includes a standard Performance Monitoring Unit,
+ * called the CXL PMU, or CPMU. In order to allow a high degree of
+ * implementation flexibility, the specification provides a wide range of
+ * options, all of which are self-describing.
+ *
+ * Details in CXL rev 3.0 section 8.2.7 CPMU Register Interface
+ */
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/perf_event.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/bits.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/pci.h>
+
+#include "../cxl/cxlpci.h"
+#include "../cxl/cxl.h"
+#include "../cxl/pmu.h"
+
+#define CXL_PMU_CAP_REG 0x0
+#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(4, 0)
+#define CXL_PMU_CAP_COUNTER_WIDTH_MSK GENMASK_ULL(15, 8)
+#define CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK GENMASK_ULL(24, 20)
+#define CXL_PMU_CAP_FILTERS_SUP_MSK GENMASK_ULL(39, 32)
+#define CXL_PMU_FILTER_HDM BIT(0)
+#define CXL_PMU_FILTER_CHAN_RANK_BANK BIT(1)
+#define CXL_PMU_CAP_MSI_N_MSK GENMASK_ULL(47, 44)
+#define CXL_PMU_CAP_WRITEABLE_WHEN_FROZEN BIT_ULL(48)
+#define CXL_PMU_CAP_FREEZE BIT_ULL(49)
+#define CXL_PMU_CAP_INT BIT_ULL(50)
+#define CXL_PMU_CAP_VERSION_MSK GENMASK_ULL(63, 60)
+
+#define CXL_PMU_OVERFLOW_REG 0x10
+#define CXL_PMU_FREEZE_REG 0x18
+#define CXL_PMU_EVENT_CAP_REG(n) (0x100 + 8 * (n))
+#define CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK GENMASK_ULL(31, 0)
+#define CXL_PMU_EVENT_CAP_GROUP_ID_MSK GENMASK_ULL(47, 32)
+#define CXL_PMU_EVENT_CAP_VENDOR_ID_MSK GENMASK_ULL(63, 48)
+
+#define CXL_PMU_COUNTER_CFG_REG(n) (0x200 + 8 * (n))
+#define CXL_PMU_COUNTER_CFG_TYPE_MSK GENMASK_ULL(1, 0)
+#define CXL_PMU_COUNTER_CFG_TYPE_FREE_RUN 0
+#define CXL_PMU_COUNTER_CFG_TYPE_FIXED_FUN 1
+#define CXL_PMU_COUNTER_CFG_TYPE_CONFIGURABLE 2
+#define CXL_PMU_COUNTER_CFG_ENABLE BIT_ULL(8)
+#define CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW BIT_ULL(9)
+#define CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW BIT_ULL(10)
+#define CXL_PMU_COUNTER_CFG_EDGE BIT_ULL(11)
+#define CXL_PMU_COUNTER_CFG_INVERT BIT_ULL(12)
+#define CXL_PMU_COUNTER_CFG_THRESHOLD_MSK GENMASK_ULL(23, 16)
+#define CXL_PMU_COUNTER_CFG_EVENTS_MSK GENMASK_ULL(55, 24)
+#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)
+
+#define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
+#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0)
+
+#define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))
+
+/* CXL rev 3.0 Table 13-5 Events under CXL Vendor ID */
+#define CXL_PMU_GID_CLOCK_TICKS 0x00
+#define CXL_PMU_GID_D2H_REQ 0x0010
+#define CXL_PMU_GID_D2H_RSP 0x0011
+#define CXL_PMU_GID_H2D_REQ 0x0012
+#define CXL_PMU_GID_H2D_RSP 0x0013
+#define CXL_PMU_GID_CACHE_DATA 0x0014
+#define CXL_PMU_GID_M2S_REQ 0x0020
+#define CXL_PMU_GID_M2S_RWD 0x0021
+#define CXL_PMU_GID_M2S_BIRSP 0x0022
+#define CXL_PMU_GID_S2M_BISNP 0x0023
+#define CXL_PMU_GID_S2M_NDR 0x0024
+#define CXL_PMU_GID_S2M_DRS 0x0025
+#define CXL_PMU_GID_DDR 0x8000
+
+static int cxl_pmu_cpuhp_state_num;
+
+struct cxl_pmu_ev_cap {
+ u16 vid;
+ u16 gid;
+ u32 msk;
+ union {
+ int counter_idx; /* fixed counters */
+ int event_idx; /* configurable counters */
+ };
+ struct list_head node;
+};
+
+#define CXL_PMU_MAX_COUNTERS 64
+struct cxl_pmu_info {
+ struct pmu pmu;
+ void __iomem *base;
+ struct perf_event **hw_events;
+ struct list_head event_caps_configurable;
+ struct list_head event_caps_fixed;
+ DECLARE_BITMAP(used_counter_bm, CXL_PMU_MAX_COUNTERS);
+ DECLARE_BITMAP(conf_counter_bm, CXL_PMU_MAX_COUNTERS);
+ u16 counter_width;
+ u8 num_counters;
+ u8 num_event_capabilities;
+ int on_cpu;
+ struct hlist_node node;
+ bool filter_hdm;
+ int irq;
+};
+
+#define pmu_to_cxl_pmu_info(_pmu) container_of(_pmu, struct cxl_pmu_info, pmu)
+
+/*
+ * All CPMU counters are discoverable via the Event Capabilities Registers.
+ * Each Event Capability register contains a VID / GroupID.
+ * A counter may then count any combination (by summing) of events in
+ * that group which are in the Supported Events Bitmask.
+ * However, there are some complexities to the scheme.
+ * - Fixed function counters refer to an Event Capabilities register.
+ * That event capability register is not then used for Configurable
+ * counters.
+ */
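+/*
+ * Illustrative example (hypothetical layout, not mandated by the spec):
+ * if counter 0 is a fixed-function counter whose configuration register
+ * points at Event Capability register 3, then bit 3 is noted in
+ * fixed_counter_event_cap_bm below and Event Capability register 3 is
+ * left out of the list built for the configurable counters.
+ */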
+static int cxl_pmu_parse_caps(struct device *dev, struct cxl_pmu_info *info)
+{
+ unsigned long fixed_counter_event_cap_bm = 0;
+ void __iomem *base = info->base;
+ bool freeze_for_enable;
+ u64 val, eval;
+ int i;
+
+ val = readq(base + CXL_PMU_CAP_REG);
+ freeze_for_enable = FIELD_GET(CXL_PMU_CAP_WRITEABLE_WHEN_FROZEN, val) &&
+ FIELD_GET(CXL_PMU_CAP_FREEZE, val);
+ if (!freeze_for_enable) {
+ dev_err(dev, "Counters not writable while frozen\n");
+ return -ENODEV;
+ }
+
+ info->num_counters = FIELD_GET(CXL_PMU_CAP_NUM_COUNTERS_MSK, val) + 1;
+ info->counter_width = FIELD_GET(CXL_PMU_CAP_COUNTER_WIDTH_MSK, val);
+ info->num_event_capabilities = FIELD_GET(CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK, val) + 1;
+
+ info->filter_hdm = FIELD_GET(CXL_PMU_CAP_FILTERS_SUP_MSK, val) & CXL_PMU_FILTER_HDM;
+ if (FIELD_GET(CXL_PMU_CAP_INT, val))
+ info->irq = FIELD_GET(CXL_PMU_CAP_MSI_N_MSK, val);
+ else
+ info->irq = -1;
+
+ /* First handle fixed function counters; note if configurable counters found */
+ for (i = 0; i < info->num_counters; i++) {
+ struct cxl_pmu_ev_cap *pmu_ev;
+ u32 events_msk;
+ u8 group_idx;
+
+ val = readq(base + CXL_PMU_COUNTER_CFG_REG(i));
+
+ if (FIELD_GET(CXL_PMU_COUNTER_CFG_TYPE_MSK, val) ==
+ CXL_PMU_COUNTER_CFG_TYPE_CONFIGURABLE) {
+ set_bit(i, info->conf_counter_bm);
+ }
+
+ if (FIELD_GET(CXL_PMU_COUNTER_CFG_TYPE_MSK, val) !=
+ CXL_PMU_COUNTER_CFG_TYPE_FIXED_FUN)
+ continue;
+
+ /* In this case we know which fields are const */
+ group_idx = FIELD_GET(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK, val);
+ events_msk = FIELD_GET(CXL_PMU_COUNTER_CFG_EVENTS_MSK, val);
+ eval = readq(base + CXL_PMU_EVENT_CAP_REG(group_idx));
+ pmu_ev = devm_kzalloc(dev, sizeof(*pmu_ev), GFP_KERNEL);
+ if (!pmu_ev)
+ return -ENOMEM;
+
+ pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval);
+ pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval);
+ /* For a fixed purpose counter use the events mask from the counter CFG */
+ pmu_ev->msk = events_msk;
+ pmu_ev->counter_idx = i;
+ /* This list add is never unwound as all entries deleted on remove */
+ list_add(&pmu_ev->node, &info->event_caps_fixed);
+ /*
+		 * Configurable counters must not use an Event Capability register that
+		 * is in use for a Fixed counter.
+ */
+ set_bit(group_idx, &fixed_counter_event_cap_bm);
+ }
+
+ if (!bitmap_empty(info->conf_counter_bm, CXL_PMU_MAX_COUNTERS)) {
+ struct cxl_pmu_ev_cap *pmu_ev;
+ int j;
+ /* Walk event capabilities unused by fixed counters */
+ for_each_clear_bit(j, &fixed_counter_event_cap_bm,
+ info->num_event_capabilities) {
+ pmu_ev = devm_kzalloc(dev, sizeof(*pmu_ev), GFP_KERNEL);
+ if (!pmu_ev)
+ return -ENOMEM;
+
+ eval = readq(base + CXL_PMU_EVENT_CAP_REG(j));
+ pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval);
+ pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval);
+ pmu_ev->msk = FIELD_GET(CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK, eval);
+ pmu_ev->event_idx = j;
+ list_add(&pmu_ev->node, &info->event_caps_configurable);
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t cxl_pmu_format_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
+#define CXL_PMU_FORMAT_ATTR(_name, _format)\
+ (&((struct dev_ext_attribute[]) { \
+ { \
+ .attr = __ATTR(_name, 0444, \
+ cxl_pmu_format_sysfs_show, NULL), \
+ .var = (void *)_format \
+ } \
+ })[0].attr.attr)
+
+enum {
+ cxl_pmu_mask_attr,
+ cxl_pmu_gid_attr,
+ cxl_pmu_vid_attr,
+ cxl_pmu_threshold_attr,
+ cxl_pmu_invert_attr,
+ cxl_pmu_edge_attr,
+ cxl_pmu_hdm_filter_en_attr,
+ cxl_pmu_hdm_attr,
+};
+
+static struct attribute *cxl_pmu_format_attr[] = {
+ [cxl_pmu_mask_attr] = CXL_PMU_FORMAT_ATTR(mask, "config:0-31"),
+ [cxl_pmu_gid_attr] = CXL_PMU_FORMAT_ATTR(gid, "config:32-47"),
+ [cxl_pmu_vid_attr] = CXL_PMU_FORMAT_ATTR(vid, "config:48-63"),
+ [cxl_pmu_threshold_attr] = CXL_PMU_FORMAT_ATTR(threshold, "config1:0-15"),
+ [cxl_pmu_invert_attr] = CXL_PMU_FORMAT_ATTR(invert, "config1:16"),
+ [cxl_pmu_edge_attr] = CXL_PMU_FORMAT_ATTR(edge, "config1:17"),
+ [cxl_pmu_hdm_filter_en_attr] = CXL_PMU_FORMAT_ATTR(hdm_filter_en, "config1:18"),
+ [cxl_pmu_hdm_attr] = CXL_PMU_FORMAT_ATTR(hdm, "config2:0-15"),
+ NULL
+};
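+/*
+ * Illustrative use of the format strings above (hypothetical device name
+ * and event): a request such as
+ *   perf stat -a -e cxl_pmu_mem0.0/vid=0x1e98,gid=0x10,mask=0x2/
+ * places the vendor ID in config[63:48], the group ID in config[47:32]
+ * and the event mask in config[31:0].
+ */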
+
+#define CXL_PMU_ATTR_CONFIG_MASK_MSK GENMASK_ULL(31, 0)
+#define CXL_PMU_ATTR_CONFIG_GID_MSK GENMASK_ULL(47, 32)
+#define CXL_PMU_ATTR_CONFIG_VID_MSK GENMASK_ULL(63, 48)
+#define CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK GENMASK_ULL(15, 0)
+#define CXL_PMU_ATTR_CONFIG1_INVERT_MSK BIT(16)
+#define CXL_PMU_ATTR_CONFIG1_EDGE_MSK BIT(17)
+#define CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK BIT(18)
+#define CXL_PMU_ATTR_CONFIG2_HDM_MSK GENMASK(15, 0)
+
+static umode_t cxl_pmu_format_is_visible(struct kobject *kobj,
+ struct attribute *attr, int a)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_pmu_info *info = dev_get_drvdata(dev);
+
+ /*
+	 * Filter capability is at the CPMU level, so hide the attributes if the particular
+ * filter is not supported.
+ */
+ if (!info->filter_hdm &&
+ (attr == cxl_pmu_format_attr[cxl_pmu_hdm_filter_en_attr] ||
+ attr == cxl_pmu_format_attr[cxl_pmu_hdm_attr]))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group cxl_pmu_format_group = {
+ .name = "format",
+ .attrs = cxl_pmu_format_attr,
+ .is_visible = cxl_pmu_format_is_visible,
+};
+
+static u32 cxl_pmu_config_get_mask(struct perf_event *event)
+{
+ return FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, event->attr.config);
+}
+
+static u16 cxl_pmu_config_get_gid(struct perf_event *event)
+{
+ return FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, event->attr.config);
+}
+
+static u16 cxl_pmu_config_get_vid(struct perf_event *event)
+{
+ return FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, event->attr.config);
+}
+
+static u8 cxl_pmu_config1_get_threshold(struct perf_event *event)
+{
+ return FIELD_GET(CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK, event->attr.config1);
+}
+
+static bool cxl_pmu_config1_get_invert(struct perf_event *event)
+{
+ return FIELD_GET(CXL_PMU_ATTR_CONFIG1_INVERT_MSK, event->attr.config1);
+}
+
+static bool cxl_pmu_config1_get_edge(struct perf_event *event)
+{
+ return FIELD_GET(CXL_PMU_ATTR_CONFIG1_EDGE_MSK, event->attr.config1);
+}
+
+/*
+ * CPMU specification allows for 8 filters, each with a 16 bit value...
+ * So we need to find 8x16bits to store it in.
+ * As the value used for disable is 0xffff, a separate enable switch
+ * is needed.
+ */
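+/*
+ * For example (hypothetical values): hdm_filter_en=1,hdm=2 restricts
+ * counting to HDM decoder 2, whereas leaving hdm_filter_en clear causes
+ * the disable value 0xffff to be written to the filter register.
+ */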
+
+static bool cxl_pmu_config1_hdm_filter_en(struct perf_event *event)
+{
+ return FIELD_GET(CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK, event->attr.config1);
+}
+
+static u16 cxl_pmu_config2_get_hdm_decoder(struct perf_event *event)
+{
+ return FIELD_GET(CXL_PMU_ATTR_CONFIG2_HDM_MSK, event->attr.config2);
+}
+
+static ssize_t cxl_pmu_event_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct perf_pmu_events_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sysfs_emit(buf, "config=%#llx\n", pmu_attr->id);
+}
+
+#define CXL_PMU_EVENT_ATTR(_name, _vid, _gid, _msk) \
+ PMU_EVENT_ATTR_ID(_name, cxl_pmu_event_sysfs_show, \
+ ((u64)(_vid) << 48) | ((u64)(_gid) << 32) | (u64)(_msk))
+
+/* For CXL spec defined events */
+#define CXL_PMU_EVENT_CXL_ATTR(_name, _gid, _msk) \
+ CXL_PMU_EVENT_ATTR(_name, PCI_DVSEC_VENDOR_ID_CXL, _gid, _msk)
+
+static struct attribute *cxl_pmu_event_attrs[] = {
+ CXL_PMU_EVENT_CXL_ATTR(clock_ticks, CXL_PMU_GID_CLOCK_TICKS, BIT(0)),
+ /* CXL rev 3.0 Table 3-17 - Device to Host Requests */
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdcurr, CXL_PMU_GID_D2H_REQ, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdown, CXL_PMU_GID_D2H_REQ, BIT(2)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdshared, CXL_PMU_GID_D2H_REQ, BIT(3)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdany, CXL_PMU_GID_D2H_REQ, BIT(4)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdownnodata, CXL_PMU_GID_D2H_REQ, BIT(5)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_itomwr, CXL_PMU_GID_D2H_REQ, BIT(6)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrcurr, CXL_PMU_GID_D2H_REQ, BIT(7)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_clflush, CXL_PMU_GID_D2H_REQ, BIT(8)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_cleanevict, CXL_PMU_GID_D2H_REQ, BIT(9)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_dirtyevict, CXL_PMU_GID_D2H_REQ, BIT(10)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_cleanevictnodata, CXL_PMU_GID_D2H_REQ, BIT(11)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinv, CXL_PMU_GID_D2H_REQ, BIT(12)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinvf, CXL_PMU_GID_D2H_REQ, BIT(13)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrinv, CXL_PMU_GID_D2H_REQ, BIT(14)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_req_cacheflushed, CXL_PMU_GID_D2H_REQ, BIT(16)),
+	/* CXL rev 3.0 Table 3-20 - D2H Response Encodings */
+ CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihiti, CXL_PMU_GID_D2H_RSP, BIT(4)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvhitv, CXL_PMU_GID_D2H_RSP, BIT(6)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihitse, CXL_PMU_GID_D2H_RSP, BIT(5)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspshitse, CXL_PMU_GID_D2H_RSP, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspsfwdm, CXL_PMU_GID_D2H_RSP, BIT(7)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspifwdm, CXL_PMU_GID_D2H_RSP, BIT(15)),
+ CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvfwdv, CXL_PMU_GID_D2H_RSP, BIT(22)),
+ /* CXL rev 3.0 Table 3-21 - CXL.cache - Mapping of H2D Requests to D2H Responses */
+ CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpdata, CXL_PMU_GID_H2D_REQ, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpinv, CXL_PMU_GID_H2D_REQ, BIT(2)),
+ CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpcur, CXL_PMU_GID_H2D_REQ, BIT(3)),
+ /* CXL rev 3.0 Table 3-22 - H2D Response Opcode Encodings */
+ CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_writepull, CXL_PMU_GID_H2D_RSP, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_go, CXL_PMU_GID_H2D_RSP, BIT(4)),
+ CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_gowritepull, CXL_PMU_GID_H2D_RSP, BIT(5)),
+ CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_extcmp, CXL_PMU_GID_H2D_RSP, BIT(6)),
+ CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_gowritepulldrop, CXL_PMU_GID_H2D_RSP, BIT(8)),
+ CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_fastgowritepull, CXL_PMU_GID_H2D_RSP, BIT(13)),
+ CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_goerrwritepull, CXL_PMU_GID_H2D_RSP, BIT(15)),
+ /* CXL rev 3.0 Table 13-5 directly lists these */
+ CXL_PMU_EVENT_CXL_ATTR(cachedata_d2h_data, CXL_PMU_GID_CACHE_DATA, BIT(0)),
+ CXL_PMU_EVENT_CXL_ATTR(cachedata_h2d_data, CXL_PMU_GID_CACHE_DATA, BIT(1)),
+ /* CXL rev 3.0 Table 3-29 M2S Req Memory Opcodes */
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminv, CXL_PMU_GID_M2S_REQ, BIT(0)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrd, CXL_PMU_GID_M2S_REQ, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrddata, CXL_PMU_GID_M2S_REQ, BIT(2)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrdfwd, CXL_PMU_GID_M2S_REQ, BIT(3)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memwrfwd, CXL_PMU_GID_M2S_REQ, BIT(4)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memspecrd, CXL_PMU_GID_M2S_REQ, BIT(8)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminvnt, CXL_PMU_GID_M2S_REQ, BIT(9)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memcleanevict, CXL_PMU_GID_M2S_REQ, BIT(10)),
+ /* CXL rev 3.0 Table 3-35 M2S RwD Memory Opcodes */
+ CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_memwr, CXL_PMU_GID_M2S_RWD, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_memwrptl, CXL_PMU_GID_M2S_RWD, BIT(2)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_biconflict, CXL_PMU_GID_M2S_RWD, BIT(4)),
+ /* CXL rev 3.0 Table 3-38 M2S BIRsp Memory Opcodes */
+ CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_i, CXL_PMU_GID_M2S_BIRSP, BIT(0)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_s, CXL_PMU_GID_M2S_BIRSP, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_e, CXL_PMU_GID_M2S_BIRSP, BIT(2)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_iblk, CXL_PMU_GID_M2S_BIRSP, BIT(4)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_sblk, CXL_PMU_GID_M2S_BIRSP, BIT(5)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_eblk, CXL_PMU_GID_M2S_BIRSP, BIT(6)),
+ /* CXL rev 3.0 Table 3-40 S2M BISnp Opcodes */
+ CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_cur, CXL_PMU_GID_S2M_BISNP, BIT(0)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_data, CXL_PMU_GID_S2M_BISNP, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_inv, CXL_PMU_GID_S2M_BISNP, BIT(2)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_curblk, CXL_PMU_GID_S2M_BISNP, BIT(4)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_datblk, CXL_PMU_GID_S2M_BISNP, BIT(5)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_invblk, CXL_PMU_GID_S2M_BISNP, BIT(6)),
+	/* CXL rev 3.0 Table 3-43 S2M NDR Opcodes */
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(3)),
+ /* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
+ CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)),
+ /* CXL rev 3.0 Table 13-5 directly lists these */
+ CXL_PMU_EVENT_CXL_ATTR(ddr_act, CXL_PMU_GID_DDR, BIT(0)),
+ CXL_PMU_EVENT_CXL_ATTR(ddr_pre, CXL_PMU_GID_DDR, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(ddr_casrd, CXL_PMU_GID_DDR, BIT(2)),
+ CXL_PMU_EVENT_CXL_ATTR(ddr_caswr, CXL_PMU_GID_DDR, BIT(3)),
+ CXL_PMU_EVENT_CXL_ATTR(ddr_refresh, CXL_PMU_GID_DDR, BIT(4)),
+ CXL_PMU_EVENT_CXL_ATTR(ddr_selfrefreshent, CXL_PMU_GID_DDR, BIT(5)),
+ CXL_PMU_EVENT_CXL_ATTR(ddr_rfm, CXL_PMU_GID_DDR, BIT(6)),
+ NULL
+};
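+/*
+ * Configurable counters may sum any subset of a group's supported events,
+ * so masks can be combined (illustrative, hardware permitting):
+ *   perf stat -a -e cxl_pmu_mem0.0/vid=0x1e98,gid=0x10,mask=0x1e/
+ * counts d2h_req_rdcurr + rdown + rdshared + rdany on one counter.
+ */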
+
+static struct cxl_pmu_ev_cap *cxl_pmu_find_fixed_counter_ev_cap(struct cxl_pmu_info *info,
+ int vid, int gid, int msk)
+{
+ struct cxl_pmu_ev_cap *pmu_ev;
+
+ list_for_each_entry(pmu_ev, &info->event_caps_fixed, node) {
+ if (vid != pmu_ev->vid || gid != pmu_ev->gid)
+ continue;
+
+ /* Precise match for fixed counter */
+ if (msk == pmu_ev->msk)
+ return pmu_ev;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct cxl_pmu_ev_cap *cxl_pmu_find_config_counter_ev_cap(struct cxl_pmu_info *info,
+ int vid, int gid, int msk)
+{
+ struct cxl_pmu_ev_cap *pmu_ev;
+
+ list_for_each_entry(pmu_ev, &info->event_caps_configurable, node) {
+ if (vid != pmu_ev->vid || gid != pmu_ev->gid)
+ continue;
+
+ /* Request mask must be subset of supported */
+ if (msk & ~pmu_ev->msk)
+ continue;
+
+ return pmu_ev;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static umode_t cxl_pmu_event_is_visible(struct kobject *kobj, struct attribute *attr, int a)
+{
+ struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr);
+ struct perf_pmu_events_attr *pmu_attr =
+ container_of(dev_attr, struct perf_pmu_events_attr, attr);
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_pmu_info *info = dev_get_drvdata(dev);
+ int vid = FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, pmu_attr->id);
+ int gid = FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, pmu_attr->id);
+ int msk = FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, pmu_attr->id);
+
+ if (!IS_ERR(cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, msk)))
+ return attr->mode;
+
+ if (!IS_ERR(cxl_pmu_find_config_counter_ev_cap(info, vid, gid, msk)))
+ return attr->mode;
+
+ return 0;
+}
+
+static const struct attribute_group cxl_pmu_events = {
+ .name = "events",
+ .attrs = cxl_pmu_event_attrs,
+ .is_visible = cxl_pmu_event_is_visible,
+};
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_pmu_info *info = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(info->on_cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *cxl_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group cxl_pmu_cpumask_group = {
+ .attrs = cxl_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *cxl_pmu_attr_groups[] = {
+ &cxl_pmu_events,
+ &cxl_pmu_format_group,
+ &cxl_pmu_cpumask_group,
+ NULL
+};
+
+/* If counter_idx == NULL, don't try to allocate a counter. */
+static int cxl_pmu_get_event_idx(struct perf_event *event, int *counter_idx,
+ int *event_idx)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
+ DECLARE_BITMAP(configurable_and_free, CXL_PMU_MAX_COUNTERS);
+ struct cxl_pmu_ev_cap *pmu_ev;
+ u32 mask;
+ u16 gid, vid;
+ int i;
+
+ vid = cxl_pmu_config_get_vid(event);
+ gid = cxl_pmu_config_get_gid(event);
+ mask = cxl_pmu_config_get_mask(event);
+
+ pmu_ev = cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, mask);
+ if (!IS_ERR(pmu_ev)) {
+ if (!counter_idx)
+ return 0;
+ if (!test_bit(pmu_ev->counter_idx, info->used_counter_bm)) {
+ *counter_idx = pmu_ev->counter_idx;
+ return 0;
+ }
+ /* Fixed counter is in use, but maybe a configurable one? */
+ }
+
+ pmu_ev = cxl_pmu_find_config_counter_ev_cap(info, vid, gid, mask);
+ if (!IS_ERR(pmu_ev)) {
+ if (!counter_idx)
+ return 0;
+
+ bitmap_andnot(configurable_and_free, info->conf_counter_bm,
+ info->used_counter_bm, CXL_PMU_MAX_COUNTERS);
+
+ i = find_first_bit(configurable_and_free, CXL_PMU_MAX_COUNTERS);
+ if (i == CXL_PMU_MAX_COUNTERS)
+ return -EINVAL;
+
+ *counter_idx = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int cxl_pmu_event_init(struct perf_event *event)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
+ int rc;
+
+ /* Top level type sanity check - is this a Hardware Event being requested */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+ /* TODO: Validation of any filter */
+
+ /*
+ * Verify that it is possible to count what was requested. Either must
+ * be a fixed counter that is a precise match or a configurable counter
+ * where this is a subset.
+ */
+ rc = cxl_pmu_get_event_idx(event, NULL, NULL);
+ if (rc < 0)
+ return rc;
+
+ event->cpu = info->on_cpu;
+
+ return 0;
+}
+
+static void cxl_pmu_enable(struct pmu *pmu)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
+ void __iomem *base = info->base;
+
+ /* Can assume frozen at this stage */
+ writeq(0, base + CXL_PMU_FREEZE_REG);
+}
+
+static void cxl_pmu_disable(struct pmu *pmu)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
+ void __iomem *base = info->base;
+
+ /*
+	 * Whilst bits above the number of counters are RsvdZ,
+	 * they are unlikely to be repurposed given the number
+	 * of counters is allowed to be 64, leaving no reserved
+	 * bits. Hence this is only slightly naughty.
+ */
+ writeq(GENMASK_ULL(63, 0), base + CXL_PMU_FREEZE_REG);
+}
+
+static void cxl_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ void __iomem *base = info->base;
+ u64 cfg;
+
+ /*
+ * All paths to here should either set these flags directly or
+ * call cxl_pmu_event_stop() which will ensure the correct state.
+ */
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+ hwc->state = 0;
+
+ /*
+	 * Currently only hdm filter control is implemented; this code will
+	 * need generalizing when more filters are added.
+ */
+ if (info->filter_hdm) {
+ if (cxl_pmu_config1_hdm_filter_en(event))
+ cfg = cxl_pmu_config2_get_hdm_decoder(event);
+ else
+ cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */
+ writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
+ }
+
+ cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
+ cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1);
+ cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW, 1);
+ cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1);
+ cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EDGE,
+ cxl_pmu_config1_get_edge(event) ? 1 : 0);
+ cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INVERT,
+ cxl_pmu_config1_get_invert(event) ? 1 : 0);
+
+ /* Fixed purpose counters have next two fields RO */
+ if (test_bit(hwc->idx, info->conf_counter_bm)) {
+ cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK,
+ hwc->event_base);
+ cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENTS_MSK,
+ cxl_pmu_config_get_mask(event));
+ }
+ cfg &= ~CXL_PMU_COUNTER_CFG_THRESHOLD_MSK;
+ /*
+	 * For events that generate only 1 count per clock, the CXL 3.0 spec
+	 * states the threshold shall be set to 1, but if set to 0 it seems to
+	 * count the raw value anyway.
+	 * There is no definition of which events may count multiple times per
+	 * cycle, and hence to which events non-1 threshold values can apply.
+ * (CXL 3.0 8.2.7.2.1 Counter Configuration - threshold field definition)
+ */
+ cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_THRESHOLD_MSK,
+ cxl_pmu_config1_get_threshold(event));
+ writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
+
+ local64_set(&hwc->prev_count, 0);
+ writeq(0, base + CXL_PMU_COUNTER_REG(hwc->idx));
+
+ perf_event_update_userpage(event);
+}
+
+static u64 cxl_pmu_read_counter(struct perf_event *event)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
+ void __iomem *base = info->base;
+
+ return readq(base + CXL_PMU_COUNTER_REG(event->hw.idx));
+}
+
+static void __cxl_pmu_read(struct perf_event *event, bool overflow)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 new_cnt, prev_cnt, delta;
+
+ do {
+ prev_cnt = local64_read(&hwc->prev_count);
+ new_cnt = cxl_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != prev_cnt);
+
+ /*
+	 * If we know an overflow occurred then take that into account.
+	 * Note the counter is not reset as that would lose events.
+ */
+ delta = (new_cnt - prev_cnt) & GENMASK_ULL(info->counter_width - 1, 0);
+ if (overflow && delta < GENMASK_ULL(info->counter_width - 1, 0))
+ delta += (1UL << info->counter_width);
+
+ local64_add(delta, &event->count);
+}
+
+static void cxl_pmu_read(struct perf_event *event)
+{
+ __cxl_pmu_read(event, false);
+}
+
+static void cxl_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
+ void __iomem *base = info->base;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 cfg;
+
+ cxl_pmu_read(event);
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
+ cfg &= ~(FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1) |
+ FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1));
+ writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
+
+ hwc->state |= PERF_HES_UPTODATE;
+}
+
+static int cxl_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx, rc;
+ int event_idx = 0;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ rc = cxl_pmu_get_event_idx(event, &idx, &event_idx);
+ if (rc < 0)
+ return rc;
+
+ hwc->idx = idx;
+
+ /* Only set for configurable counters */
+ hwc->event_base = event_idx;
+ info->hw_events[idx] = event;
+ set_bit(idx, info->used_counter_bm);
+
+ if (flags & PERF_EF_START)
+ cxl_pmu_event_start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+static void cxl_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ cxl_pmu_event_stop(event, PERF_EF_UPDATE);
+ clear_bit(hwc->idx, info->used_counter_bm);
+ info->hw_events[hwc->idx] = NULL;
+ perf_event_update_userpage(event);
+}
+
+static irqreturn_t cxl_pmu_irq(int irq, void *data)
+{
+ struct cxl_pmu_info *info = data;
+ void __iomem *base = info->base;
+ u64 overflowed;
+ DECLARE_BITMAP(overflowedbm, 64);
+ int i;
+
+ overflowed = readq(base + CXL_PMU_OVERFLOW_REG);
+
+ /* Interrupt may be shared, so maybe it isn't ours */
+ if (!overflowed)
+ return IRQ_NONE;
+
+ bitmap_from_arr64(overflowedbm, &overflowed, 64);
+ for_each_set_bit(i, overflowedbm, info->num_counters) {
+ struct perf_event *event = info->hw_events[i];
+
+ if (!event) {
+ dev_dbg(info->pmu.dev,
+ "overflow but on non enabled counter %d\n", i);
+ continue;
+ }
+
+ __cxl_pmu_read(event, true);
+ }
+
+ writeq(overflowed, base + CXL_PMU_OVERFLOW_REG);
+
+ return IRQ_HANDLED;
+}
+
+static void cxl_pmu_perf_unregister(void *_info)
+{
+ struct cxl_pmu_info *info = _info;
+
+ perf_pmu_unregister(&info->pmu);
+}
+
+static void cxl_pmu_cpuhp_remove(void *_info)
+{
+ struct cxl_pmu_info *info = _info;
+
+ cpuhp_state_remove_instance_nocalls(cxl_pmu_cpuhp_state_num, &info->node);
+}
+
+static int cxl_pmu_probe(struct device *dev)
+{
+ struct cxl_pmu *pmu = to_cxl_pmu(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ struct cxl_pmu_info *info;
+ char *irq_name;
+ char *dev_name;
+ int rc, irq;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, info);
+ INIT_LIST_HEAD(&info->event_caps_fixed);
+ INIT_LIST_HEAD(&info->event_caps_configurable);
+
+ info->base = pmu->base;
+
+ info->on_cpu = -1;
+ rc = cxl_pmu_parse_caps(dev, info);
+ if (rc)
+ return rc;
+
+ info->hw_events = devm_kcalloc(dev, sizeof(*info->hw_events),
+ info->num_counters, GFP_KERNEL);
+ if (!info->hw_events)
+ return -ENOMEM;
+
+ switch (pmu->type) {
+ case CXL_PMU_MEMDEV:
+ dev_name = devm_kasprintf(dev, GFP_KERNEL, "cxl_pmu_mem%d.%d",
+ pmu->assoc_id, pmu->index);
+ break;
+ }
+ if (!dev_name)
+ return -ENOMEM;
+
+ info->pmu = (struct pmu) {
+ .name = dev_name,
+ .parent = dev,
+ .module = THIS_MODULE,
+ .event_init = cxl_pmu_event_init,
+ .pmu_enable = cxl_pmu_enable,
+ .pmu_disable = cxl_pmu_disable,
+ .add = cxl_pmu_event_add,
+ .del = cxl_pmu_event_del,
+ .start = cxl_pmu_event_start,
+ .stop = cxl_pmu_event_stop,
+ .read = cxl_pmu_read,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = cxl_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ if (info->irq <= 0)
+ return -EINVAL;
+
+ rc = pci_irq_vector(pdev, info->irq);
+ if (rc < 0)
+ return rc;
+ irq = rc;
+
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow\n", dev_name);
+ if (!irq_name)
+ return -ENOMEM;
+
+ rc = devm_request_irq(dev, irq, cxl_pmu_irq, IRQF_SHARED | IRQF_ONESHOT,
+ irq_name, info);
+ if (rc)
+ return rc;
+ info->irq = irq;
+
+ rc = cpuhp_state_add_instance(cxl_pmu_cpuhp_state_num, &info->node);
+ if (rc)
+ return rc;
+
+ rc = devm_add_action_or_reset(dev, cxl_pmu_cpuhp_remove, info);
+ if (rc)
+ return rc;
+
+ rc = perf_pmu_register(&info->pmu, info->pmu.name, -1);
+ if (rc)
+ return rc;
+
+ rc = devm_add_action_or_reset(dev, cxl_pmu_perf_unregister, info);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static struct cxl_driver cxl_pmu_driver = {
+ .name = "cxl_pmu",
+ .probe = cxl_pmu_probe,
+ .id = CXL_DEVICE_PMU,
+};
+
+static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
+
+ if (info->on_cpu != -1)
+ return 0;
+
+ info->on_cpu = cpu;
+ /*
+ * CPU HP lock is held so we should be guaranteed that the CPU hasn't yet
+ * gone away again.
+ */
+ WARN_ON(irq_set_affinity(info->irq, cpumask_of(cpu)));
+
+ return 0;
+}
+
+static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
+ unsigned int target;
+
+ if (info->on_cpu != cpu)
+ return 0;
+
+ info->on_cpu = -1;
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids) {
+ dev_err(info->pmu.dev, "Unable to find a suitable CPU\n");
+ return 0;
+ }
+
+ perf_pmu_migrate_context(&info->pmu, cpu, target);
+ info->on_cpu = target;
+ /*
+ * CPU HP lock is held so we should be guaranteed that this CPU hasn't yet
+ * gone away.
+ */
+ WARN_ON(irq_set_affinity(info->irq, cpumask_of(target)));
+
+ return 0;
+}
+
+static __init int cxl_pmu_init(void)
+{
+ int rc;
+
+ rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "AP_PERF_CXL_PMU_ONLINE",
+ cxl_pmu_online_cpu, cxl_pmu_offline_cpu);
+ if (rc < 0)
+ return rc;
+ cxl_pmu_cpuhp_state_num = rc;
+
+ rc = cxl_driver_register(&cxl_pmu_driver);
+ if (rc)
+ cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);
+
+ return rc;
+}
+
+static __exit void cxl_pmu_exit(void)
+{
+ cxl_driver_unregister(&cxl_pmu_driver);
+ cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CXL);
+module_init(cxl_pmu_init);
+module_exit(cxl_pmu_exit);
+MODULE_ALIAS_CXL(CXL_DEVICE_PMU);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index f46e3148d286..8dba9596408f 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -18,6 +18,7 @@ config GENERIC_PHY
config GENERIC_PHY_MIPI_DPHY
bool
+ depends on GENERIC_PHY
help
Generic MIPI D-PHY support.
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
index 9d1efa0d9394..ec2555bb83d5 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
@@ -172,10 +172,16 @@ static int phy_meson_g12a_usb2_init(struct phy *phy)
int ret;
unsigned int value;
- ret = reset_control_reset(priv->reset);
+ ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
+ ret = reset_control_reset(priv->reset);
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
udelay(RESET_COMPLETE_TIME);
/* usb2_otg_aca_en == 0 */
@@ -277,8 +283,13 @@ static int phy_meson_g12a_usb2_init(struct phy *phy)
static int phy_meson_g12a_usb2_exit(struct phy *phy)
{
struct phy_meson_g12a_usb2_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (!ret)
+ clk_disable_unprepare(priv->clk);
- return reset_control_reset(priv->reset);
+ return ret;
}
/* set_mode is not needed, mode setting is handled via the UTMI bus */
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
index 76cf4280d7ed..4c10cafded4e 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
@@ -59,6 +59,8 @@
#define USB_CTLR_TP_DIAG1_wake_MASK BIT(1)
#define USB_CTRL_CTLR_CSHCR 0x50
#define USB_CTRL_CTLR_CSHCR_ctl_pme_en_MASK BIT(18)
+#define USB_CTRL_P0_U2PHY_CFG1 0x68
+#define USB_CTRL_P0_U2PHY_CFG1_COMMONONN_MASK BIT(10)
/* Register definitions for the USB_PHY block in 7211b0 */
#define USB_PHY_PLL_CTL 0x00
@@ -90,6 +92,8 @@
#define BDC_EC_AXIRDA_RTS_MASK GENMASK(31, 28)
#define BDC_EC_AXIRDA_RTS_SHIFT 28
+#define USB_XHCI_GBL_GUSB2PHYCFG 0x100
+#define USB_XHCI_GBL_GUSB2PHYCFG_U2_FREECLK_EXISTS_MASK BIT(30)
static void usb_mdio_write_7211b0(struct brcm_usb_init_params *params,
uint8_t addr, uint16_t data)
@@ -140,13 +144,17 @@ static void xhci_soft_reset(struct brcm_usb_init_params *params,
int on_off)
{
void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ void __iomem *xhci_gbl = params->regs[BRCM_REGS_XHCI_GBL];
/* Assert reset */
- if (on_off)
+ if (on_off) {
USB_CTRL_UNSET(ctrl, USB_PM, XHC_SOFT_RESETB);
/* De-assert reset */
- else
+ } else {
USB_CTRL_SET(ctrl, USB_PM, XHC_SOFT_RESETB);
+ /* Required for COMMONONN to be set */
+ USB_XHCI_GBL_UNSET(xhci_gbl, GUSB2PHYCFG, U2_FREECLK_EXISTS);
+ }
}
static void usb_init_ipp(struct brcm_usb_init_params *params)
@@ -320,6 +328,9 @@ static void usb_init_common_7216(struct brcm_usb_init_params *params)
/* 1 millisecond - for USB clocks to settle down */
usleep_range(1000, 2000);
+ /* Disable PHY when port is suspended */
+ USB_CTRL_SET(ctrl, P0_U2PHY_CFG1, COMMONONN);
+
usb_wake_enable_7216(params, false);
usb_init_common(params);
}
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index a1ca83308f98..39536b6d96a9 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -35,6 +35,11 @@
#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK BIT(25) /* option */
#define USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK BIT(26) /* option */
#define USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK BIT(27) /* opt */
+#define USB_CTRL_SETUP_OC_DISABLE_PORT0_MASK BIT(28)
+#define USB_CTRL_SETUP_OC_DISABLE_PORT1_MASK BIT(29)
+#define USB_CTRL_SETUP_OC_DISABLE_MASK GENMASK(29, 28) /* option */
+#define USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK BIT(30)
+#define USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK BIT(31)
#define USB_CTRL_SETUP_OC3_DISABLE_MASK GENMASK(31, 30) /* option */
#define USB_CTRL_PLL_CTL 0x04
#define USB_CTRL_PLL_CTL_PLL_SUSPEND_EN_MASK BIT(27)
@@ -114,6 +119,8 @@ enum {
USB_CTRL_SETUP_SCB2_EN_SELECTOR,
USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR,
USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_SELECTOR,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_SELECTOR,
USB_CTRL_SETUP_OC3_DISABLE_SELECTOR,
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR,
USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR,
@@ -190,6 +197,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_SCB2_EN_MASK,
USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
@@ -232,6 +241,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_SCB2_EN_MASK,
USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
@@ -253,6 +264,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
@@ -274,6 +287,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_SCB2_EN_MASK,
USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
@@ -295,6 +310,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_SCB2_EN_MASK,
USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
@@ -316,6 +333,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_SCB2_EN_MASK,
USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK,
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK */
+ 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK */
0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
@@ -337,6 +356,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_SCB2_EN_MASK,
USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
@@ -358,6 +379,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_SCB2_EN_MASK,
USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK,
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
@@ -379,6 +402,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
@@ -400,6 +425,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
0, /*USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
@@ -872,6 +899,13 @@ static void usb_init_common(struct brcm_usb_init_params *params)
brcmusb_memc_fix(params);
+ /* Workaround for false positive OC for 7439b2 in DRD/Device mode */
+ if ((params->family_id == 0x74390012) &&
+ (params->supported_port_modes != USB_CTLR_MODE_HOST)) {
+ USB_CTRL_SET(ctrl, SETUP, OC_DISABLE_PORT1);
+ USB_CTRL_SET_FAMILY(params, SETUP, OC3_DISABLE_PORT1);
+ }
+
if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
index f9fbf8fb80e5..c1a88f5cd4cd 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -34,6 +34,14 @@ enum brcmusb_reg_sel {
brcm_usb_ctrl_unset(USB_CTRL_REG(base, reg), \
USB_CTRL_##reg##_##field##_MASK)
+#define USB_XHCI_GBL_REG(base, reg) ((void __iomem *)base + USB_XHCI_GBL_##reg)
+#define USB_XHCI_GBL_SET(base, reg, field) \
+ brcm_usb_ctrl_set(USB_XHCI_GBL_REG(base, reg), \
+ USB_XHCI_GBL_##reg##_##field##_MASK)
+#define USB_XHCI_GBL_UNSET(base, reg, field) \
+ brcm_usb_ctrl_unset(USB_XHCI_GBL_REG(base, reg), \
+ USB_XHCI_GBL_##reg##_##field##_MASK)
+
struct brcm_usb_init_params;
struct brcm_usb_init_ops {
diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c
index e569f5f67578..f461585c84c6 100644
--- a/drivers/phy/cadence/phy-cadence-salvo.c
+++ b/drivers/phy/cadence/phy-cadence-salvo.c
@@ -6,6 +6,7 @@
* Copyright (c) 2019-2020 NXP
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -15,7 +16,9 @@
#include <linux/of.h>
#include <linux/of_platform.h>
-/* PHY register definition */
+#define USB3_PHY_OFFSET 0x0
+#define USB2_PHY_OFFSET 0x38000
+/* USB3 PHY register definition */
#define PHY_PMA_CMN_CTRL1 0xC800
#define TB_ADDR_CMN_DIAG_HSCLK_SEL 0x01e0
#define TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR 0x0084
@@ -87,8 +90,35 @@
#define TB_ADDR_XCVR_DIAG_LANE_FCM_EN_MGN_TMR 0x40f2
#define TB_ADDR_TX_RCVDETSC_CTRL 0x4124
+/* USB2 PHY register definition */
+#define UTMI_REG15 0xaf
+#define UTMI_AFE_RX_REG0 0x0d
+#define UTMI_AFE_RX_REG5 0x12
+#define UTMI_AFE_BC_REG4 0x29
+
+/* Aligned with the UTMI_AFE_RX_REG0 bit[7:6] field definition */
+enum usb2_disconn_threshold {
+ USB2_DISCONN_THRESHOLD_575 = 0x0,
+ USB2_DISCONN_THRESHOLD_610 = 0x1,
+ USB2_DISCONN_THRESHOLD_645 = 0x3,
+};
+
+#define RX_USB2_DISCONN_MASK GENMASK(7, 6)
+
/* TB_ADDR_TX_RCVDETSC_CTRL */
#define RXDET_IN_P3_32KHZ BIT(0)
+/*
+ * UTMI_REG15
+ *
+ * How many microseconds to gate the txvalid signal while the analog
+ * HS/FS transmitters power up
+ */
+#define TXVALID_GATE_THRESHOLD_HS_MASK (BIT(4) | BIT(5))
+/* 0us, txvalid is ready just after HS/FS transmitters have powered up */
+#define TXVALID_GATE_THRESHOLD_HS_0US (BIT(4) | BIT(5))
+
+#define SET_B_SESSION_VALID (BIT(6) | BIT(5))
+#define CLR_B_SESSION_VALID (BIT(6))
struct cdns_reg_pairs {
u16 val;
@@ -106,19 +136,27 @@ struct cdns_salvo_phy {
struct clk *clk;
void __iomem *base;
struct cdns_salvo_data *data;
+ enum usb2_disconn_threshold usb2_disconn;
};
static const struct of_device_id cdns_salvo_phy_of_match[];
-static u16 cdns_salvo_read(struct cdns_salvo_phy *salvo_phy, u32 reg)
+static const struct cdns_salvo_data cdns_nxp_salvo_data;
+
+static bool cdns_is_nxp_phy(struct cdns_salvo_phy *salvo_phy)
+{
+ return salvo_phy->data == &cdns_nxp_salvo_data;
+}
+
+static u16 cdns_salvo_read(struct cdns_salvo_phy *salvo_phy, u32 offset, u32 reg)
{
- return (u16)readl(salvo_phy->base +
+ return (u16)readl(salvo_phy->base + offset +
reg * (1 << salvo_phy->data->reg_offset_shift));
}
-static void cdns_salvo_write(struct cdns_salvo_phy *salvo_phy,
+static void cdns_salvo_write(struct cdns_salvo_phy *salvo_phy, u32 offset,
u32 reg, u16 val)
{
- writel(val, salvo_phy->base +
+ writel(val, salvo_phy->base + offset +
reg * (1 << salvo_phy->data->reg_offset_shift));
}
@@ -219,15 +257,27 @@ static int cdns_salvo_phy_init(struct phy *phy)
for (i = 0; i < data->init_sequence_length; i++) {
const struct cdns_reg_pairs *reg_pair = data->init_sequence_val + i;
- cdns_salvo_write(salvo_phy, reg_pair->off, reg_pair->val);
+ cdns_salvo_write(salvo_phy, USB3_PHY_OFFSET, reg_pair->off, reg_pair->val);
}
/* RXDET_IN_P3_32KHZ, Receiver detect slow clock enable */
- value = cdns_salvo_read(salvo_phy, TB_ADDR_TX_RCVDETSC_CTRL);
+ value = cdns_salvo_read(salvo_phy, USB3_PHY_OFFSET, TB_ADDR_TX_RCVDETSC_CTRL);
value |= RXDET_IN_P3_32KHZ;
- cdns_salvo_write(salvo_phy, TB_ADDR_TX_RCVDETSC_CTRL,
+ cdns_salvo_write(salvo_phy, USB3_PHY_OFFSET, TB_ADDR_TX_RCVDETSC_CTRL,
RXDET_IN_P3_32KHZ);
+ value = cdns_salvo_read(salvo_phy, USB2_PHY_OFFSET, UTMI_REG15);
+ value &= ~TXVALID_GATE_THRESHOLD_HS_MASK;
+ cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_REG15,
+ value | TXVALID_GATE_THRESHOLD_HS_0US);
+
+ cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_RX_REG5, 0x5);
+
+ value = cdns_salvo_read(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_RX_REG0);
+ value &= ~RX_USB2_DISCONN_MASK;
+ value = FIELD_PREP(RX_USB2_DISCONN_MASK, salvo_phy->usb2_disconn);
+ cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_RX_REG0, value);
+
udelay(10);
clk_disable_unprepare(salvo_phy->clk);
@@ -251,11 +301,29 @@ static int cdns_salvo_phy_power_off(struct phy *phy)
return 0;
}
+static int cdns_salvo_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+
+ if (!cdns_is_nxp_phy(salvo_phy))
+ return 0;
+
+ if (mode == PHY_MODE_USB_DEVICE)
+ cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_BC_REG4,
+ SET_B_SESSION_VALID);
+ else
+ cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_BC_REG4,
+ CLR_B_SESSION_VALID);
+
+ return 0;
+}
+
static const struct phy_ops cdns_salvo_phy_ops = {
.init = cdns_salvo_phy_init,
.power_on = cdns_salvo_phy_power_on,
.power_off = cdns_salvo_phy_power_off,
.owner = THIS_MODULE,
+ .set_mode = cdns_salvo_set_mode,
};
static int cdns_salvo_phy_probe(struct platform_device *pdev)
@@ -264,6 +332,7 @@ static int cdns_salvo_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct cdns_salvo_phy *salvo_phy;
struct cdns_salvo_data *data;
+ u32 val;
data = (struct cdns_salvo_data *)of_device_get_match_data(dev);
salvo_phy = devm_kzalloc(dev, sizeof(*salvo_phy), GFP_KERNEL);
@@ -275,6 +344,16 @@ static int cdns_salvo_phy_probe(struct platform_device *pdev)
if (IS_ERR(salvo_phy->clk))
return PTR_ERR(salvo_phy->clk);
+ if (of_property_read_u32(dev->of_node, "cdns,usb2-disconnect-threshold-microvolt", &val))
+ val = 575;
+
+ if (val < 610)
+ salvo_phy->usb2_disconn = USB2_DISCONN_THRESHOLD_575;
+ else if (val < 645)
+ salvo_phy->usb2_disconn = USB2_DISCONN_THRESHOLD_610;
+ else
+ salvo_phy->usb2_disconn = USB2_DISCONN_THRESHOLD_645;
+
salvo_phy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(salvo_phy->base))
return PTR_ERR(salvo_phy->base);
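
For reference, a standalone sketch (plain C, not driver code) of the threshold selection done in probe above: the optional "cdns,usb2-disconnect-threshold-microvolt" property defaults to 575 and is quantised onto the three supported register settings.

#include <stdio.h>

enum { THRESHOLD_575, THRESHOLD_610, THRESHOLD_645 };

static int usb2_disconn_from_property(unsigned int val)
{
	if (val < 610)
		return THRESHOLD_575;
	if (val < 645)
		return THRESHOLD_610;
	return THRESHOLD_645;
}

int main(void)
{
	/* 575 (the default), 640 and 700 pick the three settings in turn */
	printf("%d %d %d\n", usb2_disconn_from_property(575),
	       usb2_disconn_from_property(640),
	       usb2_disconn_from_property(700));
	return 0;
}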
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 62e59d1bb9c3..37b6b5c05be8 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -38,6 +38,9 @@
#define POLL_TIMEOUT_US 5000
#define PLL_LOCK_TIMEOUT 100000
+#define DP_PLL0 BIT(0)
+#define DP_PLL1 BIT(1)
+
#define TORRENT_COMMON_CDB_OFFSET 0x0
#define TORRENT_TX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
@@ -66,16 +69,11 @@
*/
#define PHY_AUX_CTRL 0x04
#define PHY_RESET 0x20
-#define PMA_TX_ELEC_IDLE_MASK 0xF0U
#define PMA_TX_ELEC_IDLE_SHIFT 4
-#define PHY_L00_RESET_N_MASK 0x01U
#define PHY_PMA_XCVR_PLLCLK_EN 0x24
#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
-#define PHY_POWER_STATE_LN_0 0x0000
-#define PHY_POWER_STATE_LN_1 0x0008
-#define PHY_POWER_STATE_LN_2 0x0010
-#define PHY_POWER_STATE_LN_3 0x0018
+#define PHY_POWER_STATE_LN(ln) ((ln) * 8)
#define PMA_XCVR_POWER_STATE_REQ_LN_MASK 0x3FU
#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
#define PHY_PMA_CMN_READY 0x34
@@ -323,6 +321,7 @@ struct cdns_torrent_phy {
void __iomem *base; /* DPTX registers base */
void __iomem *sd_base; /* SD0801 registers base */
u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
+ u32 dp_pll;
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct device *dev;
@@ -905,88 +904,129 @@ void cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(struct cdns_torrent_phy *cdns_phy,
/* Setting VCO for 10.8GHz */
case 2700:
case 5400:
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0028);
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_FBH_OVRD_M0, 0x0022);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBH_OVRD_M0, 0x0022);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBL_OVRD_M0, 0x000C);
+ if (cdns_phy->dp_pll & DP_PLL0)
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_FBH_OVRD_M0, 0x0022);
+
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0028);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBH_OVRD_M0, 0x0022);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBL_OVRD_M0, 0x000C);
+ }
break;
/* Setting VCO for 9.72GHz */
case 1620:
case 2430:
case 3240:
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0061);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0061);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x3333);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x3333);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0042);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0042);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0061);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x3333);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0042);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ }
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0061);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x3333);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0042);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ }
break;
/* Setting VCO for 8.64GHz */
case 2160:
case 4320:
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0056);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0056);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x6666);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x6666);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x003A);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x003A);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0056);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x6666);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x003A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ }
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0056);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x6666);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x003A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ }
break;
/* Setting VCO for 8.1GHz */
case 8100:
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0051);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0051);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0036);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0036);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0051);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0036);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ }
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0051);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0036);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ }
break;
}
}
+/* Set PLL used for DP configuration */
+static int cdns_torrent_dp_get_pll(struct cdns_torrent_phy *cdns_phy,
+ enum cdns_torrent_phy_type phy_t2)
+{
+ switch (phy_t2) {
+ case TYPE_PCIE:
+ case TYPE_USB:
+ cdns_phy->dp_pll = DP_PLL1;
+ break;
+ case TYPE_SGMII:
+ case TYPE_QSGMII:
+ cdns_phy->dp_pll = DP_PLL0;
+ break;
+ case TYPE_NONE:
+ cdns_phy->dp_pll = DP_PLL0 | DP_PLL1;
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Unsupported PHY configuration\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* Enable or disable PLL for selected lanes.
*/
static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp,
bool enable)
{
- u32 rd_val;
- u32 ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u32 rd_val, pll_ack_val;
+ int ret;
/*
* Used to determine, which bits to check for or enable in
@@ -996,28 +1036,18 @@ static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
/* Used to enable or disable lanes. */
u32 pll_val;
- /* Select values of registers and mask, depending on enabled lane
- * count.
- */
- switch (dp->lanes) {
- /* lane 0 */
- case (1):
- pll_bits = 0x00000001;
- break;
- /* lanes 0-1 */
- case (2):
- pll_bits = 0x00000003;
- break;
- /* lanes 0-3, all */
- default:
- pll_bits = 0x0000000F;
- break;
- }
+ /* Select values of registers and mask, depending on enabled lane count. */
+ pll_val = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
- if (enable)
- pll_val = pll_bits;
- else
- pll_val = 0x00000000;
+ if (enable) {
+ pll_bits = ((1 << dp->lanes) - 1);
+ pll_val |= pll_bits;
+ pll_ack_val = pll_bits;
+ } else {
+ pll_bits = ((1 << inst->num_lanes) - 1);
+ pll_val &= (~pll_bits);
+ pll_ack_val = 0;
+ }
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_val);
@@ -1025,22 +1055,23 @@ static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
ret = regmap_read_poll_timeout(regmap,
PHY_PMA_XCVR_PLLCLK_EN_ACK,
rd_val,
- (rd_val & pll_bits) == pll_val,
+ (rd_val & pll_bits) == pll_ack_val,
0, POLL_TIMEOUT_US);
ndelay(100);
return ret;
}
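
The read-modify-write above replaces the old fixed lane tables; a standalone sketch (not driver code) of the bit arithmetic, showing that PLL clock enable bits owned by another link on the same PHY are left untouched:

#include <stdio.h>

int main(void)
{
	unsigned int pllclk_en = 0x0c;	/* say another link already owns lanes 2-3 */
	unsigned int lanes = 2;
	unsigned int pll_bits = (1u << lanes) - 1;	/* 0x03 for a 2-lane link */

	pllclk_en |= pll_bits;		/* enable  -> 0x0f */
	printf("enable : 0x%02x\n", pllclk_en);

	pllclk_en &= ~pll_bits;		/* disable -> 0x0c, lanes 2-3 untouched */
	printf("disable: 0x%02x\n", pllclk_en);
	return 0;
}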
static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
u32 num_lanes,
enum phy_powerstate powerstate)
{
/* Register value for power state for a single byte. */
- u32 value_part;
- u32 value;
- u32 mask;
+ u32 value_part, i;
+ u32 value = 0;
+ u32 mask = 0;
u32 read_val;
- u32 ret;
+ int ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
switch (powerstate) {
@@ -1056,29 +1087,11 @@ static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
break;
}
- /* Select values of registers and mask, depending on enabled
- * lane count.
- */
- switch (num_lanes) {
- /* lane 0 */
- case (1):
- value = value_part;
- mask = 0x0000003FU;
- break;
- /* lanes 0-1 */
- case (2):
- value = (value_part
- | (value_part << 8));
- mask = 0x00003F3FU;
- break;
- /* lanes 0-3, all */
- default:
- value = (value_part
- | (value_part << 8)
- | (value_part << 16)
- | (value_part << 24));
- mask = 0x3F3F3F3FU;
- break;
+ /* Select values of registers and mask, depending on enabled lane count. */
+
+ for (i = 0; i < num_lanes; i++) {
+ value |= (value_part << PHY_POWER_STATE_LN(i));
+ mask |= (PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN(i));
}
/* Set power state A<n>. */
@@ -1093,7 +1106,8 @@ static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
return ret;
}
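
The per-lane loop above generates the same request words the deleted switch used to hard-code; a standalone sketch (not driver code) reproducing the two-lane case:

#include <stdio.h>

#define PHY_POWER_STATE_LN(ln)			((ln) * 8)
#define PMA_XCVR_POWER_STATE_REQ_LN_MASK	0x3FU

int main(void)
{
	unsigned int value_part = 0x1, value = 0, mask = 0, num_lanes = 2, i;

	for (i = 0; i < num_lanes; i++) {
		value |= value_part << PHY_POWER_STATE_LN(i);
		mask  |= PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN(i);
	}
	/* prints value=0x00000101 mask=0x00003f3f, matching the old 2-lane entry */
	printf("value=0x%08x mask=0x%08x\n", value, mask);
	return 0;
}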
-static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes)
+static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst, u32 num_lanes)
{
unsigned int read_val;
int ret;
@@ -1114,12 +1128,12 @@ static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes)
ndelay(100);
- ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, num_lanes,
POWERSTATE_A2);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, num_lanes,
POWERSTATE_A0);
return ret;
@@ -1143,6 +1157,7 @@ static int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy)
}
static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
u32 rate, u32 num_lanes)
{
unsigned int clk_sel_val = 0;
@@ -1175,14 +1190,17 @@ static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
break;
}
- cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
- CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val);
- cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
- CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val);
+ if (cdns_phy->dp_pll & DP_PLL0)
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val);
+
+ if (cdns_phy->dp_pll & DP_PLL1)
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val);
/* PMA lane configuration to deal with multi-link operation */
for (i = 0; i < num_lanes; i++)
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[i],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + i],
XCVR_DIAG_HSCLK_DIV, hsclk_div_val);
}
@@ -1191,23 +1209,44 @@ static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
* set and PLL disable request was processed.
*/
static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
- u32 read_val, ret;
+ u32 read_val, field_val;
+ int ret;
- /* Disable the cmn_pll0_en before re-programming the new data rate. */
- regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x0);
+ /*
+ * Disable the associated PLL (cmn_pll0_en or cmn_pll1_en) before
+ * re-programming the new data rate.
+ */
+ ret = regmap_field_read(cdns_phy->phy_pma_pll_raw_ctrl, &field_val);
+ if (ret)
+ return ret;
+ field_val &= ~(cdns_phy->dp_pll);
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, field_val);
/*
* Wait for PLL ready de-assertion.
* For PLL0 - PHY_PMA_CMN_CTRL2[2] == 1
+ * For PLL1 - PHY_PMA_CMN_CTRL2[3] == 1
*/
- ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
- read_val,
- ((read_val >> 2) & 0x01) != 0,
- 0, POLL_TIMEOUT_US);
- if (ret)
- return ret;
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 2) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ }
+
+ if ((cdns_phy->dp_pll & DP_PLL1) && cdns_phy->nsubnodes != 1) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 3) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ }
ndelay(200);
/* DP Rate Change - VCO Output settings. */
@@ -1221,19 +1260,35 @@ static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy,
/* PMA common configuration 100MHz */
cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy, dp->link_rate, dp->ssc);
- cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes);
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, inst, dp->link_rate, dp->lanes);
- /* Enable the cmn_pll0_en. */
- regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x3);
+ /* Enable the associated PLL (cmn_pll0_en or cmn_pll1_en) */
+ ret = regmap_field_read(cdns_phy->phy_pma_pll_raw_ctrl, &field_val);
+ if (ret)
+ return ret;
+ field_val |= cdns_phy->dp_pll;
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, field_val);
/*
* Wait for PLL ready assertion.
* For PLL0 - PHY_PMA_CMN_CTRL2[0] == 1
+ * For PLL1 - PHY_PMA_CMN_CTRL2[1] == 1
*/
- ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
- read_val,
- (read_val & 0x01) != 0,
- 0, POLL_TIMEOUT_US);
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ (read_val & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ }
+
+ if ((cdns_phy->dp_pll & DP_PLL1) && cdns_phy->nsubnodes != 1)
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 1) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+
return ret;
}
@@ -1301,6 +1356,7 @@ static int cdns_torrent_dp_verify_config(struct cdns_torrent_inst *inst,
/* Set power state A0 and PLL clock enable to 0 on enabled lanes. */
static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
u32 num_lanes)
{
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
@@ -1308,27 +1364,13 @@ static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
PHY_PMA_XCVR_POWER_STATE_REQ);
u32 pll_clk_en = cdns_torrent_dp_read(regmap,
PHY_PMA_XCVR_PLLCLK_EN);
+ u32 i;
- /* Lane 0 is always enabled. */
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_0);
- pll_clk_en &= ~0x01U;
-
- if (num_lanes > 1) {
- /* lane 1 */
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_1);
- pll_clk_en &= ~(0x01U << 1);
- }
+ for (i = 0; i < num_lanes; i++) {
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK
+ << PHY_POWER_STATE_LN(inst->mlane + i));
- if (num_lanes > 2) {
- /* lanes 2 and 3 */
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_2);
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_3);
- pll_clk_en &= ~(0x01U << 2);
- pll_clk_en &= ~(0x01U << 3);
+ pll_clk_en &= ~(0x01U << (inst->mlane + i));
}
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, pwr_state);
@@ -1337,36 +1379,57 @@ static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
/* Configure lane count as required. */
static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
- u32 value;
- u32 ret;
+ u32 value, i;
+ int ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
u8 lane_mask = (1 << dp->lanes) - 1;
+ u8 pma_tx_elec_idle_mask = 0;
+ u32 clane = inst->mlane;
+
+ lane_mask <<= clane;
value = cdns_torrent_dp_read(regmap, PHY_RESET);
/* clear pma_tx_elec_idle_ln_* bits. */
- value &= ~PMA_TX_ELEC_IDLE_MASK;
+ pma_tx_elec_idle_mask = ((1 << inst->num_lanes) - 1) << clane;
+
+ pma_tx_elec_idle_mask <<= PMA_TX_ELEC_IDLE_SHIFT;
+
+ value &= ~pma_tx_elec_idle_mask;
+
/* Assert pma_tx_elec_idle_ln_* for disabled lanes. */
value |= ((~lane_mask) << PMA_TX_ELEC_IDLE_SHIFT) &
- PMA_TX_ELEC_IDLE_MASK;
+ pma_tx_elec_idle_mask;
+
cdns_torrent_dp_write(regmap, PHY_RESET, value);
- /* reset the link by asserting phy_l00_reset_n low */
+ /* reset the link by asserting master lane phy_l0*_reset_n low */
cdns_torrent_dp_write(regmap, PHY_RESET,
- value & (~PHY_L00_RESET_N_MASK));
+ value & (~(1 << clane)));
/*
- * Assert lane reset on unused lanes and lane 0 so they remain in reset
+ * Assert lane reset on unused lanes and master lane so they remain in reset
* and powered down when re-enabling the link
*/
- value = (value & 0x0000FFF0) | (0x0000000E & lane_mask);
+ for (i = 0; i < inst->num_lanes; i++)
+ value &= (~(1 << (clane + i)));
+
+ for (i = 1; i < inst->num_lanes; i++)
+ value |= ((1 << (clane + i)) & lane_mask);
+
cdns_torrent_dp_write(regmap, PHY_RESET, value);
- cdns_torrent_dp_set_a0_pll(cdns_phy, dp->lanes);
+ cdns_torrent_dp_set_a0_pll(cdns_phy, inst, dp->lanes);
/* release phy_l0*_reset_n based on used laneCount */
- value = (value & 0x0000FFF0) | (0x0000000F & lane_mask);
+ for (i = 0; i < inst->num_lanes; i++)
+ value &= (~(1 << (clane + i)));
+
+ for (i = 0; i < inst->num_lanes; i++)
+ value |= ((1 << (clane + i)) & lane_mask);
+
cdns_torrent_dp_write(regmap, PHY_RESET, value);
/* Wait, until PHY gets ready after releasing PHY reset signal. */
@@ -1377,41 +1440,44 @@ static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
ndelay(100);
/* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
- cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+ value = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
+ value |= (1 << clane);
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, value);
- ret = cdns_torrent_dp_run(cdns_phy, dp->lanes);
+ ret = cdns_torrent_dp_run(cdns_phy, inst, dp->lanes);
return ret;
}
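
The lane-mask handling above anchors everything at the instance's master lane instead of lane 0; a standalone sketch (not driver code) of the mask construction for a link occupying lanes 2-3:

#include <stdio.h>

#define PMA_TX_ELEC_IDLE_SHIFT	4

int main(void)
{
	unsigned int clane = 2, num_lanes = 2;			/* master lane 2, two lanes */
	unsigned int group = ((1u << num_lanes) - 1) << clane;	/* 0x0c: per-lane reset bits */
	unsigned int idle_mask = group << PMA_TX_ELEC_IDLE_SHIFT;	/* 0xc0: elec idle bits */

	printf("lane group 0x%02x, elec idle mask 0x%02x\n", group, idle_mask);
	return 0;
}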
/* Configure link rate as required. */
static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
- u32 ret;
+ int ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A3);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, false);
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, inst, dp, false);
if (ret)
return ret;
ndelay(200);
- ret = cdns_torrent_dp_configure_rate(cdns_phy, dp);
+ ret = cdns_torrent_dp_configure_rate(cdns_phy, inst, dp);
if (ret)
return ret;
ndelay(200);
- ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, true);
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, inst, dp, true);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A2);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A0);
if (ret)
return ret;
@@ -1422,44 +1488,45 @@ static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
/* Configure voltage swing and pre-emphasis for all enabled lanes. */
static void cdns_torrent_dp_set_voltages(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
u8 lane;
u16 val;
for (lane = 0; lane < dp->lanes; lane++) {
- val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA);
/*
* Write 1 to register bit TX_DIAG_ACYA[0] to freeze the
* current state of the analog TX driver.
*/
val |= TX_DIAG_ACYA_HBDC_MASK;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA, val);
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_CTRL, 0x08A4);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].diag_tx_drv;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
DRV_DIAG_TX_DRV, val);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].mgnfs_mult;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_MGNFS_MULT_000,
val);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].cpost_mult;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_CPOST_MULT_00,
val);
- val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA);
/*
* Write 0 to register bit TX_DIAG_ACYA[0] to allow the state of
* analog TX driver to reflect the new programmed one.
*/
val &= ~TX_DIAG_ACYA_HBDC_MASK;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA, val);
}
};
@@ -1478,7 +1545,7 @@ static int cdns_torrent_dp_configure(struct phy *phy,
}
if (opts->dp.set_lanes) {
- ret = cdns_torrent_dp_set_lanes(cdns_phy, &opts->dp);
+ ret = cdns_torrent_dp_set_lanes(cdns_phy, inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "cdns_torrent_dp_set_lanes failed\n");
return ret;
@@ -1486,7 +1553,7 @@ static int cdns_torrent_dp_configure(struct phy *phy,
}
if (opts->dp.set_rate) {
- ret = cdns_torrent_dp_set_rate(cdns_phy, &opts->dp);
+ ret = cdns_torrent_dp_set_rate(cdns_phy, inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "cdns_torrent_dp_set_rate failed\n");
return ret;
@@ -1494,7 +1561,7 @@ static int cdns_torrent_dp_configure(struct phy *phy,
}
if (opts->dp.set_voltages)
- cdns_torrent_dp_set_voltages(cdns_phy, &opts->dp);
+ cdns_torrent_dp_set_voltages(cdns_phy, inst, &opts->dp);
return ret;
}
@@ -1562,6 +1629,7 @@ static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy,
{
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
unsigned char lane_bits;
+ u32 val;
cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */
@@ -1569,18 +1637,23 @@ static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy,
* Set lines power state to A0
* Set lines pll clk enable to 0
*/
- cdns_torrent_dp_set_a0_pll(cdns_phy, inst->num_lanes);
+ cdns_torrent_dp_set_a0_pll(cdns_phy, inst, inst->num_lanes);
/*
* release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
* used lanes
*/
lane_bits = (1 << inst->num_lanes) - 1;
- cdns_torrent_dp_write(regmap, PHY_RESET,
- ((0xF & ~lane_bits) << 4) | (0xF & lane_bits));
+
+ val = cdns_torrent_dp_read(regmap, PHY_RESET);
+ val |= (0xF & lane_bits);
+ val &= ~(lane_bits << 4);
+ cdns_torrent_dp_write(regmap, PHY_RESET, val);
/* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
- cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+ val = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
+ val |= 1;
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, val);
/*
* PHY PMA registers configuration functions
@@ -1599,7 +1672,7 @@ static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy,
cdns_phy->max_bit_rate,
false);
- cdns_torrent_dp_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate,
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, inst, cdns_phy->max_bit_rate,
inst->num_lanes);
/* take out of reset */
@@ -1612,13 +1685,15 @@ static int cdns_torrent_dp_start(struct cdns_torrent_phy *cdns_phy,
{
int ret;
- cdns_torrent_phy_on(phy);
+ ret = cdns_torrent_phy_on(phy);
+ if (ret)
+ return ret;
ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
if (ret)
return ret;
- ret = cdns_torrent_dp_run(cdns_phy, inst->num_lanes);
+ ret = cdns_torrent_dp_run(cdns_phy, inst, inst->num_lanes);
return ret;
}
@@ -1627,6 +1702,7 @@ static int cdns_torrent_dp_init(struct phy *phy)
{
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ int ret;
switch (cdns_phy->ref_clk_rate) {
case CLK_19_2_MHZ:
@@ -1639,6 +1715,24 @@ static int cdns_torrent_dp_init(struct phy *phy)
return -EINVAL;
}
+ ret = cdns_torrent_dp_get_pll(cdns_phy, TYPE_NONE);
+ if (ret)
+ return ret;
+
+ cdns_torrent_dp_common_init(cdns_phy, inst);
+
+ return cdns_torrent_dp_start(cdns_phy, inst, phy);
+}
+
+static int cdns_torrent_dp_multilink_init(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
+ struct phy *phy)
+{
+ if (cdns_phy->ref_clk_rate != CLK_100_MHZ) {
+ dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
+ return -EINVAL;
+ }
+
cdns_torrent_dp_common_init(cdns_phy, inst);
return cdns_torrent_dp_start(cdns_phy, inst, phy);
@@ -2156,8 +2250,11 @@ static int cdns_torrent_phy_init(struct phy *phy)
u32 num_regs;
int i, j;
- if (cdns_phy->nsubnodes > 1)
+ if (cdns_phy->nsubnodes > 1) {
+ if (phy_type == TYPE_DP)
+ return cdns_torrent_dp_multilink_init(cdns_phy, inst, phy);
return 0;
+ }
/**
* Spread spectrum generation is not required or supported
@@ -2399,6 +2496,12 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
}
+ if (phy_t1 == TYPE_DP) {
+ ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2);
+ if (ret)
+ return ret;
+ }
+
reset_control_deassert(cdns_phy->phys[node].lnk_rst);
}
@@ -2794,6 +2897,109 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev)
cdns_torrent_clk_cleanup(cdns_phy);
}
+/* USB and DP link configuration */
+static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG},
+ {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0041, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = {
+ {0x0001, XCVR_DIAG_HSCLK_SEL},
+ {0x0009, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals usb_dp_link_cmn_vals = {
+ .reg_pairs = usb_dp_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_dp_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = {
+ .reg_pairs = usb_dp_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_dp_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = dp_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs),
+};
+
+/* PCIe and DP link configuration */
+static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
+ {0x0003, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}
+};
+
+static struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0012, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = {
+ {0x0001, XCVR_DIAG_HSCLK_SEL},
+ {0x0009, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals pcie_dp_link_cmn_vals = {
+ .reg_pairs = pcie_dp_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_dp_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = {
+ .reg_pairs = pcie_dp_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_dp_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = dp_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_pcie_xcvr_diag_ln_regs),
+};
+
+/* DP Multilink, 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = {
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = {
+ {0x00FB, TX_PSC_A0},
+ {0x04AA, TX_PSC_A2},
+ {0x04AA, TX_PSC_A3},
+ {0x000F, XCVR_DIAG_BIDI_CTRL}
+};
+
+static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
+ {0x0000, RX_PSC_A0},
+ {0x0000, RX_PSC_A2},
+ {0x0000, RX_PSC_A3},
+ {0x0000, RX_PSC_CAL},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0000, RX_REE_GCSM2_CTRL},
+ {0x0000, RX_REE_PERGCSM_CTRL}
+};
+
+static struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = {
+ .reg_pairs = dp_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(dp_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = dp_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = dp_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_100_no_ssc_rx_ln_regs),
+};
+
/* Single DisplayPort(DP) link configuration */
static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
@@ -3736,6 +3942,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_link_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_dp_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_dp_link_cmn_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -3758,6 +3970,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
[INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_dp_link_cmn_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -3810,6 +4025,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
[INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_dp_link_cmn_vals,
+ },
},
},
.xcvr_diag_vals = {
@@ -3817,6 +4035,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_usb_xcvr_diag_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -3839,6 +4063,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
[INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_dp_xcvr_diag_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -3891,6 +4118,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
[INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_dp_xcvr_diag_ln_vals,
+ },
},
},
.pcs_cmn_vals = {
@@ -3915,6 +4145,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ },
},
},
.cmn_vals = {
@@ -3937,6 +4170,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -3959,6 +4198,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = NULL,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4011,6 +4253,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ },
},
},
},
@@ -4034,6 +4279,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4056,6 +4307,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = NULL,
[INTERNAL_SSC] = NULL,
},
+ [TYPE_DP] = {
+ [NO_SSC] = NULL,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4108,6 +4362,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
[INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
},
},
},
@@ -4131,6 +4388,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4153,6 +4416,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
[INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4205,6 +4471,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
[INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
},
},
},
@@ -4218,6 +4487,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_link_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_dp_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_dp_link_cmn_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4240,6 +4515,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
[INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_dp_link_cmn_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4292,6 +4570,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
[INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_dp_link_cmn_vals,
+ },
},
},
.xcvr_diag_vals = {
@@ -4299,6 +4580,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_usb_xcvr_diag_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4321,6 +4608,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
[INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_dp_xcvr_diag_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4373,6 +4663,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
[INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_dp_xcvr_diag_ln_vals,
+ },
},
},
.pcs_cmn_vals = {
@@ -4397,6 +4690,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ },
},
},
.cmn_vals = {
@@ -4419,6 +4715,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4441,6 +4743,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = NULL,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4493,6 +4798,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ },
},
},
},
@@ -4516,6 +4824,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4538,6 +4852,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = NULL,
[INTERNAL_SSC] = NULL,
},
+ [TYPE_DP] = {
+ [NO_SSC] = NULL,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4590,6 +4907,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
[INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
},
},
},
@@ -4613,6 +4933,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4635,6 +4961,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
[INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4687,6 +5016,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
[INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
},
},
},
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index afc63552ecaf..d4c92498ad1e 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -206,7 +206,6 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx8_pcie_phy *imx8_phy;
- struct resource *res;
imx8_phy = devm_kzalloc(dev, sizeof(*imx8_phy), GFP_KERNEL);
if (!imx8_phy)
@@ -259,8 +258,7 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
"Failed to get PCIE PHY PERST control\n");
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imx8_phy->base = devm_ioremap_resource(dev, res);
+ imx8_phy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx8_phy->base))
return PTR_ERR(imx8_phy->base);
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index a29b4a6f7c24..88826ceb72f8 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -27,17 +27,231 @@
#define PHY_CTRL2_TXENABLEN0 BIT(8)
#define PHY_CTRL2_OTG_DISABLE BIT(9)
+#define PHY_CTRL3 0xc
+#define PHY_CTRL3_COMPDISTUNE_MASK GENMASK(2, 0)
+#define PHY_CTRL3_TXPREEMP_TUNE_MASK GENMASK(16, 15)
+#define PHY_CTRL3_TXRISE_TUNE_MASK GENMASK(21, 20)
+#define PHY_CTRL3_TXVREF_TUNE_MASK GENMASK(25, 22)
+#define PHY_CTRL3_TX_VBOOST_LEVEL_MASK GENMASK(31, 29)
+
+#define PHY_CTRL4 0x10
+#define PHY_CTRL4_PCS_TX_DEEMPH_3P5DB_MASK GENMASK(20, 15)
+
+#define PHY_CTRL5 0x14
+#define PHY_CTRL5_DMPWD_OVERRIDE_SEL BIT(23)
+#define PHY_CTRL5_DMPWD_OVERRIDE BIT(22)
+#define PHY_CTRL5_DPPWD_OVERRIDE_SEL BIT(21)
+#define PHY_CTRL5_DPPWD_OVERRIDE BIT(20)
+#define PHY_CTRL5_PCS_TX_SWING_FULL_MASK GENMASK(6, 0)
+
#define PHY_CTRL6 0x18
#define PHY_CTRL6_ALT_CLK_EN BIT(1)
#define PHY_CTRL6_ALT_CLK_SEL BIT(0)
+#define PHY_TUNE_DEFAULT 0xffffffff
+
struct imx8mq_usb_phy {
struct phy *phy;
struct clk *clk;
void __iomem *base;
struct regulator *vbus;
+ u32 pcs_tx_swing_full;
+ u32 pcs_tx_deemph_3p5db;
+ u32 tx_vref_tune;
+ u32 tx_rise_tune;
+ u32 tx_preemp_amp_tune;
+ u32 tx_vboost_level;
+ u32 comp_dis_tune;
};
+static u32 phy_tx_vref_tune_from_property(u32 percent)
+{
+ percent = clamp(percent, 94U, 124U);
+
+ return DIV_ROUND_CLOSEST(percent - 94U, 2);
+}
+
+static u32 phy_tx_rise_tune_from_property(u32 percent)
+{
+ switch (percent) {
+ case 0 ... 98:
+ return 3;
+ case 99:
+ return 2;
+ case 100 ... 101:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static u32 phy_tx_preemp_amp_tune_from_property(u32 microamp)
+{
+ microamp = min(microamp, 1800U);
+
+ return microamp / 600;
+}
+
+static u32 phy_tx_vboost_level_from_property(u32 microvolt)
+{
+ switch (microvolt) {
+ case 0 ... 960:
+ return 0;
+ case 961 ... 1160:
+ return 2;
+ default:
+ return 3;
+ }
+}
+
+static u32 phy_pcs_tx_deemph_3p5db_from_property(u32 decibel)
+{
+ return min(decibel, 36U);
+}
+
+static u32 phy_comp_dis_tune_from_property(u32 percent)
+{
+ switch (percent) {
+ case 0 ... 92:
+ return 0;
+ case 93 ... 95:
+ return 1;
+ case 96 ... 97:
+ return 2;
+ case 98 ... 102:
+ return 3;
+ case 103 ... 105:
+ return 4;
+ case 106 ... 109:
+ return 5;
+ case 110 ... 113:
+ return 6;
+ default:
+ return 7;
+ }
+}
+static u32 phy_pcs_tx_swing_full_from_property(u32 percent)
+{
+ percent = min(percent, 100U);
+
+ return (percent * 127) / 100;
+}
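
A standalone sketch (plain C, not driver code) checking a couple of the conversions above: tx-vref-tune is clamped to 94-124% and mapped in 2% steps, while pcs-tx-swing-full scales a 0-100% ratio onto a 7-bit field.

#include <stdio.h>

static unsigned int tx_vref_tune(unsigned int percent)
{
	if (percent < 94)
		percent = 94;
	if (percent > 124)
		percent = 124;
	return (percent - 94 + 1) / 2;	/* DIV_ROUND_CLOSEST(percent - 94, 2) */
}

static unsigned int pcs_tx_swing_full(unsigned int percent)
{
	if (percent > 100)
		percent = 100;
	return (percent * 127) / 100;
}

int main(void)
{
	/* prints 3 15 127 63 */
	printf("%u %u %u %u\n", tx_vref_tune(100), tx_vref_tune(124),
	       pcs_tx_swing_full(100), pcs_tx_swing_full(50));
	return 0;
}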
+
+static void imx8m_get_phy_tuning_data(struct imx8mq_usb_phy *imx_phy)
+{
+ struct device *dev = imx_phy->phy->dev.parent;
+
+ if (device_property_read_u32(dev, "fsl,phy-tx-vref-tune-percent",
+ &imx_phy->tx_vref_tune))
+ imx_phy->tx_vref_tune = PHY_TUNE_DEFAULT;
+ else
+ imx_phy->tx_vref_tune =
+ phy_tx_vref_tune_from_property(imx_phy->tx_vref_tune);
+
+ if (device_property_read_u32(dev, "fsl,phy-tx-rise-tune-percent",
+ &imx_phy->tx_rise_tune))
+ imx_phy->tx_rise_tune = PHY_TUNE_DEFAULT;
+ else
+ imx_phy->tx_rise_tune =
+ phy_tx_rise_tune_from_property(imx_phy->tx_rise_tune);
+
+ if (device_property_read_u32(dev, "fsl,phy-tx-preemp-amp-tune-microamp",
+ &imx_phy->tx_preemp_amp_tune))
+ imx_phy->tx_preemp_amp_tune = PHY_TUNE_DEFAULT;
+ else
+ imx_phy->tx_preemp_amp_tune =
+ phy_tx_preemp_amp_tune_from_property(imx_phy->tx_preemp_amp_tune);
+
+ if (device_property_read_u32(dev, "fsl,phy-tx-vboost-level-microvolt",
+ &imx_phy->tx_vboost_level))
+ imx_phy->tx_vboost_level = PHY_TUNE_DEFAULT;
+ else
+ imx_phy->tx_vboost_level =
+ phy_tx_vboost_level_from_property(imx_phy->tx_vboost_level);
+
+ if (device_property_read_u32(dev, "fsl,phy-comp-dis-tune-percent",
+ &imx_phy->comp_dis_tune))
+ imx_phy->comp_dis_tune = PHY_TUNE_DEFAULT;
+ else
+ imx_phy->comp_dis_tune =
+ phy_comp_dis_tune_from_property(imx_phy->comp_dis_tune);
+
+ if (device_property_read_u32(dev, "fsl,pcs-tx-deemph-3p5db-attenuation-db",
+ &imx_phy->pcs_tx_deemph_3p5db))
+ imx_phy->pcs_tx_deemph_3p5db = PHY_TUNE_DEFAULT;
+ else
+ imx_phy->pcs_tx_deemph_3p5db =
+ phy_pcs_tx_deemph_3p5db_from_property(imx_phy->pcs_tx_deemph_3p5db);
+
+ if (device_property_read_u32(dev, "fsl,phy-pcs-tx-swing-full-percent",
+ &imx_phy->pcs_tx_swing_full))
+ imx_phy->pcs_tx_swing_full = PHY_TUNE_DEFAULT;
+ else
+ imx_phy->pcs_tx_swing_full =
+ phy_pcs_tx_swing_full_from_property(imx_phy->pcs_tx_swing_full);
+}
+
+static void imx8m_phy_tune(struct imx8mq_usb_phy *imx_phy)
+{
+ u32 value;
+
+ /* PHY tuning */
+ if (imx_phy->pcs_tx_deemph_3p5db != PHY_TUNE_DEFAULT) {
+ value = readl(imx_phy->base + PHY_CTRL4);
+ value &= ~PHY_CTRL4_PCS_TX_DEEMPH_3P5DB_MASK;
+ value |= FIELD_PREP(PHY_CTRL4_PCS_TX_DEEMPH_3P5DB_MASK,
+ imx_phy->pcs_tx_deemph_3p5db);
+ writel(value, imx_phy->base + PHY_CTRL4);
+ }
+
+ if (imx_phy->pcs_tx_swing_full != PHY_TUNE_DEFAULT) {
+ value = readl(imx_phy->base + PHY_CTRL5);
+ value |= FIELD_PREP(PHY_CTRL5_PCS_TX_SWING_FULL_MASK,
+ imx_phy->pcs_tx_swing_full);
+ writel(value, imx_phy->base + PHY_CTRL5);
+ }
+
+ if ((imx_phy->tx_vref_tune & imx_phy->tx_rise_tune &
+ imx_phy->tx_preemp_amp_tune & imx_phy->comp_dis_tune &
+ imx_phy->tx_vboost_level) == PHY_TUNE_DEFAULT)
+ /* If all of them are still the default value, no need to update. */
+ return;
+
+ value = readl(imx_phy->base + PHY_CTRL3);
+
+ if (imx_phy->tx_vref_tune != PHY_TUNE_DEFAULT) {
+ value &= ~PHY_CTRL3_TXVREF_TUNE_MASK;
+ value |= FIELD_PREP(PHY_CTRL3_TXVREF_TUNE_MASK,
+ imx_phy->tx_vref_tune);
+ }
+
+ if (imx_phy->tx_rise_tune != PHY_TUNE_DEFAULT) {
+ value &= ~PHY_CTRL3_TXRISE_TUNE_MASK;
+ value |= FIELD_PREP(PHY_CTRL3_TXRISE_TUNE_MASK,
+ imx_phy->tx_rise_tune);
+ }
+
+ if (imx_phy->tx_preemp_amp_tune != PHY_TUNE_DEFAULT) {
+ value &= ~PHY_CTRL3_TXPREEMP_TUNE_MASK;
+ value |= FIELD_PREP(PHY_CTRL3_TXPREEMP_TUNE_MASK,
+ imx_phy->tx_preemp_amp_tune);
+ }
+
+ if (imx_phy->comp_dis_tune != PHY_TUNE_DEFAULT) {
+ value &= ~PHY_CTRL3_COMPDISTUNE_MASK;
+ value |= FIELD_PREP(PHY_CTRL3_COMPDISTUNE_MASK,
+ imx_phy->comp_dis_tune);
+ }
+
+ if (imx_phy->tx_vboost_level != PHY_TUNE_DEFAULT) {
+ value &= ~PHY_CTRL3_TX_VBOOST_LEVEL_MASK;
+ value |= FIELD_PREP(PHY_CTRL3_TX_VBOOST_LEVEL_MASK,
+ imx_phy->tx_vboost_level);
+ }
+
+ writel(value, imx_phy->base + PHY_CTRL3);
+}
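
The early return above relies on PHY_TUNE_DEFAULT being all-ones: ANDing the PHY_CTRL3-related tuning words only stays 0xffffffff when every one of them is still at its default, i.e. none of the corresponding properties was set. A standalone sketch (not driver code):

#include <stdio.h>

#define PHY_TUNE_DEFAULT 0xffffffffu

int main(void)
{
	unsigned int vref = PHY_TUNE_DEFAULT, rise = PHY_TUNE_DEFAULT, comp = 3;

	printf("all default: %d\n", (vref & rise) == PHY_TUNE_DEFAULT);		/* 1 */
	printf("one tuned  : %d\n", (vref & rise & comp) == PHY_TUNE_DEFAULT);	/* 0 */
	return 0;
}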
+
static int imx8mq_usb_phy_init(struct phy *phy)
{
struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy);
@@ -99,6 +313,8 @@ static int imx8mp_usb_phy_init(struct phy *phy)
value &= ~(PHY_CTRL1_RESET | PHY_CTRL1_ATERESET);
writel(value, imx_phy->base + PHY_CTRL1);
+ imx8m_phy_tune(imx_phy);
+
return 0;
}
@@ -182,6 +398,8 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
phy_set_drvdata(imx_phy->phy, imx_phy);
+ imx8m_get_phy_tuning_data(imx_phy);
+
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
diff --git a/drivers/phy/hisilicon/Kconfig b/drivers/phy/hisilicon/Kconfig
index d3b92c288554..6c89136fc8c2 100644
--- a/drivers/phy/hisilicon/Kconfig
+++ b/drivers/phy/hisilicon/Kconfig
@@ -54,7 +54,7 @@ config PHY_HISTB_COMBPHY
config PHY_HISI_INNO_USB2
tristate "HiSilicon INNO USB2 PHY support"
- depends on (ARCH_HISI && ARM64) || COMPILE_TEST
+ depends on ARCH_HISI || COMPILE_TEST
select GENERIC_PHY
select MFD_SYSCON
help
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index b133ae06757a..15dafe359552 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
@@ -20,12 +20,25 @@
#define PHY_CLK_STABLE_TIME 2 /* unit:ms */
#define UTMI_RST_COMPLETE_TIME 2 /* unit:ms */
#define POR_RST_COMPLETE_TIME 300 /* unit:us */
+
+#define PHY_TYPE_0 0
+#define PHY_TYPE_1 1
+
#define PHY_TEST_DATA GENMASK(7, 0)
-#define PHY_TEST_ADDR GENMASK(15, 8)
-#define PHY_TEST_PORT GENMASK(18, 16)
-#define PHY_TEST_WREN BIT(21)
-#define PHY_TEST_CLK BIT(22) /* rising edge active */
-#define PHY_TEST_RST BIT(23) /* low active */
+#define PHY_TEST_ADDR_OFFSET 8
+#define PHY0_TEST_ADDR GENMASK(15, 8)
+#define PHY0_TEST_PORT_OFFSET 16
+#define PHY0_TEST_PORT GENMASK(18, 16)
+#define PHY0_TEST_WREN BIT(21)
+#define PHY0_TEST_CLK BIT(22) /* rising edge active */
+#define PHY0_TEST_RST BIT(23) /* low active */
+#define PHY1_TEST_ADDR GENMASK(11, 8)
+#define PHY1_TEST_PORT_OFFSET 12
+#define PHY1_TEST_PORT BIT(12)
+#define PHY1_TEST_WREN BIT(13)
+#define PHY1_TEST_CLK BIT(14) /* rising edge active */
+#define PHY1_TEST_RST BIT(15) /* low active */
+
#define PHY_CLK_ENABLE BIT(2)
struct hisi_inno_phy_port {
@@ -37,6 +50,7 @@ struct hisi_inno_phy_priv {
void __iomem *mmio;
struct clk *ref_clk;
struct reset_control *por_rst;
+ unsigned int type;
struct hisi_inno_phy_port ports[INNO_PHY_PORT_NUM];
};
@@ -45,17 +59,27 @@ static void hisi_inno_phy_write_reg(struct hisi_inno_phy_priv *priv,
{
void __iomem *reg = priv->mmio;
u32 val;
-
- val = (data & PHY_TEST_DATA) |
- ((addr << 8) & PHY_TEST_ADDR) |
- ((port << 16) & PHY_TEST_PORT) |
- PHY_TEST_WREN | PHY_TEST_RST;
+ u32 value;
+
+ if (priv->type == PHY_TYPE_0)
+ val = (data & PHY_TEST_DATA) |
+ ((addr << PHY_TEST_ADDR_OFFSET) & PHY0_TEST_ADDR) |
+ ((port << PHY0_TEST_PORT_OFFSET) & PHY0_TEST_PORT) |
+ PHY0_TEST_WREN | PHY0_TEST_RST;
+ else
+ val = (data & PHY_TEST_DATA) |
+ ((addr << PHY_TEST_ADDR_OFFSET) & PHY1_TEST_ADDR) |
+ ((port << PHY1_TEST_PORT_OFFSET) & PHY1_TEST_PORT) |
+ PHY1_TEST_WREN | PHY1_TEST_RST;
writel(val, reg);
- val |= PHY_TEST_CLK;
- writel(val, reg);
+ value = val;
+ if (priv->type == PHY_TYPE_0)
+ value |= PHY0_TEST_CLK;
+ else
+ value |= PHY1_TEST_CLK;
+ writel(value, reg);
- val &= ~PHY_TEST_CLK;
writel(val, reg);
}
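
A standalone sketch (not driver code) of the write protocol implemented above: the assembled word is written three times, with the type-specific test-clock bit low, high, then low again, so the data/address/port fields are latched on the rising clock edge.

#include <stdio.h>

#define TEST_CLK (1u << 22)	/* PHY0_TEST_CLK position, for illustration only */

static void pulse_write(unsigned int val)
{
	printf("writel 0x%08x\n", val);			/* fields set up, clk low */
	printf("writel 0x%08x\n", val | TEST_CLK);	/* clk high: latch */
	printf("writel 0x%08x\n", val);			/* clk back low */
}

int main(void)
{
	pulse_write(0x00a10055);	/* arbitrary example word */
	return 0;
}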
@@ -135,6 +159,8 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
if (IS_ERR(priv->por_rst))
return PTR_ERR(priv->por_rst);
+ priv->type = (uintptr_t) of_device_get_match_data(dev);
+
for_each_child_of_node(np, child) {
struct reset_control *rst;
struct phy *phy;
@@ -170,8 +196,12 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
}
static const struct of_device_id hisi_inno_phy_of_match[] = {
- { .compatible = "hisilicon,inno-usb2-phy", },
- { .compatible = "hisilicon,hi3798cv200-usb2-phy", },
+ { .compatible = "hisilicon,inno-usb2-phy",
+ .data = (void *) PHY_TYPE_0 },
+ { .compatible = "hisilicon,hi3798cv200-usb2-phy",
+ .data = (void *) PHY_TYPE_0 },
+ { .compatible = "hisilicon,hi3798mv100-usb2-phy",
+ .data = (void *) PHY_TYPE_1 },
{ },
};
MODULE_DEVICE_TABLE(of, hisi_inno_phy_of_match);
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index e906a82791bd..0d110e50bbfd 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -7,6 +7,7 @@
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
@@ -264,6 +265,8 @@
#define TPHY_CLKS_CNT 2
+#define USER_BUF_LEN(count) min_t(size_t, 8, (count))
+
enum mtk_phy_version {
MTK_PHY_V1 = 1,
MTK_PHY_V2,
@@ -336,6 +339,358 @@ struct mtk_tphy {
int src_coef; /* coefficient for slew rate calibrate */
};
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+enum u2_phy_params {
+ U2P_EYE_VRT = 0,
+ U2P_EYE_TERM,
+ U2P_EFUSE_EN,
+ U2P_EFUSE_INTR,
+ U2P_DISCTH,
+ U2P_PRE_EMPHASIS,
+};
+
+enum u3_phy_params {
+ U3P_EFUSE_EN = 0,
+ U3P_EFUSE_INTR,
+ U3P_EFUSE_TX_IMP,
+ U3P_EFUSE_RX_IMP,
+};
+
+static const char *const u2_phy_files[] = {
+ [U2P_EYE_VRT] = "vrt",
+ [U2P_EYE_TERM] = "term",
+ [U2P_EFUSE_EN] = "efuse",
+ [U2P_EFUSE_INTR] = "intr",
+ [U2P_DISCTH] = "discth",
+ [U2P_PRE_EMPHASIS] = "preemph",
+};
+
+static const char *const u3_phy_files[] = {
+ [U3P_EFUSE_EN] = "efuse",
+ [U3P_EFUSE_INTR] = "intr",
+ [U3P_EFUSE_TX_IMP] = "tx-imp",
+ [U3P_EFUSE_RX_IMP] = "rx-imp",
+};
+
+static int u2_phy_params_show(struct seq_file *sf, void *unused)
+{
+ struct mtk_phy_instance *inst = sf->private;
+ const char *fname = file_dentry(sf->file)->d_iname;
+ struct u2phy_banks *u2_banks = &inst->u2_banks;
+ void __iomem *com = u2_banks->com;
+ u32 max = 0;
+ u32 tmp = 0;
+ u32 val = 0;
+ int ret;
+
+ ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case U2P_EYE_VRT:
+ tmp = readl(com + U3P_USBPHYACR1);
+ val = FIELD_GET(PA1_RG_VRT_SEL, tmp);
+ max = FIELD_MAX(PA1_RG_VRT_SEL);
+ break;
+
+ case U2P_EYE_TERM:
+ tmp = readl(com + U3P_USBPHYACR1);
+ val = FIELD_GET(PA1_RG_TERM_SEL, tmp);
+ max = FIELD_MAX(PA1_RG_TERM_SEL);
+ break;
+
+ case U2P_EFUSE_EN:
+ if (u2_banks->misc) {
+ tmp = readl(u2_banks->misc + U3P_MISC_REG1);
+ max = 1;
+ }
+
+ val = !!(tmp & MR1_EFUSE_AUTO_LOAD_DIS);
+ break;
+
+ case U2P_EFUSE_INTR:
+ tmp = readl(com + U3P_USBPHYACR1);
+ val = FIELD_GET(PA1_RG_INTR_CAL, tmp);
+ max = FIELD_MAX(PA1_RG_INTR_CAL);
+ break;
+
+ case U2P_DISCTH:
+ tmp = readl(com + U3P_USBPHYACR6);
+ val = FIELD_GET(PA6_RG_U2_DISCTH, tmp);
+ max = FIELD_MAX(PA6_RG_U2_DISCTH);
+ break;
+
+ case U2P_PRE_EMPHASIS:
+ tmp = readl(com + U3P_USBPHYACR6);
+ val = FIELD_GET(PA6_RG_U2_PRE_EMP, tmp);
+ max = FIELD_MAX(PA6_RG_U2_PRE_EMP);
+ break;
+
+ default:
+ seq_printf(sf, "invalid, %d\n", ret);
+ break;
+ }
+
+ seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max);
+
+ return 0;
+}
+
+static int u2_phy_params_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, u2_phy_params_show, inode->i_private);
+}
+
+static ssize_t u2_phy_params_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ const char *fname = file_dentry(file)->d_iname;
+ struct seq_file *sf = file->private_data;
+ struct mtk_phy_instance *inst = sf->private;
+ struct u2phy_banks *u2_banks = &inst->u2_banks;
+ void __iomem *com = u2_banks->com;
+ ssize_t rc;
+ u32 val;
+ int ret;
+
+ rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val);
+ if (rc)
+ return rc;
+
+ ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname);
+ if (ret < 0)
+ return (ssize_t)ret;
+
+ switch (ret) {
+ case U2P_EYE_VRT:
+ mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL, val);
+ break;
+
+ case U2P_EYE_TERM:
+ mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL, val);
+ break;
+
+ case U2P_EFUSE_EN:
+ if (u2_banks->misc)
+ mtk_phy_update_field(u2_banks->misc + U3P_MISC_REG1,
+ MR1_EFUSE_AUTO_LOAD_DIS, !!val);
+ break;
+
+ case U2P_EFUSE_INTR:
+ mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL, val);
+ break;
+
+ case U2P_DISCTH:
+ mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH, val);
+ break;
+
+ case U2P_PRE_EMPHASIS:
+ mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_PRE_EMP, val);
+ break;
+
+ default:
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations u2_phy_fops = {
+ .open = u2_phy_params_open,
+ .write = u2_phy_params_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void u2_phy_dbgfs_files_create(struct mtk_phy_instance *inst)
+{
+ u32 count = ARRAY_SIZE(u2_phy_files);
+ int i;
+
+ for (i = 0; i < count; i++)
+ debugfs_create_file(u2_phy_files[i], 0644, inst->phy->debugfs,
+ inst, &u2_phy_fops);
+}
+
+static int u3_phy_params_show(struct seq_file *sf, void *unused)
+{
+ struct mtk_phy_instance *inst = sf->private;
+ const char *fname = file_dentry(sf->file)->d_iname;
+ struct u3phy_banks *u3_banks = &inst->u3_banks;
+ u32 val = 0;
+ u32 max = 0;
+ u32 tmp;
+ int ret;
+
+ ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case U3P_EFUSE_EN:
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RSV);
+ val = !!(tmp & P3D_RG_EFUSE_AUTO_LOAD_DIS);
+ max = 1;
+ break;
+
+ case U3P_EFUSE_INTR:
+ tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG0);
+ val = FIELD_GET(P3A_RG_IEXT_INTR, tmp);
+ max = FIELD_MAX(P3A_RG_IEXT_INTR);
+ break;
+
+ case U3P_EFUSE_TX_IMP:
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0);
+ val = FIELD_GET(P3D_RG_TX_IMPEL, tmp);
+ max = FIELD_MAX(P3D_RG_TX_IMPEL);
+ break;
+
+ case U3P_EFUSE_RX_IMP:
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1);
+ val = FIELD_GET(P3D_RG_RX_IMPEL, tmp);
+ max = FIELD_MAX(P3D_RG_RX_IMPEL);
+ break;
+
+ default:
+ seq_printf(sf, "invalid, %d\n", ret);
+ break;
+ }
+
+ seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max);
+
+ return 0;
+}
+
+static int u3_phy_params_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, u3_phy_params_show, inode->i_private);
+}
+
+static ssize_t u3_phy_params_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ const char *fname = file_dentry(file)->d_iname;
+ struct seq_file *sf = file->private_data;
+ struct mtk_phy_instance *inst = sf->private;
+ struct u3phy_banks *u3_banks = &inst->u3_banks;
+ void __iomem *phyd = u3_banks->phyd;
+ ssize_t rc;
+ u32 val;
+ int ret;
+
+ rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val);
+ if (rc)
+ return rc;
+
+ ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname);
+ if (ret < 0)
+ return (ssize_t)ret;
+
+ switch (ret) {
+ case U3P_EFUSE_EN:
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_RSV,
+ P3D_RG_EFUSE_AUTO_LOAD_DIS, !!val);
+ break;
+
+ case U3P_EFUSE_INTR:
+ mtk_phy_update_field(u3_banks->phya + U3P_U3_PHYA_REG0,
+ P3A_RG_IEXT_INTR, val);
+ break;
+
+ case U3P_EFUSE_TX_IMP:
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL, val);
+ mtk_phy_set_bits(phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_FORCE_TX_IMPEL);
+ break;
+
+ case U3P_EFUSE_RX_IMP:
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL, val);
+ mtk_phy_set_bits(phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_FORCE_RX_IMPEL);
+ break;
+
+ default:
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations u3_phy_fops = {
+ .open = u3_phy_params_open,
+ .write = u3_phy_params_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void u3_phy_dbgfs_files_create(struct mtk_phy_instance *inst)
+{
+ u32 count = ARRAY_SIZE(u3_phy_files);
+ int i;
+
+ for (i = 0; i < count; i++)
+ debugfs_create_file(u3_phy_files[i], 0644, inst->phy->debugfs,
+ inst, &u3_phy_fops);
+}
+
+static int phy_type_show(struct seq_file *sf, void *unused)
+{
+ struct mtk_phy_instance *inst = sf->private;
+ const char *type;
+
+ switch (inst->type) {
+ case PHY_TYPE_USB2:
+ type = "USB2";
+ break;
+ case PHY_TYPE_USB3:
+ type = "USB3";
+ break;
+ case PHY_TYPE_PCIE:
+ type = "PCIe";
+ break;
+ case PHY_TYPE_SGMII:
+ type = "SGMII";
+ break;
+ case PHY_TYPE_SATA:
+ type = "SATA";
+ break;
+ default:
+ type = "";
+ }
+
+ seq_printf(sf, "%s\n", type);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(phy_type);
+
+/* These files will be removed when the PHY is released by the PHY core */
+static void phy_debugfs_init(struct mtk_phy_instance *inst)
+{
+ debugfs_create_file("type", 0444, inst->phy->debugfs, inst, &phy_type_fops);
+
+ switch (inst->type) {
+ case PHY_TYPE_USB2:
+ u2_phy_dbgfs_files_create(inst);
+ break;
+ case PHY_TYPE_USB3:
+ case PHY_TYPE_PCIE:
+ u3_phy_dbgfs_files_create(inst);
+ break;
+ default:
+ break;
+ }
+}
+
+#else
+
+static void phy_debugfs_init(struct mtk_phy_instance *inst)
+{}
+
+#endif
+
static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
@@ -1140,6 +1495,7 @@ static struct phy *mtk_phy_xlate(struct device *dev,
phy_parse_property(tphy, instance);
phy_type_set(instance);
+ phy_debugfs_init(instance);
return instance->phy;
}
diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c
index ab1b0986aa67..01bd5ea620c5 100644
--- a/drivers/phy/microchip/sparx5_serdes.c
+++ b/drivers/phy/microchip/sparx5_serdes.c
@@ -25,12 +25,17 @@
#define SPX5_SERDES_10G_START 13
#define SPX5_SERDES_25G_START 25
+#define SPX5_SERDES_6G10G_CNT SPX5_SERDES_25G_START
+
+/* Optimal power settings from GUC */
+#define SPX5_SERDES_QUIET_MODE_VAL 0x01ef4e0c
enum sparx5_10g28cmu_mode {
SPX5_SD10G28_CMU_MAIN = 0,
SPX5_SD10G28_CMU_AUX1 = 1,
SPX5_SD10G28_CMU_AUX2 = 3,
SPX5_SD10G28_CMU_NONE = 4,
+ SPX5_SD10G28_CMU_MAX,
};
enum sparx5_sd25g28_mode_preset_type {
@@ -922,6 +927,222 @@ static void sparx5_sd10g28_get_params(struct sparx5_serdes_macro *macro,
*params = init;
}
+static int sparx5_cmu_apply_cfg(struct sparx5_serdes_private *priv,
+ u32 cmu_idx,
+ void __iomem *cmu_tgt,
+ void __iomem *cmu_cfg_tgt,
+ u32 spd10g)
+{
+ void __iomem **regs = priv->regs;
+ struct device *dev = priv->dev;
+ int value;
+
+ cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
+ cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
+
+ if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
+ cmu_idx == 10 || cmu_idx == 13) {
+ spd10g = 0;
+ }
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(1),
+ SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
+ SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(1),
+ SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(0x0),
+ SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT |
+ SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT |
+ SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT |
+ SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT |
+ SD_CMU_CMU_45_R_EN_RATECHG_CTRL,
+ cmu_tgt,
+ SD_CMU_CMU_45(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(0),
+ SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0,
+ cmu_tgt,
+ SD_CMU_CMU_47(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(0),
+ SD_CMU_CMU_1B_CFG_RESERVE_7_0,
+ cmu_tgt,
+ SD_CMU_CMU_1B(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_JC_BYP_SET(0x1),
+ SD_CMU_CMU_0D_CFG_JC_BYP,
+ cmu_tgt,
+ SD_CMU_CMU_0D(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_1F_CFG_VTUNE_SEL_SET(1),
+ SD_CMU_CMU_1F_CFG_VTUNE_SEL,
+ cmu_tgt,
+ SD_CMU_CMU_1F(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_SET(3),
+ SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0,
+ cmu_tgt,
+ SD_CMU_CMU_00(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_SET(3),
+ SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0,
+ cmu_tgt,
+ SD_CMU_CMU_05(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(1),
+ SD_CMU_CMU_30_R_PLL_DLOL_EN,
+ cmu_tgt,
+ SD_CMU_CMU_30(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_09_CFG_SW_10G_SET(spd10g),
+ SD_CMU_CMU_09_CFG_SW_10G,
+ cmu_tgt,
+ SD_CMU_CMU_09(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(0),
+ SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ msleep(20);
+
+ sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(0),
+ SD_CMU_CMU_44_R_PLL_RSTN,
+ cmu_tgt,
+ SD_CMU_CMU_44(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(1),
+ SD_CMU_CMU_44_R_PLL_RSTN,
+ cmu_tgt,
+ SD_CMU_CMU_44(cmu_idx));
+
+ msleep(20);
+
+ value = readl(sdx5_addr(regs, SD_CMU_CMU_E0(cmu_idx)));
+ value = SD_CMU_CMU_E0_PLL_LOL_UDL_GET(value);
+
+ if (value) {
+ dev_err(dev, "CMU PLL Loss of Lock: 0x%x\n", value);
+ return -EINVAL;
+ }
+ sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_SET(0),
+ SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD,
+ cmu_tgt,
+ SD_CMU_CMU_0D(cmu_idx));
+ return 0;
+}
+
+static int sparx5_cmu_cfg(struct sparx5_serdes_private *priv, u32 cmu_idx)
+{
+ void __iomem *cmu_tgt, *cmu_cfg_tgt;
+ u32 spd10g = 1;
+
+ if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
+ cmu_idx == 10 || cmu_idx == 13) {
+ spd10g = 0;
+ }
+
+ cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
+ cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
+
+ return sparx5_cmu_apply_cfg(priv, cmu_idx, cmu_tgt, cmu_cfg_tgt, spd10g);
+}
+
+/* Map of 6G/10G serdes mode and index to CMU index. */
+static const int
+sparx5_serdes_cmu_map[SPX5_SD10G28_CMU_MAX][SPX5_SERDES_6G10G_CNT] = {
+ [SPX5_SD10G28_CMU_MAIN] = { 2, 2, 2, 2, 2,
+ 2, 2, 2, 5, 5,
+ 5, 5, 5, 5, 5,
+ 5, 8, 11, 11, 11,
+ 11, 11, 11, 11, 11 },
+ [SPX5_SD10G28_CMU_AUX1] = { 0, 0, 3, 3, 3,
+ 3, 3, 3, 3, 3,
+ 6, 6, 6, 6, 6,
+ 6, 6, 9, 9, 12,
+ 12, 12, 12, 12, 12 },
+ [SPX5_SD10G28_CMU_AUX2] = { 1, 1, 1, 1, 4,
+ 4, 4, 4, 4, 4,
+ 4, 4, 7, 7, 7,
+ 7, 7, 10, 10, 10,
+ 10, 13, 13, 13, 13 },
+ [SPX5_SD10G28_CMU_NONE] = { 1, 1, 1, 1, 4,
+ 4, 4, 4, 4, 4,
+ 4, 4, 7, 7, 7,
+ 7, 7, 10, 10, 10,
+ 10, 13, 13, 13, 13 },
+};
+
+/* Get the index of the CMU which provides the clock for the specified serdes
+ * mode and index.
+ */
+static int sparx5_serdes_cmu_get(enum sparx5_10g28cmu_mode mode, int sd_index)
+{
+ return sparx5_serdes_cmu_map[mode][sd_index];
+}
+
+static void sparx5_serdes_cmu_power_off(struct sparx5_serdes_private *priv)
+{
+ void __iomem *cmu_inst, *cmu_cfg_inst;
+ int i;
+
+ /* Power down each CMU */
+ for (i = 0; i < SPX5_CMU_MAX; i++) {
+ cmu_inst = sdx5_inst_get(priv, TARGET_SD_CMU, i);
+ cmu_cfg_inst = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, i);
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
+ SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST, cmu_cfg_inst,
+ SD_CMU_CFG_SD_CMU_CFG(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_05_CFG_REFCK_TERM_EN_SET(0),
+ SD_CMU_CMU_05_CFG_REFCK_TERM_EN, cmu_inst,
+ SD_CMU_CMU_05(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_09_CFG_EN_TX_CK_DN_SET(0),
+ SD_CMU_CMU_09_CFG_EN_TX_CK_DN, cmu_inst,
+ SD_CMU_CMU_09(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_06_CFG_VCO_PD_SET(1),
+ SD_CMU_CMU_06_CFG_VCO_PD, cmu_inst,
+ SD_CMU_CMU_06(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_09_CFG_EN_TX_CK_UP_SET(0),
+ SD_CMU_CMU_09_CFG_EN_TX_CK_UP, cmu_inst,
+ SD_CMU_CMU_09(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_08_CFG_CK_TREE_PD_SET(1),
+ SD_CMU_CMU_08_CFG_CK_TREE_PD, cmu_inst,
+ SD_CMU_CMU_08(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_REFCK_PD_SET(1) |
+ SD_CMU_CMU_0D_CFG_PD_DIV64_SET(1) |
+ SD_CMU_CMU_0D_CFG_PD_DIV66_SET(1),
+ SD_CMU_CMU_0D_CFG_REFCK_PD |
+ SD_CMU_CMU_0D_CFG_PD_DIV64 |
+ SD_CMU_CMU_0D_CFG_PD_DIV66, cmu_inst,
+ SD_CMU_CMU_0D(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD_SET(1),
+ SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD, cmu_inst,
+ SD_CMU_CMU_06(0));
+ }
+}
+
static void sparx5_sd25g28_reset(void __iomem *regs[],
struct sparx5_sd25g28_params *params,
u32 sd_index)
@@ -1422,7 +1643,17 @@ static int sparx5_sd10g28_apply_params(struct sparx5_serdes_macro *macro,
u32 lane_index = macro->sidx;
u32 sd_index = macro->stpidx;
void __iomem *sd_inst;
- u32 value;
+ u32 value, cmu_idx;
+ int err;
+
+ /* Do not configure the serdes if the CMU is not going to be configured too */
+ if (params->skip_cmu_cfg)
+ return 0;
+
+ cmu_idx = sparx5_serdes_cmu_get(params->cmu_sel, lane_index);
+ err = sparx5_cmu_cfg(priv, cmu_idx);
+ if (err)
+ return err;
if (params->is_6g)
sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, sd_index);
@@ -1884,6 +2115,7 @@ static int sparx5_sd10g28_config(struct sparx5_serdes_macro *macro, bool reset)
.rxinvert = 1,
.txswing = 240,
.reg_rst = reset,
+ .skip_cmu_cfg = reset,
};
int err;
@@ -1899,7 +2131,7 @@ static int sparx5_sd10g28_config(struct sparx5_serdes_macro *macro, bool reset)
static int sparx5_serdes_power_save(struct sparx5_serdes_macro *macro, u32 pwdn)
{
struct sparx5_serdes_private *priv = macro->priv;
- void __iomem *sd_inst;
+ void __iomem *sd_inst, *sd_lane_inst;
if (macro->serdestype == SPX5_SDT_6G)
sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, macro->stpidx);
@@ -1909,12 +2141,36 @@ static int sparx5_serdes_power_save(struct sparx5_serdes_macro *macro, u32 pwdn)
sd_inst = sdx5_inst_get(priv, TARGET_SD25G_LANE, macro->stpidx);
if (macro->serdestype == SPX5_SDT_25G) {
+ sd_lane_inst = sdx5_inst_get(priv, TARGET_SD_LANE_25G,
+ macro->stpidx);
+ /* Take serdes out of reset */
+ sdx5_inst_rmw(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(0),
+ SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST, sd_lane_inst,
+ SD_LANE_25G_SD_LANE_CFG(0));
+
+ /* Configure optimal settings for quiet mode */
+ sdx5_inst_rmw(SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE_SET(SPX5_SERDES_QUIET_MODE_VAL),
+ SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE,
+ sd_lane_inst, SD_LANE_25G_QUIET_MODE_6G(0));
+
sdx5_inst_rmw(SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER_SET(pwdn),
SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER,
sd_inst,
SD25G_LANE_LANE_04(0));
} else {
/* 6G and 10G */
+ sd_lane_inst = sdx5_inst_get(priv, TARGET_SD_LANE, macro->sidx);
+
+ /* Take serdes out of reset */
+ sdx5_inst_rmw(SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(0),
+ SD_LANE_SD_LANE_CFG_EXT_CFG_RST, sd_lane_inst,
+ SD_LANE_SD_LANE_CFG(0));
+
+ /* Configure optimal settings for quiet mode */
+ sdx5_inst_rmw(SD_LANE_QUIET_MODE_6G_QUIET_MODE_SET(SPX5_SERDES_QUIET_MODE_VAL),
+ SD_LANE_QUIET_MODE_6G_QUIET_MODE, sd_lane_inst,
+ SD_LANE_QUIET_MODE_6G(0));
+
sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_PD_DRIVER_SET(pwdn),
SD10G_LANE_LANE_06_CFG_PD_DRIVER,
sd_inst,
@@ -1939,159 +2195,6 @@ static int sparx5_serdes_clock_config(struct sparx5_serdes_macro *macro)
return 0;
}
-static int sparx5_cmu_apply_cfg(struct sparx5_serdes_private *priv,
- u32 cmu_idx,
- void __iomem *cmu_tgt,
- void __iomem *cmu_cfg_tgt,
- u32 spd10g)
-{
- void __iomem **regs = priv->regs;
- struct device *dev = priv->dev;
- int value;
-
- cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
- cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
-
- if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
- cmu_idx == 10 || cmu_idx == 13) {
- spd10g = 0;
- }
-
- sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(1),
- SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
- cmu_cfg_tgt,
- SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
- SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
- cmu_cfg_tgt,
- SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(1),
- SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
- cmu_cfg_tgt,
- SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_SET(0x1) |
- SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_SET(0x1) |
- SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_SET(0x1) |
- SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_SET(0x1) |
- SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(0x0),
- SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT |
- SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT |
- SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT |
- SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT |
- SD_CMU_CMU_45_R_EN_RATECHG_CTRL,
- cmu_tgt,
- SD_CMU_CMU_45(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(0),
- SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0,
- cmu_tgt,
- SD_CMU_CMU_47(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(0),
- SD_CMU_CMU_1B_CFG_RESERVE_7_0,
- cmu_tgt,
- SD_CMU_CMU_1B(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_JC_BYP_SET(0x1),
- SD_CMU_CMU_0D_CFG_JC_BYP,
- cmu_tgt,
- SD_CMU_CMU_0D(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_1F_CFG_VTUNE_SEL_SET(1),
- SD_CMU_CMU_1F_CFG_VTUNE_SEL,
- cmu_tgt,
- SD_CMU_CMU_1F(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_SET(3),
- SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0,
- cmu_tgt,
- SD_CMU_CMU_00(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_SET(3),
- SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0,
- cmu_tgt,
- SD_CMU_CMU_05(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(1),
- SD_CMU_CMU_30_R_PLL_DLOL_EN,
- cmu_tgt,
- SD_CMU_CMU_30(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_09_CFG_SW_10G_SET(spd10g),
- SD_CMU_CMU_09_CFG_SW_10G,
- cmu_tgt,
- SD_CMU_CMU_09(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(0),
- SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
- cmu_cfg_tgt,
- SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
-
- msleep(20);
-
- sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(0),
- SD_CMU_CMU_44_R_PLL_RSTN,
- cmu_tgt,
- SD_CMU_CMU_44(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(1),
- SD_CMU_CMU_44_R_PLL_RSTN,
- cmu_tgt,
- SD_CMU_CMU_44(cmu_idx));
-
- msleep(20);
-
- value = readl(sdx5_addr(regs, SD_CMU_CMU_E0(cmu_idx)));
- value = SD_CMU_CMU_E0_PLL_LOL_UDL_GET(value);
-
- if (value) {
- dev_err(dev, "CMU PLL Loss of Lock: 0x%x\n", value);
- return -EINVAL;
- }
- sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_SET(0),
- SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD,
- cmu_tgt,
- SD_CMU_CMU_0D(cmu_idx));
- return 0;
-}
-
-static int sparx5_cmu_cfg(struct sparx5_serdes_private *priv, u32 cmu_idx)
-{
- void __iomem *cmu_tgt, *cmu_cfg_tgt;
- u32 spd10g = 1;
-
- if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
- cmu_idx == 10 || cmu_idx == 13) {
- spd10g = 0;
- }
-
- cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
- cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
-
- return sparx5_cmu_apply_cfg(priv, cmu_idx, cmu_tgt, cmu_cfg_tgt, spd10g);
-}
-
-static int sparx5_serdes_cmu_enable(struct sparx5_serdes_private *priv)
-{
- int idx, err = 0;
-
- if (!priv->cmu_enabled) {
- for (idx = 0; idx < SPX5_CMU_MAX; idx++) {
- err = sparx5_cmu_cfg(priv, idx);
- if (err) {
- dev_err(priv->dev, "CMU %u, error: %d\n", idx, err);
- goto leave;
- }
- }
- priv->cmu_enabled = true;
- }
-leave:
- return err;
-}
-
static int sparx5_serdes_get_serdesmode(phy_interface_t portmode, int speed)
{
switch (portmode) {
@@ -2120,10 +2223,6 @@ static int sparx5_serdes_config(struct sparx5_serdes_macro *macro)
int serdesmode;
int err;
- err = sparx5_serdes_cmu_enable(macro->priv);
- if (err)
- return err;
-
serdesmode = sparx5_serdes_get_serdesmode(macro->portmode, macro->speed);
if (serdesmode < 0) {
dev_err(dev, "SerDes %u, interface not supported: %s\n",
@@ -2215,9 +2314,6 @@ static int sparx5_serdes_reset(struct phy *phy)
struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
int err;
- err = sparx5_serdes_cmu_enable(macro->priv);
- if (err)
- return err;
if (macro->serdestype == SPX5_SDT_25G)
err = sparx5_sd25g28_config(macro, true);
else
@@ -2308,6 +2404,9 @@ static int sparx5_phy_create(struct sparx5_serdes_private *priv,
phy_set_drvdata(*phy, macro);
+ /* Power off serdes by default */
+ sparx5_serdes_power_off(*phy);
+
return 0;
}
@@ -2491,6 +2590,9 @@ static int sparx5_serdes_probe(struct platform_device *pdev)
return err;
}
+ /* Power down all CMUs by default */
+ sparx5_serdes_cmu_power_off(priv);
+
provider = devm_of_phy_provider_register(priv->dev, sparx5_serdes_xlate);
return PTR_ERR_OR_ZERO(provider);
diff --git a/drivers/phy/microchip/sparx5_serdes.h b/drivers/phy/microchip/sparx5_serdes.h
index 0a3e496e6210..13f94a29225a 100644
--- a/drivers/phy/microchip/sparx5_serdes.h
+++ b/drivers/phy/microchip/sparx5_serdes.h
@@ -30,7 +30,6 @@ struct sparx5_serdes_private {
struct device *dev;
void __iomem *regs[NUM_TARGETS];
struct phy *phys[SPX5_SERDES_MAX];
- bool cmu_enabled;
unsigned long coreclock;
};
diff --git a/drivers/phy/microchip/sparx5_serdes_regs.h b/drivers/phy/microchip/sparx5_serdes_regs.h
index b96386a4df5a..d0543fd3dc94 100644
--- a/drivers/phy/microchip/sparx5_serdes_regs.h
+++ b/drivers/phy/microchip/sparx5_serdes_regs.h
@@ -2149,6 +2149,92 @@ enum sparx5_serdes_target {
#define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_GET(x)\
FIELD_GET(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0, x)
+/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_06 */
+#define SD_CMU_CMU_06(t) \
+ __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 4, 0, 1, 4)
+
+#define SD_CMU_CMU_06_CFG_DISLOS BIT(0)
+#define SD_CMU_CMU_06_CFG_DISLOS_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_DISLOS, x)
+#define SD_CMU_CMU_06_CFG_DISLOS_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_DISLOS, x)
+
+#define SD_CMU_CMU_06_CFG_DISLOL BIT(1)
+#define SD_CMU_CMU_06_CFG_DISLOL_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_DISLOL, x)
+#define SD_CMU_CMU_06_CFG_DISLOL_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_DISLOL, x)
+
+#define SD_CMU_CMU_06_CFG_DCLOL BIT(2)
+#define SD_CMU_CMU_06_CFG_DCLOL_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_DCLOL, x)
+#define SD_CMU_CMU_06_CFG_DCLOL_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_DCLOL, x)
+
+#define SD_CMU_CMU_06_CFG_FORCE_RX_FILT BIT(3)
+#define SD_CMU_CMU_06_CFG_FORCE_RX_FILT_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_FORCE_RX_FILT, x)
+#define SD_CMU_CMU_06_CFG_FORCE_RX_FILT_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_FORCE_RX_FILT, x)
+
+#define SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD BIT(4)
+#define SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD, x)
+#define SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD, x)
+
+#define SD_CMU_CMU_06_CFG_VCO_PD BIT(5)
+#define SD_CMU_CMU_06_CFG_VCO_PD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_VCO_PD, x)
+#define SD_CMU_CMU_06_CFG_VCO_PD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_VCO_PD, x)
+
+#define SD_CMU_CMU_06_CFG_VCO_CAL_RESETN BIT(6)
+#define SD_CMU_CMU_06_CFG_VCO_CAL_RESETN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_VCO_CAL_RESETN, x)
+#define SD_CMU_CMU_06_CFG_VCO_CAL_RESETN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_VCO_CAL_RESETN, x)
+
+#define SD_CMU_CMU_06_CFG_VCO_CAL_BYP BIT(7)
+#define SD_CMU_CMU_06_CFG_VCO_CAL_BYP_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_VCO_CAL_BYP, x)
+#define SD_CMU_CMU_06_CFG_VCO_CAL_BYP_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_VCO_CAL_BYP, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_08 */
+#define SD_CMU_CMU_08(t) \
+ __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 12, 0, 1, 4)
+
+#define SD_CMU_CMU_08_CFG_VFILT2PAD BIT(0)
+#define SD_CMU_CMU_08_CFG_VFILT2PAD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_VFILT2PAD, x)
+#define SD_CMU_CMU_08_CFG_VFILT2PAD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_VFILT2PAD, x)
+
+#define SD_CMU_CMU_08_CFG_EN_DUMMY BIT(1)
+#define SD_CMU_CMU_08_CFG_EN_DUMMY_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_EN_DUMMY, x)
+#define SD_CMU_CMU_08_CFG_EN_DUMMY_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_EN_DUMMY, x)
+
+#define SD_CMU_CMU_08_CFG_CK_TREE_PD BIT(2)
+#define SD_CMU_CMU_08_CFG_CK_TREE_PD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_CK_TREE_PD, x)
+#define SD_CMU_CMU_08_CFG_CK_TREE_PD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_CK_TREE_PD, x)
+
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN BIT(3)
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN, x)
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN, x)
+
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN BIT(4)
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN, x)
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN, x)
+
/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_09 */
#define SD_CMU_CMU_09(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 16, 0, 1, 4)
@@ -2443,6 +2529,16 @@ enum sparx5_serdes_target {
#define SD_LANE_SD_LANE_STAT_DBG_OBS_GET(x)\
FIELD_GET(SD_LANE_SD_LANE_STAT_DBG_OBS, x)
+/* SD_LANE_TARGET:SD_PWR_CFG:QUIET_MODE_6G */
+#define SD_LANE_QUIET_MODE_6G(t) \
+ __REG(TARGET_SD_LANE, t, 25, 24, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_QUIET_MODE_6G_QUIET_MODE GENMASK(24, 0)
+#define SD_LANE_QUIET_MODE_6G_QUIET_MODE_SET(x)\
+ FIELD_PREP(SD_LANE_QUIET_MODE_6G_QUIET_MODE, x)
+#define SD_LANE_QUIET_MODE_6G_QUIET_MODE_GET(x)\
+ FIELD_GET(SD_LANE_QUIET_MODE_6G_QUIET_MODE, x)
+
/* SD_LANE_TARGET:CFG_STAT_FX100:MISC */
#define SD_LANE_MISC(t) __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 0, 0, 1, 4)
@@ -2692,4 +2788,14 @@ enum sparx5_serdes_target {
#define SD_LANE_25G_SD_LANE_STAT_DBG_OBS_GET(x)\
FIELD_GET(SD_LANE_25G_SD_LANE_STAT_DBG_OBS, x)
+/* SD25G_CFG_TARGET:SD_PWR_CFG:QUIET_MODE_6G */
+#define SD_LANE_25G_QUIET_MODE_6G(t) \
+ __REG(TARGET_SD_LANE_25G, t, 8, 28, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE GENMASK(24, 0)
+#define SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE_SET(x)\
+ FIELD_PREP(SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE, x)
+#define SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE_GET(x)\
+ FIELD_GET(SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE, x)
+
#endif /* _SPARX5_SERDES_REGS_H_ */
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 6464dcb56d56..96a0b1e111f3 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -20,6 +21,7 @@
#include <linux/regulator/consumer.h>
static struct class *phy_class;
+static struct dentry *phy_debugfs_root;
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);
static LIST_HEAD(phys);
@@ -996,6 +998,8 @@ struct phy *phy_create(struct device *dev, struct device_node *node,
pm_runtime_no_callbacks(&phy->dev);
}
+ phy->debugfs = debugfs_create_dir(dev_name(&phy->dev), phy_debugfs_root);
+
return phy;
put_dev:
@@ -1226,6 +1230,7 @@ static void phy_release(struct device *dev)
phy = to_phy(dev);
dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
+ debugfs_remove_recursive(phy->debugfs);
regulator_put(phy->pwr);
ida_simple_remove(&phy_ida, phy->id);
kfree(phy);
@@ -1242,6 +1247,15 @@ static int __init phy_core_init(void)
phy_class->dev_release = phy_release;
+ phy_debugfs_root = debugfs_create_dir("phy", NULL);
+
return 0;
}
device_initcall(phy_core_init);
+
+static void __exit phy_core_exit(void)
+{
+ debugfs_remove_recursive(phy_debugfs_root);
+ class_destroy(phy_class);
+}
+module_exit(phy_core_exit);
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 4850d48f31fa..97ca5952e34e 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -59,8 +59,11 @@ if PHY_QCOM_QMP
config PHY_QCOM_QMP_COMBO
tristate "Qualcomm QMP Combo PHY Driver"
default PHY_QCOM_QMP
+ depends on TYPEC || TYPEC=n
+ depends on DRM || DRM=n
select GENERIC_PHY
select MFD_SYSCON
+ select DRM_PANEL_BRIDGE if DRM
help
Enable this to support the QMP Combo PHY transceiver that is used
with USB3 and DisplayPort controllers on Qualcomm chips.
@@ -185,3 +188,12 @@ config PHY_QCOM_IPQ806X_USB
This option enables support for the Synopsis PHYs present inside the
Qualcomm USB3.0 DWC3 controller on ipq806x SoC. This driver supports
both HS and SS PHY controllers.
+
+config PHY_QCOM_SGMII_ETH
+ tristate "Qualcomm DWMAC SGMII SerDes/PHY driver"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Enable this to support the internal SerDes/SGMII PHY on various
+ Qualcomm chipsets.
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index de3dc9ccf067..b030858e0f8d 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o
obj-$(CONFIG_PHY_QCOM_USB_SS) += phy-qcom-usb-ss.o
obj-$(CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2)+= phy-qcom-snps-femto-v2.o
-obj-$(CONFIG_PHY_QCOM_IPQ806X_USB) += phy-qcom-ipq806x-usb.o
+obj-$(CONFIG_PHY_QCOM_IPQ806X_USB) += phy-qcom-ipq806x-usb.o
+obj-$(CONFIG_PHY_QCOM_SGMII_ETH) += phy-qcom-sgmii-eth.o
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 87b17e5877ab..bebce8c591a3 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -19,6 +19,10 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_mux.h>
+
+#include <drm/drm_bridge.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
@@ -63,6 +67,10 @@
/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+/* QPHY_V3_DP_COM_TYPEC_CTRL register bits */
+#define SW_PORTSELECT_VAL BIT(0)
+#define SW_PORTSELECT_MUX BIT(1)
+
#define PHY_INIT_COMPLETE_TIMEOUT 10000
struct qmp_phy_init_tbl {
@@ -1315,14 +1323,21 @@ struct qmp_combo {
struct phy *usb_phy;
enum phy_mode mode;
+ unsigned int usb_init_count;
struct phy *dp_phy;
unsigned int dp_aux_cfg;
struct phy_configure_opts_dp dp_opts;
+ unsigned int dp_init_count;
struct clk_fixed_rate pipe_clk_fixed;
struct clk_hw dp_link_hw;
struct clk_hw dp_pixel_hw;
+
+ struct drm_bridge bridge;
+
+ struct typec_switch_dev *sw;
+ enum typec_orientation orientation;
};
static void qmp_v3_dp_aux_init(struct qmp_combo *qmp);
@@ -1954,30 +1969,24 @@ static void qmp_v3_configure_dp_tx(struct qmp_combo *qmp)
static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
{
+ bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
+ const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 val;
- bool reverse = false;
val = DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN;
- /*
- * TODO: Assume orientation is CC1 for now and two lanes, need to
- * use type-c connector to understand orientation and lanes.
- *
- * Otherwise val changes to be like below if this code understood
- * the orientation of the type-c cable.
- *
- * if (lane_cnt == 4 || orientation == ORIENTATION_CC2)
- * val |= DP_PHY_PD_CTL_LANE_0_1_PWRDN;
- * if (lane_cnt == 4 || orientation == ORIENTATION_CC1)
- * val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
- * if (orientation == ORIENTATION_CC2)
- * writel(0x4c, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_MODE);
- */
- val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
+ if (dp_opts->lanes == 4 || reverse)
+ val |= DP_PHY_PD_CTL_LANE_0_1_PWRDN;
+ if (dp_opts->lanes == 4 || !reverse)
+ val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
+
writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
- writel(0x5c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
+ if (reverse)
+ writel(0x4c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ else
+ writel(0x5c, qmp->pcs + QSERDES_DP_PHY_MODE);
return reverse;
}
@@ -2142,6 +2151,7 @@ static void qmp_v4_configure_dp_tx(struct qmp_combo *qmp)
static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
unsigned int com_resetm_ctrl_reg,
unsigned int com_c_ready_status_reg,
+ unsigned int com_cmn_status_reg,
unsigned int dp_phy_status_reg)
{
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
@@ -2198,14 +2208,14 @@ static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
10000))
return -ETIMEDOUT;
- if (readl_poll_timeout(qmp->dp_serdes + QSERDES_V4_COM_CMN_STATUS,
+ if (readl_poll_timeout(qmp->dp_serdes + com_cmn_status_reg,
status,
((status & BIT(0)) > 0),
500,
10000))
return -ETIMEDOUT;
- if (readl_poll_timeout(qmp->dp_serdes + QSERDES_V4_COM_CMN_STATUS,
+ if (readl_poll_timeout(qmp->dp_serdes + com_cmn_status_reg,
status,
((status & BIT(1)) > 0),
500,
@@ -2233,14 +2243,15 @@ static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp)
{
+ bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- bool reverse = false;
u32 status;
int ret;
ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V4_COM_RESETSM_CNTRL,
QSERDES_V4_COM_C_READY_STATUS,
+ QSERDES_V4_COM_CMN_STATUS,
QSERDES_V4_DP_PHY_STATUS);
if (ret < 0)
return ret;
@@ -2297,14 +2308,15 @@ static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp)
static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp)
{
+ bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- bool reverse = false;
u32 status;
int ret;
ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V4_COM_RESETSM_CNTRL,
QSERDES_V4_COM_C_READY_STATUS,
+ QSERDES_V4_COM_CMN_STATUS,
QSERDES_V4_DP_PHY_STATUS);
if (ret < 0)
return ret;
@@ -2356,14 +2368,15 @@ static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp)
static int qmp_v6_configure_dp_phy(struct qmp_combo *qmp)
{
+ bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- bool reverse = false;
u32 status;
int ret;
ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V6_COM_RESETSM_CNTRL,
QSERDES_V6_COM_C_READY_STATUS,
+ QSERDES_V6_COM_CMN_STATUS,
QSERDES_V6_DP_PHY_STATUS);
if (ret < 0)
return ret;
@@ -2437,12 +2450,16 @@ static int qmp_combo_dp_configure(struct phy *phy, union phy_configure_opts *opt
struct qmp_combo *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
+ mutex_lock(&qmp->phy_mutex);
+
memcpy(&qmp->dp_opts, dp_opts, sizeof(*dp_opts));
if (qmp->dp_opts.set_voltages) {
cfg->configure_dp_tx(qmp);
qmp->dp_opts.set_voltages = 0;
}
+ mutex_unlock(&qmp->phy_mutex);
+
return 0;
}
@@ -2450,24 +2467,27 @@ static int qmp_combo_dp_calibrate(struct phy *phy)
{
struct qmp_combo *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
+ int ret = 0;
+
+ mutex_lock(&qmp->phy_mutex);
if (cfg->calibrate_dp_phy)
- return cfg->calibrate_dp_phy(qmp);
+ ret = cfg->calibrate_dp_phy(qmp);
- return 0;
+ mutex_unlock(&qmp->phy_mutex);
+
+ return ret;
}
-static int qmp_combo_com_init(struct qmp_combo *qmp)
+static int qmp_combo_com_init(struct qmp_combo *qmp, bool force)
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *com = qmp->com;
int ret;
+ u32 val;
- mutex_lock(&qmp->phy_mutex);
- if (qmp->init_count++) {
- mutex_unlock(&qmp->phy_mutex);
+ if (!force && qmp->init_count++)
return 0;
- }
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
@@ -2498,10 +2518,12 @@ static int qmp_combo_com_init(struct qmp_combo *qmp)
SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
- /* Default type-c orientation, i.e CC1 */
- qphy_setbits(com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
-
- qphy_setbits(com, QPHY_V3_DP_COM_PHY_MODE_CTRL, USB3_MODE | DP_MODE);
+ /* Use software-based port select and switch based on the Type-C orientation */
+ val = SW_PORTSELECT_MUX;
+ if (qmp->orientation == TYPEC_ORIENTATION_REVERSE)
+ val |= SW_PORTSELECT_VAL;
+ writel(val, com + QPHY_V3_DP_COM_TYPEC_CTRL);
+ writel(USB3_MODE | DP_MODE, com + QPHY_V3_DP_COM_PHY_MODE_CTRL);
/* bring both QMP USB and QMP DP PHYs PCS block out of reset */
qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
@@ -2514,8 +2536,6 @@ static int qmp_combo_com_init(struct qmp_combo *qmp)
qphy_setbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
SW_PWRDN);
- mutex_unlock(&qmp->phy_mutex);
-
return 0;
err_assert_reset:
@@ -2524,20 +2544,16 @@ err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
err_decrement_count:
qmp->init_count--;
- mutex_unlock(&qmp->phy_mutex);
return ret;
}
-static int qmp_combo_com_exit(struct qmp_combo *qmp)
+static int qmp_combo_com_exit(struct qmp_combo *qmp, bool force)
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
- mutex_lock(&qmp->phy_mutex);
- if (--qmp->init_count) {
- mutex_unlock(&qmp->phy_mutex);
+ if (!force && --qmp->init_count)
return 0;
- }
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
@@ -2545,8 +2561,6 @@ static int qmp_combo_com_exit(struct qmp_combo *qmp)
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
- mutex_unlock(&qmp->phy_mutex);
-
return 0;
}
@@ -2556,20 +2570,32 @@ static int qmp_combo_dp_init(struct phy *phy)
const struct qmp_phy_cfg *cfg = qmp->cfg;
int ret;
- ret = qmp_combo_com_init(qmp);
+ mutex_lock(&qmp->phy_mutex);
+
+ ret = qmp_combo_com_init(qmp, false);
if (ret)
- return ret;
+ goto out_unlock;
cfg->dp_aux_init(qmp);
- return 0;
+ qmp->dp_init_count++;
+
+out_unlock:
+ mutex_unlock(&qmp->phy_mutex);
+ return ret;
}
static int qmp_combo_dp_exit(struct phy *phy)
{
struct qmp_combo *qmp = phy_get_drvdata(phy);
- qmp_combo_com_exit(qmp);
+ mutex_lock(&qmp->phy_mutex);
+
+ qmp_combo_com_exit(qmp, false);
+
+ qmp->dp_init_count--;
+
+ mutex_unlock(&qmp->phy_mutex);
return 0;
}
@@ -2581,6 +2607,8 @@ static int qmp_combo_dp_power_on(struct phy *phy)
void __iomem *tx = qmp->dp_tx;
void __iomem *tx2 = qmp->dp_tx2;
+ mutex_lock(&qmp->phy_mutex);
+
qmp_combo_dp_serdes_init(qmp);
qmp_combo_configure_lane(tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1);
@@ -2592,6 +2620,8 @@ static int qmp_combo_dp_power_on(struct phy *phy)
/* Configure link rate, swing, etc. */
cfg->configure_dp_phy(qmp);
+ mutex_unlock(&qmp->phy_mutex);
+
return 0;
}
@@ -2599,9 +2629,13 @@ static int qmp_combo_dp_power_off(struct phy *phy)
{
struct qmp_combo *qmp = phy_get_drvdata(phy);
+ mutex_lock(&qmp->phy_mutex);
+
/* Assert DP PHY power down */
writel(DP_PHY_PD_CTL_PSR_PWRDN, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
+ mutex_unlock(&qmp->phy_mutex);
+
return 0;
}
@@ -2687,14 +2721,21 @@ static int qmp_combo_usb_init(struct phy *phy)
struct qmp_combo *qmp = phy_get_drvdata(phy);
int ret;
- ret = qmp_combo_com_init(qmp);
+ mutex_lock(&qmp->phy_mutex);
+ ret = qmp_combo_com_init(qmp, false);
if (ret)
- return ret;
+ goto out_unlock;
ret = qmp_combo_usb_power_on(phy);
- if (ret)
- qmp_combo_com_exit(qmp);
+ if (ret) {
+ qmp_combo_com_exit(qmp, false);
+ goto out_unlock;
+ }
+
+ qmp->usb_init_count++;
+out_unlock:
+ mutex_unlock(&qmp->phy_mutex);
return ret;
}
@@ -2703,11 +2744,20 @@ static int qmp_combo_usb_exit(struct phy *phy)
struct qmp_combo *qmp = phy_get_drvdata(phy);
int ret;
+ mutex_lock(&qmp->phy_mutex);
ret = qmp_combo_usb_power_off(phy);
if (ret)
- return ret;
+ goto out_unlock;
+
+ ret = qmp_combo_com_exit(qmp, false);
+ if (ret)
+ goto out_unlock;
+
+ qmp->usb_init_count--;
- return qmp_combo_com_exit(qmp);
+out_unlock:
+ mutex_unlock(&qmp->phy_mutex);
+ return ret;
}
static int qmp_combo_usb_set_mode(struct phy *phy, enum phy_mode mode, int submode)
@@ -3173,6 +3223,103 @@ static int qmp_combo_register_clocks(struct qmp_combo *qmp, struct device_node *
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, dp_np);
}
+#if IS_ENABLED(CONFIG_TYPEC)
+static int qmp_combo_typec_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct qmp_combo *qmp = typec_switch_get_drvdata(sw);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ if (orientation == qmp->orientation || orientation == TYPEC_ORIENTATION_NONE)
+ return 0;
+
+ mutex_lock(&qmp->phy_mutex);
+ qmp->orientation = orientation;
+
+ if (qmp->init_count) {
+ if (qmp->usb_init_count)
+ qmp_combo_usb_power_off(qmp->usb_phy);
+ qmp_combo_com_exit(qmp, true);
+
+ qmp_combo_com_init(qmp, true);
+ if (qmp->usb_init_count)
+ qmp_combo_usb_power_on(qmp->usb_phy);
+ if (qmp->dp_init_count)
+ cfg->dp_aux_init(qmp);
+ }
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
+static void qmp_combo_typec_unregister(void *data)
+{
+ struct qmp_combo *qmp = data;
+
+ typec_switch_unregister(qmp->sw);
+}
+
+static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
+{
+ struct typec_switch_desc sw_desc = {};
+ struct device *dev = qmp->dev;
+
+ sw_desc.drvdata = qmp;
+ sw_desc.fwnode = dev->fwnode;
+ sw_desc.set = qmp_combo_typec_switch_set;
+ qmp->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(qmp->sw)) {
+ dev_err(dev, "Unable to register typec switch: %pe\n", qmp->sw);
+ return PTR_ERR(qmp->sw);
+ }
+
+ return devm_add_action_or_reset(dev, qmp_combo_typec_unregister, qmp);
+}
+#else
+static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM)
+static int qmp_combo_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct qmp_combo *qmp = container_of(bridge, struct qmp_combo, bridge);
+ struct drm_bridge *next_bridge;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ next_bridge = devm_drm_of_get_bridge(qmp->dev, qmp->dev->of_node, 0, 0);
+ if (IS_ERR(next_bridge)) {
+ dev_err(qmp->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
+ return PTR_ERR(next_bridge);
+ }
+
+ return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+static const struct drm_bridge_funcs qmp_combo_bridge_funcs = {
+ .attach = qmp_combo_bridge_attach,
+};
+
+static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
+{
+ qmp->bridge.funcs = &qmp_combo_bridge_funcs;
+ qmp->bridge.of_node = qmp->dev->of_node;
+
+ return devm_drm_bridge_add(qmp->dev, &qmp->bridge);
+}
+#else
+static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
+{
+ return 0;
+}
+#endif
+
static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_node *np)
{
struct device *dev = qmp->dev;
@@ -3353,6 +3500,8 @@ static int qmp_combo_probe(struct platform_device *pdev)
qmp->dev = dev;
+ qmp->orientation = TYPEC_ORIENTATION_NORMAL;
+
qmp->cfg = of_device_get_match_data(dev);
if (!qmp->cfg)
return -EINVAL;
@@ -3371,6 +3520,14 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = qmp_combo_typec_switch_register(qmp);
+ if (ret)
+ return ret;
+
+ ret = qmp_combo_dp_register_bridge(qmp);
+ if (ret)
+ return ret;
+
/* Check for legacy binding with child nodes. */
usb_np = of_get_child_by_name(dev->of_node, "usb3-phy");
if (usb_np) {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index a49711c5a63d..466f0a56c82e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -139,6 +139,88 @@ static const unsigned int qmp_v5_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
};
+static const struct qmp_phy_init_tbl ipq9574_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x7d),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x05),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x6c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
+};
+
static const struct qmp_phy_init_tbl ipq8074_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
@@ -1408,12 +1490,36 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
+static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
+
struct qmp_usb_offsets {
u16 serdes;
u16 pcs;
+ u16 pcs_misc;
u16 pcs_usb;
u16 tx;
u16 rx;
+ /* for PHYs with >= 2 lanes */
+ u16 tx2;
+ u16 rx2;
};
/* struct qmp_phy_cfg - per-PHY initialization config */
@@ -1558,6 +1664,24 @@ static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
+static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
+ .serdes = 0,
+ .pcs = 0x800,
+ .pcs_usb = 0x800,
+ .tx = 0x200,
+ .rx = 0x400,
+};
+
+static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
+ .serdes = 0,
+ .pcs = 0xc00,
+ .pcs_misc = 0xa00,
+ .tx = 0x200,
+ .rx = 0x400,
+ .tx2 = 0x600,
+ .rx2 = 0x800,
+};
+
static const struct qmp_usb_offsets qmp_usb_offsets_v5 = {
.serdes = 0,
.pcs = 0x0200,
@@ -1586,6 +1710,28 @@ static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
.regs = qmp_v3_usb3phy_regs_layout,
};
+static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
+ .lanes = 1,
+
+ .offsets = &qmp_usb_offsets_ipq9574,
+
+ .serdes_tbl = ipq9574_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq9574_usb3_serdes_tbl),
+ .tx_tbl = ipq9574_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ipq9574_usb3_tx_tbl),
+ .rx_tbl = ipq9574_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq9574_usb3_rx_tbl),
+ .pcs_tbl = ipq9574_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq9574_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = qcm2290_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.lanes = 1,
@@ -1629,6 +1775,28 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.has_phy_dp_com_ctrl = true,
};
+static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
+ .lanes = 1,
+
+ .offsets = &qmp_usb_offsets_v5,
+
+ .serdes_tbl = sc8280xp_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sc8280xp_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_tx_tbl),
+ .rx_tbl = sc8280xp_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sa8775p_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = qcm2290_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v5_usb3phy_regs_layout,
+};
+
static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.lanes = 2,
@@ -1922,6 +2090,8 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
.lanes = 2,
+ .offsets = &qmp_usb_offsets_v3,
+
.serdes_tbl = qcm2290_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
.tx_tbl = qcm2290_usb3_tx_tbl,
@@ -2493,10 +2663,16 @@ static int qmp_usb_parse_dt(struct qmp_usb *qmp)
qmp->serdes = base + offs->serdes;
qmp->pcs = base + offs->pcs;
+ qmp->pcs_misc = base + offs->pcs_misc;
qmp->pcs_usb = base + offs->pcs_usb;
qmp->tx = base + offs->tx;
qmp->rx = base + offs->rx;
+ if (cfg->lanes >= 2) {
+ qmp->tx2 = base + offs->tx2;
+ qmp->rx2 = base + offs->rx2;
+ }
+
qmp->pipe_clk = devm_clk_get(dev, "pipe");
if (IS_ERR(qmp->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
@@ -2589,6 +2765,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,ipq8074-qmp-usb3-phy",
.data = &ipq8074_usb3phy_cfg,
}, {
+ .compatible = "qcom,ipq9574-qmp-usb3-phy",
+ .data = &ipq9574_usb3phy_cfg,
+ }, {
.compatible = "qcom,msm8996-qmp-usb3-phy",
.data = &msm8996_usb3phy_cfg,
}, {
@@ -2598,6 +2777,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,qcm2290-qmp-usb3-phy",
.data = &qcm2290_usb3phy_cfg,
}, {
+ .compatible = "qcom,sa8775p-qmp-usb3-uni-phy",
+ .data = &sa8775p_usb3_uniphy_cfg,
+ }, {
.compatible = "qcom,sc7180-qmp-usb3-phy",
.data = &sc7180_usb3phy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 2ef638b32e8f..bec6e40d5280 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -912,6 +912,9 @@ static const struct of_device_id qusb2_phy_of_match_table[] = {
.compatible = "qcom,ipq8074-qusb2-phy",
.data = &msm8996_phy_cfg,
}, {
+ .compatible = "qcom,ipq9574-qusb2-phy",
+ .data = &ipq6018_phy_cfg,
+ }, {
.compatible = "qcom,msm8953-qusb2-phy",
.data = &msm8996_phy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-sgmii-eth.c b/drivers/phy/qualcomm/phy-qcom-sgmii-eth.c
new file mode 100644
index 000000000000..03dc753f0de1
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-sgmii-eth.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define QSERDES_QMP_PLL 0x0
+#define QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 (QSERDES_QMP_PLL + 0x1ac)
+#define QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 (QSERDES_QMP_PLL + 0x1b0)
+#define QSERDES_COM_BIN_VCOCAL_HSCLK_SEL (QSERDES_QMP_PLL + 0x1bc)
+#define QSERDES_COM_CORE_CLK_EN (QSERDES_QMP_PLL + 0x174)
+#define QSERDES_COM_CORECLK_DIV_MODE0 (QSERDES_QMP_PLL + 0x168)
+#define QSERDES_COM_CP_CTRL_MODE0 (QSERDES_QMP_PLL + 0x74)
+#define QSERDES_COM_DEC_START_MODE0 (QSERDES_QMP_PLL + 0xbc)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 (QSERDES_QMP_PLL + 0xcc)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 (QSERDES_QMP_PLL + 0xd0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 (QSERDES_QMP_PLL + 0xd4)
+#define QSERDES_COM_HSCLK_HS_SWITCH_SEL (QSERDES_QMP_PLL + 0x15c)
+#define QSERDES_COM_HSCLK_SEL (QSERDES_QMP_PLL + 0x158)
+#define QSERDES_COM_LOCK_CMP1_MODE0 (QSERDES_QMP_PLL + 0xac)
+#define QSERDES_COM_LOCK_CMP2_MODE0 (QSERDES_QMP_PLL + 0xb0)
+#define QSERDES_COM_PLL_CCTRL_MODE0 (QSERDES_QMP_PLL + 0x84)
+#define QSERDES_COM_PLL_IVCO (QSERDES_QMP_PLL + 0x58)
+#define QSERDES_COM_PLL_RCTRL_MODE0 (QSERDES_QMP_PLL + 0x7c)
+#define QSERDES_COM_SYSCLK_EN_SEL (QSERDES_QMP_PLL + 0x94)
+#define QSERDES_COM_VCO_TUNE1_MODE0 (QSERDES_QMP_PLL + 0x110)
+#define QSERDES_COM_VCO_TUNE2_MODE0 (QSERDES_QMP_PLL + 0x114)
+#define QSERDES_COM_VCO_TUNE_INITVAL2 (QSERDES_QMP_PLL + 0x124)
+#define QSERDES_COM_C_READY_STATUS (QSERDES_QMP_PLL + 0x178)
+#define QSERDES_COM_CMN_STATUS (QSERDES_QMP_PLL + 0x140)
+
+#define QSERDES_RX 0x600
+#define QSERDES_RX_UCDR_FO_GAIN (QSERDES_RX + 0x8)
+#define QSERDES_RX_UCDR_SO_GAIN (QSERDES_RX + 0x14)
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN (QSERDES_RX + 0x30)
+#define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE (QSERDES_RX + 0x34)
+#define QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW (QSERDES_RX + 0x3c)
+#define QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH (QSERDES_RX + 0x40)
+#define QSERDES_RX_UCDR_PI_CONTROLS (QSERDES_RX + 0x44)
+#define QSERDES_RX_UCDR_PI_CTRL2 (QSERDES_RX + 0x48)
+#define QSERDES_RX_RX_TERM_BW (QSERDES_RX + 0x80)
+#define QSERDES_RX_VGA_CAL_CNTRL2 (QSERDES_RX + 0xd8)
+#define QSERDES_RX_GM_CAL (QSERDES_RX + 0xdc)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1 (QSERDES_RX + 0xe8)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 (QSERDES_RX + 0xec)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 (QSERDES_RX + 0xf0)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 (QSERDES_RX + 0xf4)
+#define QSERDES_RX_RX_IDAC_TSETTLE_LOW (QSERDES_RX + 0xf8)
+#define QSERDES_RX_RX_IDAC_TSETTLE_HIGH (QSERDES_RX + 0xfc)
+#define QSERDES_RX_RX_IDAC_MEASURE_TIME (QSERDES_RX + 0x100)
+#define QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 (QSERDES_RX + 0x110)
+#define QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 (QSERDES_RX + 0x114)
+#define QSERDES_RX_SIGDET_CNTRL (QSERDES_RX + 0x11c)
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL (QSERDES_RX + 0x124)
+#define QSERDES_RX_RX_BAND (QSERDES_RX + 0x128)
+#define QSERDES_RX_RX_MODE_00_LOW (QSERDES_RX + 0x15c)
+#define QSERDES_RX_RX_MODE_00_HIGH (QSERDES_RX + 0x160)
+#define QSERDES_RX_RX_MODE_00_HIGH2 (QSERDES_RX + 0x164)
+#define QSERDES_RX_RX_MODE_00_HIGH3 (QSERDES_RX + 0x168)
+#define QSERDES_RX_RX_MODE_00_HIGH4 (QSERDES_RX + 0x16c)
+#define QSERDES_RX_RX_MODE_01_LOW (QSERDES_RX + 0x170)
+#define QSERDES_RX_RX_MODE_01_HIGH (QSERDES_RX + 0x174)
+#define QSERDES_RX_RX_MODE_01_HIGH2 (QSERDES_RX + 0x178)
+#define QSERDES_RX_RX_MODE_01_HIGH3 (QSERDES_RX + 0x17c)
+#define QSERDES_RX_RX_MODE_01_HIGH4 (QSERDES_RX + 0x180)
+#define QSERDES_RX_RX_MODE_10_LOW (QSERDES_RX + 0x184)
+#define QSERDES_RX_RX_MODE_10_HIGH (QSERDES_RX + 0x188)
+#define QSERDES_RX_RX_MODE_10_HIGH2 (QSERDES_RX + 0x18c)
+#define QSERDES_RX_RX_MODE_10_HIGH3 (QSERDES_RX + 0x190)
+#define QSERDES_RX_RX_MODE_10_HIGH4 (QSERDES_RX + 0x194)
+#define QSERDES_RX_DCC_CTRL1 (QSERDES_RX + 0x1a8)
+
+#define QSERDES_TX 0x400
+#define QSERDES_TX_TX_BAND (QSERDES_TX + 0x24)
+#define QSERDES_TX_SLEW_CNTL (QSERDES_TX + 0x28)
+#define QSERDES_TX_RES_CODE_LANE_OFFSET_TX (QSERDES_TX + 0x3c)
+#define QSERDES_TX_RES_CODE_LANE_OFFSET_RX (QSERDES_TX + 0x40)
+#define QSERDES_TX_LANE_MODE_1 (QSERDES_TX + 0x84)
+#define QSERDES_TX_LANE_MODE_3 (QSERDES_TX + 0x8c)
+#define QSERDES_TX_RCV_DETECT_LVL_2 (QSERDES_TX + 0xa4)
+#define QSERDES_TX_TRAN_DRVR_EMP_EN (QSERDES_TX + 0xc0)
+
+#define QSERDES_PCS 0xC00
+#define QSERDES_PCS_PHY_START (QSERDES_PCS + 0x0)
+#define QSERDES_PCS_POWER_DOWN_CONTROL (QSERDES_PCS + 0x4)
+#define QSERDES_PCS_SW_RESET (QSERDES_PCS + 0x8)
+#define QSERDES_PCS_LINE_RESET_TIME (QSERDES_PCS + 0xc)
+#define QSERDES_PCS_TX_LARGE_AMP_DRV_LVL (QSERDES_PCS + 0x20)
+#define QSERDES_PCS_TX_SMALL_AMP_DRV_LVL (QSERDES_PCS + 0x28)
+#define QSERDES_PCS_TX_MID_TERM_CTRL1 (QSERDES_PCS + 0xd8)
+#define QSERDES_PCS_TX_MID_TERM_CTRL2 (QSERDES_PCS + 0xdc)
+#define QSERDES_PCS_SGMII_MISC_CTRL8 (QSERDES_PCS + 0x118)
+#define QSERDES_PCS_PCS_READY_STATUS (QSERDES_PCS + 0x94)
+
+#define QSERDES_COM_C_READY BIT(0)
+#define QSERDES_PCS_READY BIT(0)
+#define QSERDES_PCS_SGMIIPHY_READY BIT(7)
+#define QSERDES_COM_C_PLL_LOCKED BIT(1)
+
+struct qcom_dwmac_sgmii_phy_data {
+ struct regmap *regmap;
+ struct clk *refclk;
+ int speed;
+};
+
+static void qcom_dwmac_sgmii_phy_init_1g(struct regmap *regmap)
+{
+ regmap_write(regmap, QSERDES_PCS_SW_RESET, 0x01);
+ regmap_write(regmap, QSERDES_PCS_POWER_DOWN_CONTROL, 0x01);
+
+ regmap_write(regmap, QSERDES_COM_PLL_IVCO, 0x0F);
+ regmap_write(regmap, QSERDES_COM_CP_CTRL_MODE0, 0x06);
+ regmap_write(regmap, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+ regmap_write(regmap, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+ regmap_write(regmap, QSERDES_COM_SYSCLK_EN_SEL, 0x1A);
+ regmap_write(regmap, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
+ regmap_write(regmap, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
+ regmap_write(regmap, QSERDES_COM_DEC_START_MODE0, 0x82);
+ regmap_write(regmap, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
+ regmap_write(regmap, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
+ regmap_write(regmap, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
+ regmap_write(regmap, QSERDES_COM_VCO_TUNE1_MODE0, 0x24);
+
+ regmap_write(regmap, QSERDES_COM_VCO_TUNE2_MODE0, 0x02);
+ regmap_write(regmap, QSERDES_COM_VCO_TUNE_INITVAL2, 0x00);
+ regmap_write(regmap, QSERDES_COM_HSCLK_SEL, 0x04);
+ regmap_write(regmap, QSERDES_COM_HSCLK_HS_SWITCH_SEL, 0x00);
+ regmap_write(regmap, QSERDES_COM_CORECLK_DIV_MODE0, 0x0A);
+ regmap_write(regmap, QSERDES_COM_CORE_CLK_EN, 0x00);
+ regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xB9);
+ regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1E);
+ regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_HSCLK_SEL, 0x11);
+
+ regmap_write(regmap, QSERDES_TX_TX_BAND, 0x05);
+ regmap_write(regmap, QSERDES_TX_SLEW_CNTL, 0x0A);
+ regmap_write(regmap, QSERDES_TX_RES_CODE_LANE_OFFSET_TX, 0x09);
+ regmap_write(regmap, QSERDES_TX_RES_CODE_LANE_OFFSET_RX, 0x09);
+ regmap_write(regmap, QSERDES_TX_LANE_MODE_1, 0x05);
+ regmap_write(regmap, QSERDES_TX_LANE_MODE_3, 0x00);
+ regmap_write(regmap, QSERDES_TX_RCV_DETECT_LVL_2, 0x12);
+ regmap_write(regmap, QSERDES_TX_TRAN_DRVR_EMP_EN, 0x0C);
+
+ regmap_write(regmap, QSERDES_RX_UCDR_FO_GAIN, 0x0A);
+ regmap_write(regmap, QSERDES_RX_UCDR_SO_GAIN, 0x06);
+ regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0A);
+ regmap_write(regmap, QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7F);
+ regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00);
+ regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x01);
+ regmap_write(regmap, QSERDES_RX_UCDR_PI_CONTROLS, 0x81);
+ regmap_write(regmap, QSERDES_RX_UCDR_PI_CTRL2, 0x80);
+ regmap_write(regmap, QSERDES_RX_RX_TERM_BW, 0x04);
+ regmap_write(regmap, QSERDES_RX_VGA_CAL_CNTRL2, 0x08);
+ regmap_write(regmap, QSERDES_RX_GM_CAL, 0x0F);
+ regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04);
+ regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x00);
+ regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4A);
+ regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0A);
+ regmap_write(regmap, QSERDES_RX_RX_IDAC_TSETTLE_LOW, 0x80);
+ regmap_write(regmap, QSERDES_RX_RX_IDAC_TSETTLE_HIGH, 0x01);
+ regmap_write(regmap, QSERDES_RX_RX_IDAC_MEASURE_TIME, 0x20);
+ regmap_write(regmap, QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17);
+ regmap_write(regmap, QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00);
+ regmap_write(regmap, QSERDES_RX_SIGDET_CNTRL, 0x0F);
+ regmap_write(regmap, QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E);
+ regmap_write(regmap, QSERDES_RX_RX_BAND, 0x05);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH3, 0x09);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH4, 0xB1);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH3, 0x09);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH4, 0xB1);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH3, 0x3B);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH4, 0xB7);
+ regmap_write(regmap, QSERDES_RX_DCC_CTRL1, 0x0C);
+
+ regmap_write(regmap, QSERDES_PCS_LINE_RESET_TIME, 0x0C);
+ regmap_write(regmap, QSERDES_PCS_TX_LARGE_AMP_DRV_LVL, 0x1F);
+ regmap_write(regmap, QSERDES_PCS_TX_SMALL_AMP_DRV_LVL, 0x03);
+ regmap_write(regmap, QSERDES_PCS_TX_MID_TERM_CTRL1, 0x83);
+ regmap_write(regmap, QSERDES_PCS_TX_MID_TERM_CTRL2, 0x08);
+ regmap_write(regmap, QSERDES_PCS_SGMII_MISC_CTRL8, 0x0C);
+ regmap_write(regmap, QSERDES_PCS_SW_RESET, 0x00);
+
+ regmap_write(regmap, QSERDES_PCS_PHY_START, 0x01);
+}
+
+static void qcom_dwmac_sgmii_phy_init_2p5g(struct regmap *regmap)
+{
+ regmap_write(regmap, QSERDES_PCS_SW_RESET, 0x01);
+ regmap_write(regmap, QSERDES_PCS_POWER_DOWN_CONTROL, 0x01);
+
+ regmap_write(regmap, QSERDES_COM_PLL_IVCO, 0x0F);
+ regmap_write(regmap, QSERDES_COM_CP_CTRL_MODE0, 0x06);
+ regmap_write(regmap, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+ regmap_write(regmap, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+ regmap_write(regmap, QSERDES_COM_SYSCLK_EN_SEL, 0x1A);
+ regmap_write(regmap, QSERDES_COM_LOCK_CMP1_MODE0, 0x1A);
+ regmap_write(regmap, QSERDES_COM_LOCK_CMP2_MODE0, 0x41);
+ regmap_write(regmap, QSERDES_COM_DEC_START_MODE0, 0x7A);
+ regmap_write(regmap, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+ regmap_write(regmap, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x20);
+ regmap_write(regmap, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x01);
+ regmap_write(regmap, QSERDES_COM_VCO_TUNE1_MODE0, 0xA1);
+
+ regmap_write(regmap, QSERDES_COM_VCO_TUNE2_MODE0, 0x02);
+ regmap_write(regmap, QSERDES_COM_VCO_TUNE_INITVAL2, 0x00);
+ regmap_write(regmap, QSERDES_COM_HSCLK_SEL, 0x03);
+ regmap_write(regmap, QSERDES_COM_HSCLK_HS_SWITCH_SEL, 0x00);
+ regmap_write(regmap, QSERDES_COM_CORECLK_DIV_MODE0, 0x05);
+ regmap_write(regmap, QSERDES_COM_CORE_CLK_EN, 0x00);
+ regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xCD);
+ regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1C);
+ regmap_write(regmap, QSERDES_COM_BIN_VCOCAL_HSCLK_SEL, 0x11);
+
+ regmap_write(regmap, QSERDES_TX_TX_BAND, 0x04);
+ regmap_write(regmap, QSERDES_TX_SLEW_CNTL, 0x0A);
+ regmap_write(regmap, QSERDES_TX_RES_CODE_LANE_OFFSET_TX, 0x09);
+ regmap_write(regmap, QSERDES_TX_RES_CODE_LANE_OFFSET_RX, 0x02);
+ regmap_write(regmap, QSERDES_TX_LANE_MODE_1, 0x05);
+ regmap_write(regmap, QSERDES_TX_LANE_MODE_3, 0x00);
+ regmap_write(regmap, QSERDES_TX_RCV_DETECT_LVL_2, 0x12);
+ regmap_write(regmap, QSERDES_TX_TRAN_DRVR_EMP_EN, 0x0C);
+
+ regmap_write(regmap, QSERDES_RX_UCDR_FO_GAIN, 0x0A);
+ regmap_write(regmap, QSERDES_RX_UCDR_SO_GAIN, 0x06);
+ regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0A);
+ regmap_write(regmap, QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7F);
+ regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00);
+ regmap_write(regmap, QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x01);
+ regmap_write(regmap, QSERDES_RX_UCDR_PI_CONTROLS, 0x81);
+ regmap_write(regmap, QSERDES_RX_UCDR_PI_CTRL2, 0x80);
+ regmap_write(regmap, QSERDES_RX_RX_TERM_BW, 0x00);
+ regmap_write(regmap, QSERDES_RX_VGA_CAL_CNTRL2, 0x08);
+ regmap_write(regmap, QSERDES_RX_GM_CAL, 0x0F);
+ regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04);
+ regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x00);
+ regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4A);
+ regmap_write(regmap, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0A);
+ regmap_write(regmap, QSERDES_RX_RX_IDAC_TSETTLE_LOW, 0x80);
+ regmap_write(regmap, QSERDES_RX_RX_IDAC_TSETTLE_HIGH, 0x01);
+ regmap_write(regmap, QSERDES_RX_RX_IDAC_MEASURE_TIME, 0x20);
+ regmap_write(regmap, QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17);
+ regmap_write(regmap, QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00);
+ regmap_write(regmap, QSERDES_RX_SIGDET_CNTRL, 0x0F);
+ regmap_write(regmap, QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E);
+ regmap_write(regmap, QSERDES_RX_RX_BAND, 0x18);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_LOW, 0x18);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH3, 0x0C);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_00_HIGH4, 0xB8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH3, 0x09);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_01_HIGH4, 0xB1);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_LOW, 0xE0);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH2, 0xC8);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH3, 0x3B);
+ regmap_write(regmap, QSERDES_RX_RX_MODE_10_HIGH4, 0xB7);
+ regmap_write(regmap, QSERDES_RX_DCC_CTRL1, 0x0C);
+
+ regmap_write(regmap, QSERDES_PCS_LINE_RESET_TIME, 0x0C);
+ regmap_write(regmap, QSERDES_PCS_TX_LARGE_AMP_DRV_LVL, 0x1F);
+ regmap_write(regmap, QSERDES_PCS_TX_SMALL_AMP_DRV_LVL, 0x03);
+ regmap_write(regmap, QSERDES_PCS_TX_MID_TERM_CTRL1, 0x83);
+ regmap_write(regmap, QSERDES_PCS_TX_MID_TERM_CTRL2, 0x08);
+ regmap_write(regmap, QSERDES_PCS_SGMII_MISC_CTRL8, 0x8C);
+ regmap_write(regmap, QSERDES_PCS_SW_RESET, 0x00);
+
+ regmap_write(regmap, QSERDES_PCS_PHY_START, 0x01);
+}
+
+static inline int
+qcom_dwmac_sgmii_phy_poll_status(struct regmap *regmap, unsigned int reg,
+ unsigned int bit)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(regmap, reg, val,
+ val & bit, 1500, 750000);
+}
+
+static int qcom_dwmac_sgmii_phy_calibrate(struct phy *phy)
+{
+ struct qcom_dwmac_sgmii_phy_data *data = phy_get_drvdata(phy);
+ struct device *dev = phy->dev.parent;
+
+ switch (data->speed) {
+ case SPEED_10:
+ case SPEED_100:
+ case SPEED_1000:
+ qcom_dwmac_sgmii_phy_init_1g(data->regmap);
+ break;
+ case SPEED_2500:
+ qcom_dwmac_sgmii_phy_init_2p5g(data->regmap);
+ break;
+ }
+
+ if (qcom_dwmac_sgmii_phy_poll_status(data->regmap,
+ QSERDES_COM_C_READY_STATUS,
+ QSERDES_COM_C_READY)) {
+ dev_err(dev, "QSERDES_COM_C_READY_STATUS timed-out");
+ return -ETIMEDOUT;
+ }
+
+ if (qcom_dwmac_sgmii_phy_poll_status(data->regmap,
+ QSERDES_PCS_PCS_READY_STATUS,
+ QSERDES_PCS_READY)) {
+ dev_err(dev, "PCS_READY timed-out");
+ return -ETIMEDOUT;
+ }
+
+ if (qcom_dwmac_sgmii_phy_poll_status(data->regmap,
+ QSERDES_PCS_PCS_READY_STATUS,
+ QSERDES_PCS_SGMIIPHY_READY)) {
+ dev_err(dev, "SGMIIPHY_READY timed-out");
+ return -ETIMEDOUT;
+ }
+
+ if (qcom_dwmac_sgmii_phy_poll_status(data->regmap,
+ QSERDES_COM_CMN_STATUS,
+ QSERDES_COM_C_PLL_LOCKED)) {
+ dev_err(dev, "PLL Lock Status timed-out");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int qcom_dwmac_sgmii_phy_power_on(struct phy *phy)
+{
+ struct qcom_dwmac_sgmii_phy_data *data = phy_get_drvdata(phy);
+
+ return clk_prepare_enable(data->refclk);
+}
+
+static int qcom_dwmac_sgmii_phy_power_off(struct phy *phy)
+{
+ struct qcom_dwmac_sgmii_phy_data *data = phy_get_drvdata(phy);
+
+ regmap_write(data->regmap, QSERDES_PCS_TX_MID_TERM_CTRL2, 0x08);
+ regmap_write(data->regmap, QSERDES_PCS_SW_RESET, 0x01);
+ udelay(100);
+ regmap_write(data->regmap, QSERDES_PCS_SW_RESET, 0x00);
+ regmap_write(data->regmap, QSERDES_PCS_PHY_START, 0x01);
+
+ clk_disable_unprepare(data->refclk);
+
+ return 0;
+}
+
+static int qcom_dwmac_sgmii_phy_set_speed(struct phy *phy, int speed)
+{
+ struct qcom_dwmac_sgmii_phy_data *data = phy_get_drvdata(phy);
+
+ data->speed = speed;
+
+ return qcom_dwmac_sgmii_phy_calibrate(phy);
+}
+
+static const struct phy_ops qcom_dwmac_sgmii_phy_ops = {
+ .power_on = qcom_dwmac_sgmii_phy_power_on,
+ .power_off = qcom_dwmac_sgmii_phy_power_off,
+ .set_speed = qcom_dwmac_sgmii_phy_set_speed,
+ .calibrate = qcom_dwmac_sgmii_phy_calibrate,
+ .owner = THIS_MODULE,
+};
+
+static const struct regmap_config qcom_dwmac_sgmii_phy_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .use_relaxed_mmio = true,
+ .disable_locking = true,
+};
+
+static int qcom_dwmac_sgmii_phy_probe(struct platform_device *pdev)
+{
+ struct qcom_dwmac_sgmii_phy_data *data;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+ void __iomem *base;
+ struct phy *phy;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->speed = SPEED_10;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ data->regmap = devm_regmap_init_mmio(dev, base,
+ &qcom_dwmac_sgmii_phy_regmap_cfg);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ phy = devm_phy_create(dev, NULL, &qcom_dwmac_sgmii_phy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ data->refclk = devm_clk_get(dev, "sgmi_ref");
+ if (IS_ERR(data->refclk))
+ return PTR_ERR(data->refclk);
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider))
+ return PTR_ERR(provider);
+
+ phy_set_drvdata(phy, data);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_dwmac_sgmii_phy_of_match[] = {
+ { .compatible = "qcom,sa8775p-dwmac-sgmii-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_dwmac_sgmii_phy_of_match);
+
+static struct platform_driver qcom_dwmac_sgmii_phy_driver = {
+ .probe = qcom_dwmac_sgmii_phy_probe,
+ .driver = {
+ .name = "qcom-dwmac-sgmii-phy",
+ .of_match_table = qcom_dwmac_sgmii_phy_of_match,
+ }
+};
+
+module_platform_driver(qcom_dwmac_sgmii_phy_driver);
+
+MODULE_DESCRIPTION("Qualcomm DWMAC SGMII PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index 3ccaabf2850a..f10afa3d7ff5 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -59,7 +59,7 @@ config PHY_EXYNOS4210_USB2
config PHY_EXYNOS4X12_USB2
bool
depends on PHY_SAMSUNG_USB2
- default SOC_EXYNOS3250 || SOC_EXYNOS4412
+ default SOC_EXYNOS3250 || SOC_EXYNOS4212 || SOC_EXYNOS4412
config PHY_EXYNOS5250_USB2
bool
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index b55d4e9f42b5..a296b87dced1 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -568,6 +568,7 @@ static void tegra_xusb_port_unregister(struct tegra_xusb_port *port)
usb_role_switch_unregister(port->usb_role_sw);
cancel_work_sync(&port->usb_phy_work);
usb_remove_phy(&port->usb_phy);
+ port->usb_phy.dev->driver = NULL;
}
if (port->ops->remove)
@@ -675,6 +676,9 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port)
port->dev.driver = devm_kzalloc(&port->dev,
sizeof(struct device_driver),
GFP_KERNEL);
+ if (!port->dev.driver)
+ return -ENOMEM;
+
port->dev.driver->owner = THIS_MODULE;
port->usb_role_sw = usb_role_switch_register(&port->dev,
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 8c667819c39a..6286cf25a426 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -23,7 +23,9 @@
#define AM33XX_GMII_SEL_MODE_RGMII 2
/* J72xx SoC specific definitions for the CONTROL port */
+#define J72XX_GMII_SEL_MODE_SGMII 3
#define J72XX_GMII_SEL_MODE_QSGMII 4
+#define J72XX_GMII_SEL_MODE_USXGMII 5
#define J72XX_GMII_SEL_MODE_QSGMII_SUB 6
#define PHY_GMII_PORT(n) BIT((n) - 1)
@@ -106,6 +108,20 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
gmii_sel_mode = J72XX_GMII_SEL_MODE_QSGMII_SUB;
break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_SGMII)))
+ goto unsupported;
+ else
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_SGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_USXGMII:
+ if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_USXGMII)))
+ goto unsupported;
+ else
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_USXGMII;
+ break;
+
default:
goto unsupported;
}
@@ -213,7 +229,7 @@ static const
struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = {
.use_of_data = true,
.regfields = phy_gmii_sel_fields_am654,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
.num_ports = 4,
.num_qsgmii_main_ports = 1,
};
@@ -222,7 +238,17 @@ static const
struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j721e = {
.use_of_data = true,
.regfields = phy_gmii_sel_fields_am654,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
+ .num_ports = 8,
+ .num_qsgmii_main_ports = 2,
+};
+
+static const
+struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j784s4 = {
+ .use_of_data = true,
+ .regfields = phy_gmii_sel_fields_am654,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) |
+ BIT(PHY_INTERFACE_MODE_USXGMII),
.num_ports = 8,
.num_qsgmii_main_ports = 2,
};
@@ -256,6 +282,10 @@ static const struct of_device_id phy_gmii_sel_id_table[] = {
.compatible = "ti,j721e-cpsw9g-phy-gmii-sel",
.data = &phy_gmii_sel_cpsw9g_soc_j721e,
},
+ {
+ .compatible = "ti,j784s4-cpsw9g-phy-gmii-sel",
+ .data = &phy_gmii_sel_cpsw9g_soc_j784s4,
+ },
{}
};
MODULE_DEVICE_TABLE(of, phy_gmii_sel_id_table);
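
Note on the phy-gmii-sel change above: each SoC's extra_modes bitmask (indexed by PHY_INTERFACE_MODE_* values) gates which of the new SGMII/USXGMII selector values phy_gmii_sel_mode() will accept, so supporting a new SoC is mostly a data-table entry plus a compatible string. A hedged sketch of what such an entry could look like (all field values invented for illustration):

/* Hypothetical SoC entry -- values are illustrative, not from this patch. */
static const struct phy_gmii_sel_soc_data example_cpsw_soc_data = {
	.use_of_data		= true,
	.regfields		= phy_gmii_sel_fields_am654,
	.extra_modes		= BIT(PHY_INTERFACE_MODE_SGMII) |
				  BIT(PHY_INTERFACE_MODE_USXGMII),
	.num_ports		= 4,
	.num_qsgmii_main_ports	= 1,
};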
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index a673c3342470..25f9767c28e8 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -77,7 +77,7 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port,
{
int ret = 0;
- port->mux = fwnode_typec_mux_get(fwnode, NULL);
+ port->mux = fwnode_typec_mux_get(fwnode);
if (IS_ERR(port->mux)) {
ret = PTR_ERR(port->mux);
dev_dbg(dev, "Mux handle not found: %d.\n", ret);
diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
index b3622419cd1a..c773995b230d 100644
--- a/drivers/platform/x86/simatic-ipc.c
+++ b/drivers/platform/x86/simatic-ipc.c
@@ -68,9 +68,10 @@ static int register_platform_devices(u32 station_id)
}
if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
- if (ledmode == SIMATIC_IPC_DEVICE_127E ||
- ledmode == SIMATIC_IPC_DEVICE_227G)
- pdevname = KBUILD_MODNAME "_leds_gpio";
+ if (ledmode == SIMATIC_IPC_DEVICE_127E)
+ pdevname = KBUILD_MODNAME "_leds_gpio_apollolake";
+ if (ledmode == SIMATIC_IPC_DEVICE_227G)
+ pdevname = KBUILD_MODNAME "_leds_gpio_f7188x";
platform_data.devmode = ledmode;
ipc_led_platform_device =
platform_device_register_data(NULL,
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 8c87eeda0fec..fff07b2bd77b 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -158,6 +158,7 @@ config POWER_RESET_OXNAS
config POWER_RESET_PIIX4_POWEROFF
tristate "Intel PIIX4 power-off driver"
depends on PCI
+ depends on HAS_IOPORT
depends on MIPS || COMPILE_TEST
help
This driver supports powering off a system using the Intel PIIX4
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 741e44a017c3..d6884841a6dc 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -149,11 +149,10 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
return NOTIFY_DONE;
}
-static void __init at91_reset_status(struct platform_device *pdev,
- void __iomem *base)
+static const char * __init at91_reset_reason(struct at91_reset *reset)
{
+ u32 reg = readl(reset->rstc_base + AT91_RSTC_SR);
const char *reason;
- u32 reg = readl(base + AT91_RSTC_SR);
switch ((reg & AT91_RSTC_RSTTYP) >> 8) {
case RESET_TYPE_GENERAL:
@@ -185,7 +184,7 @@ static void __init at91_reset_status(struct platform_device *pdev,
break;
}
- dev_info(&pdev->dev, "Starting after %s\n", reason);
+ return reason;
}
static const struct of_device_id at91_ramc_of_match[] = {
@@ -392,7 +391,7 @@ static int __init at91_reset_probe(struct platform_device *pdev)
if (ret)
goto disable_clk;
- at91_reset_status(pdev, reset->rstc_base);
+ dev_info(&pdev->dev, "Starting after %s\n", at91_reset_reason(reset));
return 0;
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index 35d981d5e6c8..a479d3536eb1 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -105,7 +105,7 @@ static int gpio_restart_probe(struct platform_device *pdev)
return 0;
}
-static int gpio_restart_remove(struct platform_device *pdev)
+static void gpio_restart_remove(struct platform_device *pdev)
{
struct gpio_restart *gpio_restart = platform_get_drvdata(pdev);
int ret;
@@ -115,10 +115,7 @@ static int gpio_restart_remove(struct platform_device *pdev)
dev_err(&pdev->dev,
"%s: cannot unregister restart handler, %d\n",
__func__, ret);
- return -ENODEV;
}
-
- return 0;
}
static const struct of_device_id of_gpio_restart_match[] = {
@@ -128,7 +125,7 @@ static const struct of_device_id of_gpio_restart_match[] = {
static struct platform_driver gpio_restart_driver = {
.probe = gpio_restart_probe,
- .remove = gpio_restart_remove,
+ .remove_new = gpio_restart_remove,
.driver = {
.name = "restart-gpio",
.of_match_table = of_gpio_restart_match,
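
Note on the gpio-restart conversion above: the driver moves to the void-returning remove callback, because the platform core ignores errors returned from remove(). With .remove_new, problems can only be logged. A minimal sketch of the convention, with invented driver and function names:

/* Sketch of the .remove_new convention; names are invented. */
static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* Clean up here; failures can only be logged, never returned. */
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove_new	= example_remove,
	.driver		= {
		.name = "example-restart",
	},
};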
diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c
index ebdcfb28c4a0..1344b361a475 100644
--- a/drivers/power/reset/qcom-pon.c
+++ b/drivers/power/reset/qcom-pon.c
@@ -17,6 +17,8 @@
#define GEN1_REASON_SHIFT 2
#define GEN2_REASON_SHIFT 1
+#define NO_REASON_SHIFT 0
+
struct pm8916_pon {
struct device *dev;
struct regmap *regmap;
@@ -45,6 +47,7 @@ static int pm8916_reboot_mode_write(struct reboot_mode_driver *reboot,
static int pm8916_pon_probe(struct platform_device *pdev)
{
struct pm8916_pon *pon;
+ long reason_shift;
int error;
pon = devm_kzalloc(&pdev->dev, sizeof(*pon), GFP_KERNEL);
@@ -64,13 +67,17 @@ static int pm8916_pon_probe(struct platform_device *pdev)
if (error)
return error;
- pon->reboot_mode.dev = &pdev->dev;
- pon->reason_shift = (long)of_device_get_match_data(&pdev->dev);
- pon->reboot_mode.write = pm8916_reboot_mode_write;
- error = devm_reboot_mode_register(&pdev->dev, &pon->reboot_mode);
- if (error) {
- dev_err(&pdev->dev, "can't register reboot mode\n");
- return error;
+ reason_shift = (long)of_device_get_match_data(&pdev->dev);
+
+ if (reason_shift != NO_REASON_SHIFT) {
+ pon->reboot_mode.dev = &pdev->dev;
+ pon->reason_shift = reason_shift;
+ pon->reboot_mode.write = pm8916_reboot_mode_write;
+ error = devm_reboot_mode_register(&pdev->dev, &pon->reboot_mode);
+ if (error) {
+ dev_err(&pdev->dev, "can't register reboot mode\n");
+ return error;
+ }
}
platform_set_drvdata(pdev, pon);
@@ -80,6 +87,7 @@ static int pm8916_pon_probe(struct platform_device *pdev)
static const struct of_device_id pm8916_pon_id_table[] = {
{ .compatible = "qcom,pm8916-pon", .data = (void *)GEN1_REASON_SHIFT },
+ { .compatible = "qcom,pm8941-pon", .data = (void *)NO_REASON_SHIFT },
{ .compatible = "qcom,pms405-pon", .data = (void *)GEN1_REASON_SHIFT },
{ .compatible = "qcom,pm8998-pon", .data = (void *)GEN2_REASON_SHIFT },
{ .compatible = "qcom,pmk8350-pon", .data = (void *)GEN2_REASON_SHIFT },
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 4a5e8e1d1237..663a1c423806 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -766,6 +766,14 @@ config BATTERY_RT5033
The fuelgauge calculates and determines the battery state of charge
according to battery open circuit voltage.
+config CHARGER_RT5033
+ tristate "RT5033 battery charger support"
+ depends on MFD_RT5033
+ help
+ This adds support for the battery charger in the Richtek RT5033
+ PMIC. The device supports pre-charge mode, fast-charge mode and
+ constant-voltage mode.
+
config CHARGER_RT9455
tristate "Richtek RT9455 battery charger driver"
depends on I2C
@@ -934,4 +942,13 @@ config BATTERY_UG3105
device is off or suspended, the functionality of this driver is
limited to reporting capacity only.
+config CHARGER_QCOM_SMB2
+ tristate "Qualcomm PMI8998 PMIC charger driver"
+ depends on MFD_SPMI_PMIC
+ depends on IIO
+ help
+ Say Y here to enable the Qualcomm PMIC charger driver. This
+ adds support for the SMB2 switch-mode battery charger found
+ in PMI8998 and related PMICs.
+
endif # POWER_SUPPLY
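
Note on the two new Kconfig entries above: both gate conventional power-supply class drivers, and each driver ultimately registers itself with the power-supply core. A minimal, hedged sketch of that registration path; the descriptor contents and names below are invented, not taken from either new driver:

/* Illustrative power-supply registration sketch; all names are invented. */
static enum power_supply_property example_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_ONLINE,
};

static int example_get_property(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = POWER_SUPPLY_STATUS_CHARGING;
		return 0;
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = 1;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct power_supply_desc example_desc = {
	.name		= "example-charger",
	.type		= POWER_SUPPLY_TYPE_USB,
	.properties	= example_props,
	.num_properties	= ARRAY_SIZE(example_props),
	.get_property	= example_get_property,
};

The driver's probe would then call devm_power_supply_register(dev, &example_desc, &cfg) to expose the supply to user space.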
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 4adbfba02d05..a8a9fa6de1e9 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
obj-$(CONFIG_BATTERY_MAX1721X) += max1721x_battery.o
obj-$(CONFIG_BATTERY_RT5033) += rt5033_battery.o
+obj-$(CONFIG_CHARGER_RT5033) += rt5033_charger.o
obj-$(CONFIG_CHARGER_RT9455) += rt9455_charger.o
obj-$(CONFIG_CHARGER_RT9467) += rt9467-charger.o
obj-$(CONFIG_CHARGER_RT9471) += rt9471.o
@@ -109,3 +110,4 @@ obj-$(CONFIG_BATTERY_ACER_A500) += acer_a500_battery.o
obj-$(CONFIG_BATTERY_SURFACE) += surface_battery.o
obj-$(CONFIG_CHARGER_SURFACE) += surface_charger.o
obj-$(CONFIG_BATTERY_UG3105) += ug3105_battery.o
+obj-$(CONFIG_CHARGER_QCOM_SMB2) += qcom_pmi8998_charger.o
diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c
index 840db629a46c..3e3a0d118ce5 100644
--- a/drivers/power/supply/adp5061.c
+++ b/drivers/power/supply/adp5061.c
@@ -736,7 +736,7 @@ static struct i2c_driver adp5061_driver = {
.driver = {
.name = KBUILD_MODNAME,
},
- .probe_new = adp5061_probe,
+ .probe = adp5061_probe,
.id_table = adp5061_id,
};
module_i2c_driver(adp5061_driver);
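
Note on the .probe_new -> .probe conversions in adp5061 above and in the other charger and fuel-gauge drivers below: only the struct field is renamed; the callback keeps the single-argument form that .probe_new introduced. Sketch, with an invented driver name:

/* Sketch of the signature behind the .probe_new -> .probe rename. */
static int example_probe(struct i2c_client *client)
{
	dev_info(&client->dev, "probing client at 0x%02x\n", client->addr);
	return 0;
}

static struct i2c_driver example_i2c_driver = {
	.driver = {
		.name = "example-i2c",
	},
	.probe = example_probe,		/* was .probe_new before the rename */
};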
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index a1e6d1d44808..51c3f9b6458d 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -26,45 +26,48 @@
#define DRVNAME "axp20x-usb-power-supply"
+#define AXP192_USB_OTG_STATUS 0x04
+
#define AXP20X_PWR_STATUS_VBUS_PRESENT BIT(5)
#define AXP20X_PWR_STATUS_VBUS_USED BIT(4)
#define AXP20X_USB_STATUS_VBUS_VALID BIT(2)
-#define AXP20X_VBUS_PATH_SEL BIT(7)
-#define AXP20X_VBUS_PATH_SEL_OFFSET 7
-
#define AXP20X_VBUS_VHOLD_uV(b) (4000000 + (((b) >> 3) & 7) * 100000)
#define AXP20X_VBUS_VHOLD_MASK GENMASK(5, 3)
#define AXP20X_VBUS_VHOLD_OFFSET 3
-#define AXP20X_VBUS_CLIMIT_MASK 3
-#define AXP20X_VBUS_CLIMIT_900mA 0
-#define AXP20X_VBUS_CLIMIT_500mA 1
-#define AXP20X_VBUS_CLIMIT_100mA 2
-#define AXP20X_VBUS_CLIMIT_NONE 3
-
-#define AXP813_VBUS_CLIMIT_900mA 0
-#define AXP813_VBUS_CLIMIT_1500mA 1
-#define AXP813_VBUS_CLIMIT_2000mA 2
-#define AXP813_VBUS_CLIMIT_2500mA 3
#define AXP20X_ADC_EN1_VBUS_CURR BIT(2)
#define AXP20X_ADC_EN1_VBUS_VOLT BIT(3)
-#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
-
-#define AXP813_BC_EN BIT(0)
-
/*
* Note do not raise the debounce time, we must report Vusb high within
* 100ms otherwise we get Vbus errors in musb.
*/
#define DEBOUNCE_TIME msecs_to_jiffies(50)
+struct axp_data {
+ const struct power_supply_desc *power_desc;
+ const char * const *irq_names;
+ unsigned int num_irq_names;
+ const int *curr_lim_table;
+ struct reg_field curr_lim_fld;
+ struct reg_field vbus_valid_bit;
+ struct reg_field vbus_mon_bit;
+ struct reg_field usb_bc_en_bit;
+ struct reg_field vbus_disable_bit;
+ bool vbus_needs_polling: 1;
+};
+
struct axp20x_usb_power {
struct regmap *regmap;
+ struct regmap_field *curr_lim_fld;
+ struct regmap_field *vbus_valid_bit;
+ struct regmap_field *vbus_mon_bit;
+ struct regmap_field *usb_bc_en_bit;
+ struct regmap_field *vbus_disable_bit;
struct power_supply *supply;
- enum axp20x_variants axp20x_id;
+ const struct axp_data *axp_data;
struct iio_channel *vbus_v;
struct iio_channel *vbus_i;
struct delayed_work vbus_detect;
@@ -81,7 +84,7 @@ static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
* present->absent transition implies an online->offline transition
* and will trigger the VBUS_REMOVAL IRQ.
*/
- if (power->axp20x_id >= AXP221_ID && !power->online)
+ if (power->axp_data->vbus_needs_polling && !power->online)
return true;
return false;
@@ -121,60 +124,6 @@ out:
mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
}
-static int axp20x_get_current_max(struct axp20x_usb_power *power, int *val)
-{
- unsigned int v;
- int ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
-
- if (ret)
- return ret;
-
- switch (v & AXP20X_VBUS_CLIMIT_MASK) {
- case AXP20X_VBUS_CLIMIT_100mA:
- if (power->axp20x_id == AXP221_ID)
- *val = -1; /* No 100mA limit */
- else
- *val = 100000;
- break;
- case AXP20X_VBUS_CLIMIT_500mA:
- *val = 500000;
- break;
- case AXP20X_VBUS_CLIMIT_900mA:
- *val = 900000;
- break;
- case AXP20X_VBUS_CLIMIT_NONE:
- *val = -1;
- break;
- }
-
- return 0;
-}
-
-static int axp813_get_current_max(struct axp20x_usb_power *power, int *val)
-{
- unsigned int v;
- int ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
-
- if (ret)
- return ret;
-
- switch (v & AXP20X_VBUS_CLIMIT_MASK) {
- case AXP813_VBUS_CLIMIT_900mA:
- *val = 900000;
- break;
- case AXP813_VBUS_CLIMIT_1500mA:
- *val = 1500000;
- break;
- case AXP813_VBUS_CLIMIT_2000mA:
- *val = 2000000;
- break;
- case AXP813_VBUS_CLIMIT_2500mA:
- *val = 2500000;
- break;
- }
- return 0;
-}
-
static int axp20x_usb_power_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
@@ -213,9 +162,12 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = ret * 1700; /* 1 step = 1.7 mV */
return 0;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- if (power->axp20x_id == AXP813_ID)
- return axp813_get_current_max(power, &val->intval);
- return axp20x_get_current_max(power, &val->intval);
+ ret = regmap_field_read(power->curr_lim_fld, &v);
+ if (ret)
+ return ret;
+
+ val->intval = power->axp_data->curr_lim_table[v];
+ return 0;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
ret = iio_read_channel_processed(power->vbus_i,
@@ -256,16 +208,15 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_HEALTH_GOOD;
- if (power->axp20x_id == AXP202_ID) {
- ret = regmap_read(power->regmap,
- AXP20X_USB_OTG_STATUS, &v);
+ if (power->vbus_valid_bit) {
+ ret = regmap_field_read(power->vbus_valid_bit, &v);
if (ret)
return ret;
- if (!(v & AXP20X_USB_STATUS_VBUS_VALID))
- val->intval =
- POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ if (v == 0)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
}
+
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = !!(input & AXP20X_PWR_STATUS_VBUS_PRESENT);
@@ -280,16 +231,6 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
return 0;
}
-static int axp813_usb_power_set_online(struct axp20x_usb_power *power,
- int intval)
-{
- int val = !intval << AXP20X_VBUS_PATH_SEL_OFFSET;
-
- return regmap_update_bits(power->regmap,
- AXP20X_VBUS_IPSOUT_MGMT,
- AXP20X_VBUS_PATH_SEL, val);
-}
-
static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
int intval)
{
@@ -316,50 +257,17 @@ static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
return -EINVAL;
}
-static int axp813_usb_power_set_current_max(struct axp20x_usb_power *power,
- int intval)
+static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power, int intval)
{
- int val;
+ const unsigned int max = GENMASK(power->axp_data->curr_lim_fld.msb,
+ power->axp_data->curr_lim_fld.lsb);
- switch (intval) {
- case 900000:
- return regmap_update_bits(power->regmap,
- AXP20X_VBUS_IPSOUT_MGMT,
- AXP20X_VBUS_CLIMIT_MASK,
- AXP813_VBUS_CLIMIT_900mA);
- case 1500000:
- case 2000000:
- case 2500000:
- val = (intval - 1000000) / 500000;
- return regmap_update_bits(power->regmap,
- AXP20X_VBUS_IPSOUT_MGMT,
- AXP20X_VBUS_CLIMIT_MASK, val);
- default:
+ if (intval == -1)
return -EINVAL;
- }
-
- return -EINVAL;
-}
-
-static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power,
- int intval)
-{
- int val;
- switch (intval) {
- case 100000:
- if (power->axp20x_id == AXP221_ID)
- return -EINVAL;
- fallthrough;
- case 500000:
- case 900000:
- val = (900000 - intval) / 400000;
- return regmap_update_bits(power->regmap,
- AXP20X_VBUS_IPSOUT_MGMT,
- AXP20X_VBUS_CLIMIT_MASK, val);
- default:
- return -EINVAL;
- }
+ for (unsigned int i = 0; i <= max; ++i)
+ if (power->axp_data->curr_lim_table[i] == intval)
+ return regmap_field_write(power->curr_lim_fld, i);
return -EINVAL;
}
@@ -372,17 +280,15 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- if (power->axp20x_id != AXP813_ID)
+ if (!power->vbus_disable_bit)
return -EINVAL;
- return axp813_usb_power_set_online(power, val->intval);
+
+ return regmap_field_write(power->vbus_disable_bit, !val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
return axp20x_usb_power_set_voltage_min(power, val->intval);
case POWER_SUPPLY_PROP_CURRENT_MAX:
- if (power->axp20x_id == AXP813_ID)
- return axp813_usb_power_set_current_max(power,
- val->intval);
return axp20x_usb_power_set_current_max(power, val->intval);
default:
@@ -405,7 +311,7 @@ static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
* the VBUS input offline.
*/
if (psp == POWER_SUPPLY_PROP_ONLINE)
- return power->axp20x_id == AXP813_ID;
+ return power->vbus_disable_bit != NULL;
return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
psp == POWER_SUPPLY_PROP_CURRENT_MAX;
@@ -461,39 +367,81 @@ static const char * const axp22x_irq_names[] = {
"VBUS_REMOVAL",
};
-struct axp_data {
- const struct power_supply_desc *power_desc;
- const char * const *irq_names;
- unsigned int num_irq_names;
- enum axp20x_variants axp20x_id;
+static int axp192_usb_curr_lim_table[] = {
+ -1,
+ -1,
+ 500000,
+ 100000,
+};
+
+static int axp20x_usb_curr_lim_table[] = {
+ 900000,
+ 500000,
+ 100000,
+ -1,
+};
+
+static int axp221_usb_curr_lim_table[] = {
+ 900000,
+ 500000,
+ -1,
+ -1,
+};
+
+static int axp813_usb_curr_lim_table[] = {
+ 900000,
+ 1500000,
+ 2000000,
+ 2500000,
+};
+
+static const struct axp_data axp192_data = {
+ .power_desc = &axp20x_usb_power_desc,
+ .irq_names = axp20x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp20x_irq_names),
+ .curr_lim_table = axp192_usb_curr_lim_table,
+ .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
+ .vbus_valid_bit = REG_FIELD(AXP192_USB_OTG_STATUS, 2, 2),
+ .vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3),
};
static const struct axp_data axp202_data = {
.power_desc = &axp20x_usb_power_desc,
.irq_names = axp20x_irq_names,
.num_irq_names = ARRAY_SIZE(axp20x_irq_names),
- .axp20x_id = AXP202_ID,
+ .curr_lim_table = axp20x_usb_curr_lim_table,
+ .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
+ .vbus_valid_bit = REG_FIELD(AXP20X_USB_OTG_STATUS, 2, 2),
+ .vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3),
};
static const struct axp_data axp221_data = {
.power_desc = &axp22x_usb_power_desc,
.irq_names = axp22x_irq_names,
.num_irq_names = ARRAY_SIZE(axp22x_irq_names),
- .axp20x_id = AXP221_ID,
+ .curr_lim_table = axp221_usb_curr_lim_table,
+ .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
+ .vbus_needs_polling = true,
};
static const struct axp_data axp223_data = {
.power_desc = &axp22x_usb_power_desc,
.irq_names = axp22x_irq_names,
.num_irq_names = ARRAY_SIZE(axp22x_irq_names),
- .axp20x_id = AXP223_ID,
+ .curr_lim_table = axp20x_usb_curr_lim_table,
+ .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
+ .vbus_needs_polling = true,
};
static const struct axp_data axp813_data = {
.power_desc = &axp22x_usb_power_desc,
.irq_names = axp22x_irq_names,
.num_irq_names = ARRAY_SIZE(axp22x_irq_names),
- .axp20x_id = AXP813_ID,
+ .curr_lim_table = axp813_usb_curr_lim_table,
+ .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
+ .usb_bc_en_bit = REG_FIELD(AXP288_BC_GLOBAL, 0, 0),
+ .vbus_disable_bit = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 7, 7),
+ .vbus_needs_polling = true,
};
#ifdef CONFIG_PM_SLEEP
@@ -565,6 +513,26 @@ static int configure_adc_registers(struct axp20x_usb_power *power)
AXP20X_ADC_EN1_VBUS_VOLT);
}
+static int axp20x_regmap_field_alloc_optional(struct device *dev,
+ struct regmap *regmap,
+ struct reg_field fdesc,
+ struct regmap_field **fieldp)
+{
+ struct regmap_field *field;
+
+ if (fdesc.reg == 0) {
+ *fieldp = NULL;
+ return 0;
+ }
+
+ field = devm_regmap_field_alloc(dev, regmap, fdesc);
+ if (IS_ERR(field))
+ return PTR_ERR(field);
+
+ *fieldp = field;
+ return 0;
+}
+
static int axp20x_usb_power_probe(struct platform_device *pdev)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
@@ -591,20 +559,47 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, power);
- power->axp20x_id = axp_data->axp20x_id;
+ power->axp_data = axp_data;
power->regmap = axp20x->regmap;
power->num_irqs = axp_data->num_irq_names;
+ power->curr_lim_fld = devm_regmap_field_alloc(&pdev->dev, power->regmap,
+ axp_data->curr_lim_fld);
+ if (IS_ERR(power->curr_lim_fld))
+ return PTR_ERR(power->curr_lim_fld);
+
+ ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap,
+ axp_data->vbus_valid_bit,
+ &power->vbus_valid_bit);
+ if (ret)
+ return ret;
+
+ ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap,
+ axp_data->vbus_mon_bit,
+ &power->vbus_mon_bit);
+ if (ret)
+ return ret;
+
+ ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap,
+ axp_data->usb_bc_en_bit,
+ &power->usb_bc_en_bit);
+ if (ret)
+ return ret;
+
+ ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap,
+ axp_data->vbus_disable_bit,
+ &power->vbus_disable_bit);
+ if (ret)
+ return ret;
+
ret = devm_delayed_work_autocancel(&pdev->dev, &power->vbus_detect,
axp20x_usb_power_poll_vbus);
if (ret)
return ret;
- if (power->axp20x_id == AXP202_ID) {
+ if (power->vbus_mon_bit) {
/* Enable vbus valid checking */
- ret = regmap_update_bits(power->regmap, AXP20X_VBUS_MON,
- AXP20X_VBUS_MON_VBUS_VALID,
- AXP20X_VBUS_MON_VBUS_VALID);
+ ret = regmap_field_write(power->vbus_mon_bit, 1);
if (ret)
return ret;
@@ -617,10 +612,9 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return ret;
}
- if (power->axp20x_id == AXP813_ID) {
+ if (power->usb_bc_en_bit) {
/* Enable USB Battery Charging specification detection */
- ret = regmap_update_bits(axp20x->regmap, AXP288_BC_GLOBAL,
- AXP813_BC_EN, AXP813_BC_EN);
+ ret = regmap_field_write(power->usb_bc_en_bit, 1);
if (ret)
return ret;
}
@@ -659,6 +653,9 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
static const struct of_device_id axp20x_usb_power_match[] = {
{
+ .compatible = "x-powers,axp192-usb-power-supply",
+ .data = &axp192_data,
+ }, {
.compatible = "x-powers,axp202-usb-power-supply",
.data = &axp202_data,
}, {
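
Note on the axp20x_usb_power rework above: the per-variant axp20x_id checks are replaced with a data table of reg_fields plus a current-limit lookup table; optional features leave their reg_field description at 0, so probe leaves the corresponding regmap_field pointer NULL and the feature is skipped. A sketch of how the table-driven CURRENT_MAX read now works, assuming a fully probed instance:

/* Sketch: table-driven CURRENT_MAX read after the rework above. */
static int example_get_current_max(struct axp20x_usb_power *power, int *uA)
{
	unsigned int idx;
	int ret;

	ret = regmap_field_read(power->curr_lim_fld, &idx);
	if (ret)
		return ret;

	/* The per-variant table maps the register field to microamps (-1 = no limit). */
	*uA = power->axp_data->curr_lim_table[idx];
	return 0;
}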
diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c
index 250362e15c98..086dcf4033c1 100644
--- a/drivers/power/supply/bd99954-charger.c
+++ b/drivers/power/supply/bd99954-charger.c
@@ -1077,7 +1077,7 @@ static struct i2c_driver bd9995x_driver = {
.name = "bd9995x-charger",
.of_match_table = bd9995x_of_match,
},
- .probe_new = bd9995x_probe,
+ .probe = bd9995x_probe,
};
module_i2c_driver(bd9995x_driver);
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 349b69d634e6..6a4798a62588 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1780,7 +1780,7 @@ static struct i2c_driver bq2415x_driver = {
.of_match_table = of_match_ptr(bq2415x_of_match_table),
.acpi_match_table = ACPI_PTR(bq2415x_i2c_acpi_match),
},
- .probe_new = bq2415x_probe,
+ .probe = bq2415x_probe,
.remove = bq2415x_remove,
.id_table = bq2415x_i2c_id_table,
};
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index dc33f00fcc06..ef8235848f56 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -2034,7 +2034,7 @@ static const struct of_device_id bq24190_of_match[] = {
MODULE_DEVICE_TABLE(of, bq24190_of_match);
static struct i2c_driver bq24190_driver = {
- .probe_new = bq24190_probe,
+ .probe = bq24190_probe,
.remove = bq24190_remove,
.shutdown = bq24190_shutdown,
.id_table = bq24190_i2c_ids,
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 45e4ba30da98..2852860abf86 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -1165,7 +1165,7 @@ static struct i2c_driver bq24257_driver = {
.acpi_match_table = ACPI_PTR(bq24257_acpi_match),
.pm = &bq24257_pm,
},
- .probe_new = bq24257_probe,
+ .probe = bq24257_probe,
.remove = bq24257_remove,
.id_table = bq24257_i2c_ids,
};
diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index cfca3a82d5a8..8efceeae864c 100644
--- a/drivers/power/supply/bq24735-charger.c
+++ b/drivers/power/supply/bq24735-charger.c
@@ -505,7 +505,7 @@ static struct i2c_driver bq24735_charger_driver = {
.name = "bq24735-charger",
.of_match_table = bq24735_match_ids,
},
- .probe_new = bq24735_charger_probe,
+ .probe = bq24735_charger_probe,
.id_table = bq24735_charger_id,
};
diff --git a/drivers/power/supply/bq2515x_charger.c b/drivers/power/supply/bq2515x_charger.c
index da224ae8dc61..1dbacc9b015d 100644
--- a/drivers/power/supply/bq2515x_charger.c
+++ b/drivers/power/supply/bq2515x_charger.c
@@ -1158,7 +1158,7 @@ static struct i2c_driver bq2515x_driver = {
.name = "bq2515x-charger",
.of_match_table = bq2515x_of_match,
},
- .probe_new = bq2515x_probe,
+ .probe = bq2515x_probe,
.id_table = bq2515x_i2c_ids,
};
module_i2c_driver(bq2515x_driver);
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index e624834ae66c..82d3cd5ee2f9 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -41,6 +41,9 @@
#define BQ256XX_IINDPM_MAX_uA 3200000
#define BQ256XX_IINDPM_DEF_uA 2400000
+#define BQ256XX_TS_IGNORE BIT(6)
+#define BQ256XX_TS_IGNORE_SHIFT 6
+
#define BQ256XX_VINDPM_MASK GENMASK(3, 0)
#define BQ256XX_VINDPM_STEP_uV 100000
#define BQ256XX_VINDPM_OFFSET_uV 3900000
@@ -156,6 +159,7 @@
* @vindpm: input voltage limit
* @ichg_max: maximum fast charge current
* @vbatreg_max: maximum charge voltage
+ * @ts_ignore: ignore the TS (thermistor) input when true
*/
struct bq256xx_init_data {
u32 ichg;
@@ -166,6 +170,7 @@ struct bq256xx_init_data {
u32 vindpm;
u32 ichg_max;
u32 vbatreg_max;
+ bool ts_ignore;
};
/**
@@ -263,6 +268,7 @@ struct bq256xx_device {
* @bq256xx_set_iprechg: pointer to instance specific set_iprechg function
* @bq256xx_set_vindpm: pointer to instance specific set_vindpm function
* @bq256xx_set_charge_type: pointer to instance specific set_charge_type function
+ * @bq256xx_set_ts_ignore: pointer to instance specific set_ts_ignore function
*
* @bq256xx_def_ichg: default ichg value in microamps
* @bq256xx_def_iindpm: default iindpm value in microamps
@@ -295,6 +301,7 @@ struct bq256xx_chip_info {
int (*bq256xx_set_iprechg)(struct bq256xx_device *bq, int iprechg);
int (*bq256xx_set_vindpm)(struct bq256xx_device *bq, int vindpm);
int (*bq256xx_set_charge_type)(struct bq256xx_device *bq, int type);
+ int (*bq256xx_set_ts_ignore)(struct bq256xx_device *bq, bool ts_ignore);
int bq256xx_def_ichg;
int bq256xx_def_iindpm;
@@ -696,6 +703,12 @@ static int bq25601d_set_chrg_volt(struct bq256xx_device *bq, int vbatreg)
BQ256XX_VBATREG_BIT_SHIFT);
}
+static int bq256xx_set_ts_ignore(struct bq256xx_device *bq, bool ts_ignore)
+{
+ return regmap_update_bits(bq->regmap, BQ256XX_INPUT_CURRENT_LIMIT,
+ BQ256XX_TS_IGNORE, (ts_ignore ? 1 : 0) << BQ256XX_TS_IGNORE_SHIFT);
+}
+
static int bq256xx_get_prechrg_curr(struct bq256xx_device *bq)
{
unsigned int prechg_and_term_curr_lim;
@@ -1312,6 +1325,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_get_iterm = bq256xx_get_term_curr,
.bq256xx_get_iprechg = bq256xx_get_prechrg_curr,
.bq256xx_get_vindpm = bq256xx_get_input_volt_lim,
+ .bq256xx_set_ts_ignore = NULL,
.bq256xx_set_ichg = bq256xx_set_ichg_curr,
.bq256xx_set_iindpm = bq256xx_set_input_curr_lim,
@@ -1351,6 +1365,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = NULL,
.bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1382,6 +1397,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = NULL,
.bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1413,6 +1429,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = NULL,
.bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1444,6 +1461,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = bq256xx_set_ts_ignore,
.bq256xx_def_ichg = BQ25611D_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1475,6 +1493,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq25618_619_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = bq256xx_set_ts_ignore,
.bq256xx_def_ichg = BQ25618_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1506,6 +1525,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq25618_619_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = bq256xx_set_ts_ignore,
.bq256xx_def_ichg = BQ25618_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1622,6 +1642,12 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
if (ret)
return ret;
+ if (bq->chip_info->bq256xx_set_ts_ignore) {
+ ret = bq->chip_info->bq256xx_set_ts_ignore(bq, bq->init_data.ts_ignore);
+ if (ret)
+ return ret;
+ }
+
power_supply_put_battery_info(bq->charger, bat_info);
return 0;
@@ -1656,6 +1682,8 @@ static int bq256xx_parse_dt(struct bq256xx_device *bq,
if (ret)
bq->init_data.iindpm = bq->chip_info->bq256xx_def_iindpm;
+ bq->init_data.ts_ignore = device_property_read_bool(bq->dev, "ti,no-thermistor");
+
return 0;
}
@@ -1784,7 +1812,7 @@ static struct i2c_driver bq256xx_driver = {
.of_match_table = bq256xx_of_match,
.acpi_match_table = bq256xx_acpi_match,
},
- .probe_new = bq256xx_probe,
+ .probe = bq256xx_probe,
.id_table = bq256xx_i2c_ids,
};
module_i2c_driver(bq256xx_driver);
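
Note on the bq256xx change above: only the chip variants that actually have a TS_IGNORE bit point bq256xx_set_ts_ignore at the new helper; the others keep it NULL, so bq256xx_hw_init() skips the write. The value itself comes from the "ti,no-thermistor" property. Compact sketch of that optional-callback guard:

/* Sketch of the optional ts_ignore hook used in bq256xx_hw_init() above. */
static int example_apply_ts_ignore(struct bq256xx_device *bq)
{
	if (!bq->chip_info->bq256xx_set_ts_ignore)
		return 0;	/* this variant has no TS_IGNORE bit */

	return bq->chip_info->bq256xx_set_ts_ignore(bq, bq->init_data.ts_ignore);
}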
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index f8636cf86505..03fa11a1c9b6 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -1649,7 +1649,7 @@ static struct i2c_driver bq25890_driver = {
.acpi_match_table = ACPI_PTR(bq25890_acpi_match),
.pm = &bq25890_pm,
},
- .probe_new = bq25890_probe,
+ .probe = bq25890_probe,
.remove = bq25890_remove,
.shutdown = bq25890_shutdown,
.id_table = bq25890_i2c_ids,
diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
index a59d9762bc91..d8411722266f 100644
--- a/drivers/power/supply/bq25980_charger.c
+++ b/drivers/power/supply/bq25980_charger.c
@@ -1287,7 +1287,7 @@ static struct i2c_driver bq25980_driver = {
.name = "bq25980-charger",
.of_match_table = bq25980_of_match,
},
- .probe_new = bq25980_probe,
+ .probe = bq25980_probe,
.id_table = bq25980_i2c_ids,
};
module_i2c_driver(bq25980_driver);
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 6d3c74876339..9b5475590518 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -296,7 +296,7 @@ static struct i2c_driver bq27xxx_battery_i2c_driver = {
.name = "bq27xxx-battery",
.of_match_table = of_match_ptr(bq27xxx_battery_i2c_of_match_table),
},
- .probe_new = bq27xxx_battery_i2c_probe,
+ .probe = bq27xxx_battery_i2c_probe,
.remove = bq27xxx_battery_i2c_remove,
.id_table = bq27xxx_i2c_id_table,
};
diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c
index 1379afd9698d..a204f2355be4 100644
--- a/drivers/power/supply/cros_peripheral_charger.c
+++ b/drivers/power/supply/cros_peripheral_charger.c
@@ -227,8 +227,7 @@ static int cros_pchg_get_prop(struct power_supply *psy,
return 0;
}
-static int cros_pchg_event(const struct charger_data *charger,
- unsigned long host_event)
+static int cros_pchg_event(const struct charger_data *charger)
{
int i;
@@ -256,7 +255,7 @@ static int cros_ec_notify(struct notifier_block *nb,
if (!(host_event & EC_MKBP_PCHG_DEVICE_EVENT))
return NOTIFY_DONE;
- return cros_pchg_event(charger, host_event);
+ return cros_pchg_event(charger);
}
static int cros_pchg_probe(struct platform_device *pdev)
@@ -281,6 +280,8 @@ static int cros_pchg_probe(struct platform_device *pdev)
charger->ec_dev = ec_dev;
charger->ec_device = ec_device;
+ platform_set_drvdata(pdev, charger);
+
ret = cros_pchg_port_count(charger);
if (ret <= 0) {
/*
@@ -349,9 +350,27 @@ static int cros_pchg_probe(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int __maybe_unused cros_pchg_resume(struct device *dev)
+{
+ struct charger_data *charger = dev_get_drvdata(dev);
+
+ /*
+ * Sync all ports on resume in case reports from EC are lost during
+ * the last suspend.
+ */
+ cros_pchg_event(charger);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_pchg_pm_ops, NULL, cros_pchg_resume);
+
static struct platform_driver cros_pchg_driver = {
.driver = {
.name = DRV_NAME,
+ .pm = &cros_pchg_pm_ops,
},
.probe = cros_pchg_probe
};
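
Note on the cros_peripheral_charger change above: a resume-only PM hook re-syncs peripheral-charge state after suspend, since EC reports may have been lost while the host slept. Generic sketch of the resume-only SIMPLE_DEV_PM_OPS pattern; the device and helper names are invented:

/* Sketch of a resume-only PM hook; device and helper names are invented. */
static int example_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);	/* hypothetical driver data */

	example_sync_state(priv);	/* re-read whatever may have changed during suspend */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, NULL, example_resume);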
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 473522b4326a..bb29e9ebd24a 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -747,7 +747,7 @@ static struct i2c_driver cw_bat_driver = {
.of_match_table = cw2015_of_match,
.pm = &cw_bat_pm_ops,
},
- .probe_new = cw_bat_probe,
+ .probe = cw_bat_probe,
.id_table = cw_bat_id_table,
};
diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c
index 9b9619246902..85aa9c465aa4 100644
--- a/drivers/power/supply/ds2782_battery.c
+++ b/drivers/power/supply/ds2782_battery.c
@@ -458,7 +458,7 @@ static struct i2c_driver ds278x_battery_driver = {
.name = "ds2782-battery",
.pm = &ds278x_battery_pm_ops,
},
- .probe_new = ds278x_battery_probe,
+ .probe = ds278x_battery_probe,
.remove = ds278x_battery_remove,
.id_table = ds278x_id,
};
diff --git a/drivers/power/supply/ip5xxx_power.c b/drivers/power/supply/ip5xxx_power.c
index 00221e9c0bfc..82263646ddc6 100644
--- a/drivers/power/supply/ip5xxx_power.c
+++ b/drivers/power/supply/ip5xxx_power.c
@@ -625,7 +625,7 @@ static const struct of_device_id ip5xxx_power_of_match[] = {
MODULE_DEVICE_TABLE(of, ip5xxx_power_of_match);
static struct i2c_driver ip5xxx_power_driver = {
- .probe_new = ip5xxx_power_probe,
+ .probe = ip5xxx_power_probe,
.driver = {
.name = "ip5xxx-power",
.of_match_table = ip5xxx_power_of_match,
diff --git a/drivers/power/supply/lp8727_charger.c b/drivers/power/supply/lp8727_charger.c
index dc42d354b892..0875391f7ac6 100644
--- a/drivers/power/supply/lp8727_charger.c
+++ b/drivers/power/supply/lp8727_charger.c
@@ -615,7 +615,7 @@ static struct i2c_driver lp8727_driver = {
.name = "lp8727",
.of_match_table = of_match_ptr(lp8727_dt_ids),
},
- .probe_new = lp8727_probe,
+ .probe = lp8727_probe,
.remove = lp8727_remove,
.id_table = lp8727_ids,
};
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index d3fb42825983..cad39a8f829d 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -635,7 +635,7 @@ static struct i2c_driver ltc294x_driver = {
.of_match_table = ltc294x_i2c_of_match,
.pm = LTC294X_PM_OPS,
},
- .probe_new = ltc294x_i2c_probe,
+ .probe = ltc294x_i2c_probe,
.shutdown = ltc294x_i2c_shutdown,
.id_table = ltc294x_i2c_id,
};
diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c
index 285580845e2f..ec5b4a20ad43 100644
--- a/drivers/power/supply/ltc4162-l-charger.c
+++ b/drivers/power/supply/ltc4162-l-charger.c
@@ -915,7 +915,7 @@ static const struct of_device_id ltc4162l_of_match[] __maybe_unused = {
MODULE_DEVICE_TABLE(of, ltc4162l_of_match);
static struct i2c_driver ltc4162l_driver = {
- .probe_new = ltc4162l_probe,
+ .probe = ltc4162l_probe,
.alert = ltc4162l_alert,
.id_table = ltc4162l_i2c_id_table,
.driver = {
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
index 0d0180fcfa63..fbdf12cf64eb 100644
--- a/drivers/power/supply/max14656_charger_detector.c
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -316,7 +316,7 @@ static struct i2c_driver max14656_i2c_driver = {
.name = "max14656",
.of_match_table = max14656_match_table,
},
- .probe_new = max14656_probe,
+ .probe = max14656_probe,
.id_table = max14656_id,
};
module_i2c_driver(max14656_i2c_driver);
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index d1075959dd46..22ea7de47a53 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -599,7 +599,7 @@ static struct i2c_driver max17040_i2c_driver = {
.of_match_table = max17040_of_match,
.pm = MAX17040_PM_OPS,
},
- .probe_new = max17040_probe,
+ .probe = max17040_probe,
.id_table = max17040_id,
};
module_i2c_driver(max17040_i2c_driver);
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 89cabe8ed3b0..17ac2ab78c4e 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -499,11 +499,6 @@ static int max17042_property_is_writeable(struct power_supply *psy,
return ret;
}
-static void max17042_external_power_changed(struct power_supply *psy)
-{
- power_supply_changed(psy);
-}
-
static int max17042_write_verify_reg(struct regmap *map, u8 reg, u32 value)
{
int retries = 8;
@@ -1016,7 +1011,7 @@ static const struct power_supply_desc max17042_psy_desc = {
.get_property = max17042_get_property,
.set_property = max17042_set_property,
.property_is_writeable = max17042_property_is_writeable,
- .external_power_changed = max17042_external_power_changed,
+ .external_power_changed = power_supply_changed,
.properties = max17042_battery_props,
.num_properties = ARRAY_SIZE(max17042_battery_props),
};
@@ -1220,7 +1215,7 @@ static struct i2c_driver max17042_i2c_driver = {
.of_match_table = of_match_ptr(max17042_dt_match),
.pm = &max17042_pm_ops,
},
- .probe_new = max17042_probe,
+ .probe = max17042_probe,
.id_table = max17042_id,
};
module_i2c_driver(max17042_i2c_driver);
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
index 4fed74511931..99659dc8f5a6 100644
--- a/drivers/power/supply/max77976_charger.c
+++ b/drivers/power/supply/max77976_charger.c
@@ -499,7 +499,7 @@ static struct i2c_driver max77976_driver = {
.name = MAX77976_DRIVER_NAME,
.of_match_table = max77976_of_id,
},
- .probe_new = max77976_probe,
+ .probe = max77976_probe,
.id_table = max77976_i2c_id,
};
module_i2c_driver(max77976_driver);
diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
index a48aa4afb828..c97893d4c25e 100644
--- a/drivers/power/supply/power_supply_hwmon.c
+++ b/drivers/power/supply/power_supply_hwmon.c
@@ -293,7 +293,7 @@ static const struct hwmon_ops power_supply_hwmon_ops = {
.read_string = power_supply_hwmon_read_string,
};
-static const struct hwmon_channel_info *power_supply_hwmon_info[] = {
+static const struct hwmon_channel_info * const power_supply_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp,
HWMON_T_LABEL |
HWMON_T_INPUT |
diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
index 0674483279d7..c7db29d5fcb8 100644
--- a/drivers/power/supply/power_supply_leds.c
+++ b/drivers/power/supply/power_supply_leds.c
@@ -22,8 +22,6 @@
static void power_supply_update_bat_leds(struct power_supply *psy)
{
union power_supply_propval status;
- unsigned long delay_on = 0;
- unsigned long delay_off = 0;
if (power_supply_get_property(psy, POWER_SUPPLY_PROP_STATUS, &status))
return;
@@ -43,8 +41,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_FULL);
led_trigger_event(psy->full_trig, LED_OFF);
- led_trigger_blink(psy->charging_blink_full_solid_trig,
- &delay_on, &delay_off);
+ led_trigger_blink(psy->charging_blink_full_solid_trig, 0, 0);
break;
default:
led_trigger_event(psy->charging_full_trig, LED_OFF);
diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
new file mode 100644
index 000000000000..d16c5ee17249
--- /dev/null
+++ b/drivers/power/supply/qcom_pmi8998_charger.c
@@ -0,0 +1,1059 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd.
+ * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ *
+ * This driver is for the switch-mode battery charger and boost
+ * hardware found in pmi8998 and related PMICs.
+ */
+
+#include <linux/bits.h>
+#include <linux/devm-helpers.h>
+#include <linux/iio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/of.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* clang-format off */
+#define BATTERY_CHARGER_STATUS_1 0x06
+#define BVR_INITIAL_RAMP_BIT BIT(7)
+#define CC_SOFT_TERMINATE_BIT BIT(6)
+#define STEP_CHARGING_STATUS_SHIFT 3
+#define STEP_CHARGING_STATUS_MASK GENMASK(5, 3)
+#define BATTERY_CHARGER_STATUS_MASK GENMASK(2, 0)
+
+#define BATTERY_CHARGER_STATUS_2 0x07
+#define INPUT_CURRENT_LIMITED_BIT BIT(7)
+#define CHARGER_ERROR_STATUS_SFT_EXPIRE_BIT BIT(6)
+#define CHARGER_ERROR_STATUS_BAT_OV_BIT BIT(5)
+#define CHARGER_ERROR_STATUS_BAT_TERM_MISSING_BIT BIT(4)
+#define BAT_TEMP_STATUS_MASK GENMASK(3, 0)
+#define BAT_TEMP_STATUS_SOFT_LIMIT_MASK GENMASK(3, 2)
+#define BAT_TEMP_STATUS_HOT_SOFT_LIMIT_BIT BIT(3)
+#define BAT_TEMP_STATUS_COLD_SOFT_LIMIT_BIT BIT(2)
+#define BAT_TEMP_STATUS_TOO_HOT_BIT BIT(1)
+#define BAT_TEMP_STATUS_TOO_COLD_BIT BIT(0)
+
+#define BATTERY_CHARGER_STATUS_4 0x0A
+#define CHARGE_CURRENT_POST_JEITA_MASK GENMASK(7, 0)
+
+#define BATTERY_CHARGER_STATUS_7 0x0D
+#define ENABLE_TRICKLE_BIT BIT(7)
+#define ENABLE_PRE_CHARGING_BIT BIT(6)
+#define ENABLE_FAST_CHARGING_BIT BIT(5)
+#define ENABLE_FULLON_MODE_BIT BIT(4)
+#define TOO_COLD_ADC_BIT BIT(3)
+#define TOO_HOT_ADC_BIT BIT(2)
+#define HOT_SL_ADC_BIT BIT(1)
+#define COLD_SL_ADC_BIT BIT(0)
+
+#define CHARGING_ENABLE_CMD 0x42
+#define CHARGING_ENABLE_CMD_BIT BIT(0)
+
+#define CHGR_CFG2 0x51
+#define CHG_EN_SRC_BIT BIT(7)
+#define CHG_EN_POLARITY_BIT BIT(6)
+#define PRETOFAST_TRANSITION_CFG_BIT BIT(5)
+#define BAT_OV_ECC_BIT BIT(4)
+#define I_TERM_BIT BIT(3)
+#define AUTO_RECHG_BIT BIT(2)
+#define EN_ANALOG_DROP_IN_VBATT_BIT BIT(1)
+#define CHARGER_INHIBIT_BIT BIT(0)
+
+#define PRE_CHARGE_CURRENT_CFG 0x60
+#define PRE_CHARGE_CURRENT_SETTING_MASK GENMASK(5, 0)
+
+#define FAST_CHARGE_CURRENT_CFG 0x61
+#define FAST_CHARGE_CURRENT_SETTING_MASK GENMASK(7, 0)
+
+#define FLOAT_VOLTAGE_CFG 0x70
+#define FLOAT_VOLTAGE_SETTING_MASK GENMASK(7, 0)
+
+#define FG_UPDATE_CFG_2_SEL 0x7D
+#define SOC_LT_OTG_THRESH_SEL_BIT BIT(3)
+#define SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT BIT(2)
+#define VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT BIT(1)
+#define IBT_LT_CHG_TERM_THRESH_SEL_BIT BIT(0)
+
+#define JEITA_EN_CFG 0x90
+#define JEITA_EN_HARDLIMIT_BIT BIT(4)
+#define JEITA_EN_HOT_SL_FCV_BIT BIT(3)
+#define JEITA_EN_COLD_SL_FCV_BIT BIT(2)
+#define JEITA_EN_HOT_SL_CCC_BIT BIT(1)
+#define JEITA_EN_COLD_SL_CCC_BIT BIT(0)
+
+#define INT_RT_STS 0x310
+#define TYPE_C_CHANGE_RT_STS_BIT BIT(7)
+#define USBIN_ICL_CHANGE_RT_STS_BIT BIT(6)
+#define USBIN_SOURCE_CHANGE_RT_STS_BIT BIT(5)
+#define USBIN_PLUGIN_RT_STS_BIT BIT(4)
+#define USBIN_OV_RT_STS_BIT BIT(3)
+#define USBIN_UV_RT_STS_BIT BIT(2)
+#define USBIN_LT_3P6V_RT_STS_BIT BIT(1)
+#define USBIN_COLLAPSE_RT_STS_BIT BIT(0)
+
+#define OTG_CFG 0x153
+#define OTG_RESERVED_MASK GENMASK(7, 6)
+#define DIS_OTG_ON_TLIM_BIT BIT(5)
+#define QUICKSTART_OTG_FASTROLESWAP_BIT BIT(4)
+#define INCREASE_DFP_TIME_BIT BIT(3)
+#define ENABLE_OTG_IN_DEBUG_MODE_BIT BIT(2)
+#define OTG_EN_SRC_CFG_BIT BIT(1)
+#define CONCURRENT_MODE_CFG_BIT BIT(0)
+
+#define OTG_ENG_OTG_CFG 0x1C0
+#define ENG_BUCKBOOST_HALT1_8_MODE_BIT BIT(0)
+
+#define APSD_STATUS 0x307
+#define APSD_STATUS_7_BIT BIT(7)
+#define HVDCP_CHECK_TIMEOUT_BIT BIT(6)
+#define SLOW_PLUGIN_TIMEOUT_BIT BIT(5)
+#define ENUMERATION_DONE_BIT BIT(4)
+#define VADP_CHANGE_DONE_AFTER_AUTH_BIT BIT(3)
+#define QC_AUTH_DONE_STATUS_BIT BIT(2)
+#define QC_CHARGER_BIT BIT(1)
+#define APSD_DTC_STATUS_DONE_BIT BIT(0)
+
+#define APSD_RESULT_STATUS 0x308
+#define ICL_OVERRIDE_LATCH_BIT BIT(7)
+#define APSD_RESULT_STATUS_MASK GENMASK(6, 0)
+#define QC_3P0_BIT BIT(6)
+#define QC_2P0_BIT BIT(5)
+#define FLOAT_CHARGER_BIT BIT(4)
+#define DCP_CHARGER_BIT BIT(3)
+#define CDP_CHARGER_BIT BIT(2)
+#define OCP_CHARGER_BIT BIT(1)
+#define SDP_CHARGER_BIT BIT(0)
+
+#define TYPE_C_STATUS_1 0x30B
+#define UFP_TYPEC_MASK GENMASK(7, 5)
+#define UFP_TYPEC_RDSTD_BIT BIT(7)
+#define UFP_TYPEC_RD1P5_BIT BIT(6)
+#define UFP_TYPEC_RD3P0_BIT BIT(5)
+#define UFP_TYPEC_FMB_255K_BIT BIT(4)
+#define UFP_TYPEC_FMB_301K_BIT BIT(3)
+#define UFP_TYPEC_FMB_523K_BIT BIT(2)
+#define UFP_TYPEC_FMB_619K_BIT BIT(1)
+#define UFP_TYPEC_OPEN_OPEN_BIT BIT(0)
+
+#define TYPE_C_STATUS_2 0x30C
+#define DFP_RA_OPEN_BIT BIT(7)
+#define TIMER_STAGE_BIT BIT(6)
+#define EXIT_UFP_MODE_BIT BIT(5)
+#define EXIT_DFP_MODE_BIT BIT(4)
+#define DFP_TYPEC_MASK GENMASK(3, 0)
+#define DFP_RD_OPEN_BIT BIT(3)
+#define DFP_RD_RA_VCONN_BIT BIT(2)
+#define DFP_RD_RD_BIT BIT(1)
+#define DFP_RA_RA_BIT BIT(0)
+
+#define TYPE_C_STATUS_3 0x30D
+#define ENABLE_BANDGAP_BIT BIT(7)
+#define U_USB_GND_NOVBUS_BIT BIT(6)
+#define U_USB_FLOAT_NOVBUS_BIT BIT(5)
+#define U_USB_GND_BIT BIT(4)
+#define U_USB_FMB1_BIT BIT(3)
+#define U_USB_FLOAT1_BIT BIT(2)
+#define U_USB_FMB2_BIT BIT(1)
+#define U_USB_FLOAT2_BIT BIT(0)
+
+#define TYPE_C_STATUS_4 0x30E
+#define UFP_DFP_MODE_STATUS_BIT BIT(7)
+#define TYPEC_VBUS_STATUS_BIT BIT(6)
+#define TYPEC_VBUS_ERROR_STATUS_BIT BIT(5)
+#define TYPEC_DEBOUNCE_DONE_STATUS_BIT BIT(4)
+#define TYPEC_UFP_AUDIO_ADAPT_STATUS_BIT BIT(3)
+#define TYPEC_VCONN_OVERCURR_STATUS_BIT BIT(2)
+#define CC_ORIENTATION_BIT BIT(1)
+#define CC_ATTACHED_BIT BIT(0)
+
+#define TYPE_C_STATUS_5 0x30F
+#define TRY_SOURCE_FAILED_BIT BIT(6)
+#define TRY_SINK_FAILED_BIT BIT(5)
+#define TIMER_STAGE_2_BIT BIT(4)
+#define TYPEC_LEGACY_CABLE_STATUS_BIT BIT(3)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT BIT(2)
+#define TYPEC_TRYSOURCE_DETECT_STATUS_BIT BIT(1)
+#define TYPEC_TRYSINK_DETECT_STATUS_BIT BIT(0)
+
+#define CMD_APSD 0x341
+#define ICL_OVERRIDE_BIT BIT(1)
+#define APSD_RERUN_BIT BIT(0)
+
+#define TYPE_C_CFG 0x358
+#define APSD_START_ON_CC_BIT BIT(7)
+#define WAIT_FOR_APSD_BIT BIT(6)
+#define FACTORY_MODE_DETECTION_EN_BIT BIT(5)
+#define FACTORY_MODE_ICL_3A_4A_BIT BIT(4)
+#define FACTORY_MODE_DIS_CHGING_CFG_BIT BIT(3)
+#define SUSPEND_NON_COMPLIANT_CFG_BIT BIT(2)
+#define VCONN_OC_CFG_BIT BIT(1)
+#define TYPE_C_OR_U_USB_BIT BIT(0)
+
+#define TYPE_C_CFG_2 0x359
+#define TYPE_C_DFP_CURRSRC_MODE_BIT BIT(7)
+#define DFP_CC_1P4V_OR_1P6V_BIT BIT(6)
+#define VCONN_SOFTSTART_CFG_MASK GENMASK(5, 4)
+#define EN_TRY_SOURCE_MODE_BIT BIT(3)
+#define USB_FACTORY_MODE_ENABLE_BIT BIT(2)
+#define TYPE_C_UFP_MODE_BIT BIT(1)
+#define EN_80UA_180UA_CUR_SOURCE_BIT BIT(0)
+
+#define TYPE_C_CFG_3 0x35A
+#define TVBUS_DEBOUNCE_BIT BIT(7)
+#define TYPEC_LEGACY_CABLE_INT_EN_BIT BIT(6)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_B BIT(5)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN_BIT BIT(4)
+#define TYPEC_TRYSINK_DETECT_INT_EN_BIT BIT(3)
+#define EN_TRYSINK_MODE_BIT BIT(2)
+#define EN_LEGACY_CABLE_DETECTION_BIT BIT(1)
+#define ALLOW_PD_DRING_UFP_TCCDB_BIT BIT(0)
+
+#define USBIN_OPTIONS_1_CFG 0x362
+#define CABLE_R_SEL_BIT BIT(7)
+#define HVDCP_AUTH_ALG_EN_CFG_BIT BIT(6)
+#define HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT BIT(5)
+#define INPUT_PRIORITY_BIT BIT(4)
+#define AUTO_SRC_DETECT_BIT BIT(3)
+#define HVDCP_EN_BIT BIT(2)
+#define VADP_INCREMENT_VOLTAGE_LIMIT_BIT BIT(1)
+#define VADP_TAPER_TIMER_EN_BIT BIT(0)
+
+#define USBIN_OPTIONS_2_CFG 0x363
+#define WIPWR_RST_EUD_CFG_BIT BIT(7)
+#define SWITCHER_START_CFG_BIT BIT(6)
+#define DCD_TIMEOUT_SEL_BIT BIT(5)
+#define OCD_CURRENT_SEL_BIT BIT(4)
+#define SLOW_PLUGIN_TIMER_EN_CFG_BIT BIT(3)
+#define FLOAT_OPTIONS_MASK GENMASK(2, 0)
+#define FLOAT_DIS_CHGING_CFG_BIT BIT(2)
+#define SUSPEND_FLOAT_CFG_BIT BIT(1)
+#define FORCE_FLOAT_SDP_CFG_BIT BIT(0)
+
+#define TAPER_TIMER_SEL_CFG 0x364
+#define TYPEC_SPARE_CFG_BIT BIT(7)
+#define TYPEC_DRP_DFP_TIME_CFG_BIT BIT(5)
+#define TAPER_TIMER_SEL_MASK GENMASK(1, 0)
+
+#define USBIN_LOAD_CFG 0x365
+#define USBIN_OV_CH_LOAD_OPTION_BIT BIT(7)
+#define ICL_OVERRIDE_AFTER_APSD_BIT BIT(4)
+
+#define USBIN_ICL_OPTIONS 0x366
+#define CFG_USB3P0_SEL_BIT BIT(2)
+#define USB51_MODE_BIT BIT(1)
+#define USBIN_MODE_CHG_BIT BIT(0)
+
+#define TYPE_C_INTRPT_ENB_SOFTWARE_CTRL 0x368
+#define EXIT_SNK_BASED_ON_CC_BIT BIT(7)
+#define VCONN_EN_ORIENTATION_BIT BIT(6)
+#define TYPEC_VCONN_OVERCURR_INT_EN_BIT BIT(5)
+#define VCONN_EN_SRC_BIT BIT(4)
+#define VCONN_EN_VALUE_BIT BIT(3)
+#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0)
+#define UFP_EN_CMD_BIT BIT(2)
+#define DFP_EN_CMD_BIT BIT(1)
+#define TYPEC_DISABLE_CMD_BIT BIT(0)
+
+#define USBIN_CURRENT_LIMIT_CFG 0x370
+#define USBIN_CURRENT_LIMIT_MASK GENMASK(7, 0)
+
+#define USBIN_AICL_OPTIONS_CFG 0x380
+#define SUSPEND_ON_COLLAPSE_USBIN_BIT BIT(7)
+#define USBIN_AICL_HDC_EN_BIT BIT(6)
+#define USBIN_AICL_START_AT_MAX_BIT BIT(5)
+#define USBIN_AICL_RERUN_EN_BIT BIT(4)
+#define USBIN_AICL_ADC_EN_BIT BIT(3)
+#define USBIN_AICL_EN_BIT BIT(2)
+#define USBIN_HV_COLLAPSE_RESPONSE_BIT BIT(1)
+#define USBIN_LV_COLLAPSE_RESPONSE_BIT BIT(0)
+
+#define USBIN_5V_AICL_THRESHOLD_CFG 0x381
+#define USBIN_5V_AICL_THRESHOLD_CFG_MASK GENMASK(2, 0)
+
+#define USBIN_CONT_AICL_THRESHOLD_CFG 0x384
+#define USBIN_CONT_AICL_THRESHOLD_CFG_MASK GENMASK(5, 0)
+
+#define DC_ENG_SSUPPLY_CFG2 0x4C1
+#define ENG_SSUPPLY_IVREF_OTG_SS_MASK GENMASK(2, 0)
+#define OTG_SS_SLOW 0x3
+
+#define DCIN_AICL_REF_SEL_CFG 0x481
+#define DCIN_CONT_AICL_THRESHOLD_CFG_MASK GENMASK(5, 0)
+
+#define WI_PWR_OPTIONS 0x495
+#define CHG_OK_BIT BIT(7)
+#define WIPWR_UVLO_IRQ_OPT_BIT BIT(6)
+#define BUCK_HOLDOFF_ENABLE_BIT BIT(5)
+#define CHG_OK_HW_SW_SELECT_BIT BIT(4)
+#define WIPWR_RST_ENABLE_BIT BIT(3)
+#define DCIN_WIPWR_IRQ_SELECT_BIT BIT(2)
+#define AICL_SWITCH_ENABLE_BIT BIT(1)
+#define ZIN_ICL_ENABLE_BIT BIT(0)
+
+#define ICL_STATUS 0x607
+#define INPUT_CURRENT_LIMIT_MASK GENMASK(7, 0)
+
+#define POWER_PATH_STATUS 0x60B
+#define P_PATH_INPUT_SS_DONE_BIT BIT(7)
+#define P_PATH_USBIN_SUSPEND_STS_BIT BIT(6)
+#define P_PATH_DCIN_SUSPEND_STS_BIT BIT(5)
+#define P_PATH_USE_USBIN_BIT BIT(4)
+#define P_PATH_USE_DCIN_BIT BIT(3)
+#define P_PATH_POWER_PATH_MASK GENMASK(2, 1)
+#define P_PATH_VALID_INPUT_POWER_SOURCE_STS_BIT BIT(0)
+
+#define BARK_BITE_WDOG_PET 0x643
+#define BARK_BITE_WDOG_PET_BIT BIT(0)
+
+#define WD_CFG 0x651
+#define WATCHDOG_TRIGGER_AFP_EN_BIT BIT(7)
+#define BARK_WDOG_INT_EN_BIT BIT(6)
+#define BITE_WDOG_INT_EN_BIT BIT(5)
+#define SFT_AFTER_WDOG_IRQ_MASK GENMASK(4, 3)
+#define WDOG_IRQ_SFT_BIT BIT(2)
+#define WDOG_TIMER_EN_ON_PLUGIN_BIT BIT(1)
+#define WDOG_TIMER_EN_BIT BIT(0)
+
+#define SNARL_BARK_BITE_WD_CFG 0x653
+#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT BIT(7)
+#define SNARL_WDOG_TIMEOUT_MASK GENMASK(6, 4)
+#define BARK_WDOG_TIMEOUT_MASK GENMASK(3, 2)
+#define BITE_WDOG_TIMEOUT_MASK GENMASK(1, 0)
+
+#define AICL_RERUN_TIME_CFG 0x661
+#define AICL_RERUN_TIME_MASK GENMASK(1, 0)
+
+#define STAT_CFG 0x690
+#define STAT_SW_OVERRIDE_VALUE_BIT BIT(7)
+#define STAT_SW_OVERRIDE_CFG_BIT BIT(6)
+#define STAT_PARALLEL_OFF_DG_CFG_MASK GENMASK(5, 4)
+#define STAT_POLARITY_CFG_BIT BIT(3)
+#define STAT_PARALLEL_CFG_BIT BIT(2)
+#define STAT_FUNCTION_CFG_BIT BIT(1)
+#define STAT_IRQ_PULSING_EN_BIT BIT(0)
+
+#define SDP_CURRENT_UA 500000
+#define CDP_CURRENT_UA 1500000
+#define DCP_CURRENT_UA 1500000
+#define CURRENT_MAX_UA DCP_CURRENT_UA
+
+/* pmi8998 registers represent current in increments of 1/40th of an amp */
+#define CURRENT_SCALE_FACTOR 25000
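+/*
+ * For example, a raw ICL_STATUS reading of 20 (0x14) corresponds to
+ * 20 * 25000 uA = 500 mA, and a 1 A fast-charge limit is programmed as
+ * 1000000 / 25000 = 40.
+ */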
+/* clang-format on */
+
+enum charger_status {
+ TRICKLE_CHARGE = 0,
+ PRE_CHARGE,
+ FAST_CHARGE,
+ FULLON_CHARGE,
+ TAPER_CHARGE,
+ TERMINATE_CHARGE,
+ INHIBIT_CHARGE,
+ DISABLE_CHARGE,
+};
+
+struct smb2_register {
+ u16 addr;
+ u8 mask;
+ u8 val;
+};
+
+/**
+ * struct smb2_chip - smb2 chip structure
+ * @dev: Device reference for power_supply
+ * @name: The platform device name
+ * @base: Base address for smb2 registers
+ * @regmap: Register map
+ * @batt_info: Battery data from DT
+ * @status_change_work: Worker to handle plug/unplug events
+ * @cable_irq: USB plugin IRQ
+ * @wakeup_enabled: If the cable IRQ will cause a wakeup
+ * @usb_in_i_chan: USB_IN current measurement channel
+ * @usb_in_v_chan: USB_IN voltage measurement channel
+ * @chg_psy: Charger power supply instance
+ */
+struct smb2_chip {
+ struct device *dev;
+ const char *name;
+ unsigned int base;
+ struct regmap *regmap;
+ struct power_supply_battery_info *batt_info;
+
+ struct delayed_work status_change_work;
+ int cable_irq;
+ bool wakeup_enabled;
+
+ struct iio_channel *usb_in_i_chan;
+ struct iio_channel *usb_in_v_chan;
+
+ struct power_supply *chg_psy;
+};
+
+static enum power_supply_property smb2_properties[] = {
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+};
+
+static enum power_supply_usb_type smb2_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+};
+
+static int smb2_get_prop_usb_online(struct smb2_chip *chip, int *val)
+{
+ unsigned int stat;
+ int rc;
+
+ rc = regmap_read(chip->regmap, chip->base + POWER_PATH_STATUS, &stat);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read power path status: %d\n", rc);
+ return rc;
+ }
+
+ *val = (stat & P_PATH_USE_USBIN_BIT) &&
+ (stat & P_PATH_VALID_INPUT_POWER_SOURCE_STS_BIT);
+ return 0;
+}
+
+/*
+ * Qualcomm "automatic power source detection" aka APSD
+ * tells us what type of charger we're connected to.
+ */
+static int smb2_apsd_get_charger_type(struct smb2_chip *chip, int *val)
+{
+ unsigned int apsd_stat, stat;
+ int usb_online = 0;
+ int rc;
+
+ rc = smb2_get_prop_usb_online(chip, &usb_online);
+ if (!usb_online) {
+ *val = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ return rc;
+ }
+
+ rc = regmap_read(chip->regmap, chip->base + APSD_STATUS, &apsd_stat);
+ if (rc < 0) {
+ dev_err(chip->dev, "Failed to read apsd status, rc = %d", rc);
+ return rc;
+ }
+ if (!(apsd_stat & APSD_DTC_STATUS_DONE_BIT)) {
+ dev_dbg(chip->dev, "Apsd not ready");
+ return -EAGAIN;
+ }
+
+ rc = regmap_read(chip->regmap, chip->base + APSD_RESULT_STATUS, &stat);
+ if (rc < 0) {
+ dev_err(chip->dev, "Failed to read apsd result, rc = %d", rc);
+ return rc;
+ }
+
+ stat &= APSD_RESULT_STATUS_MASK;
+
+ if (stat & CDP_CHARGER_BIT)
+ *val = POWER_SUPPLY_USB_TYPE_CDP;
+ else if (stat & (DCP_CHARGER_BIT | OCP_CHARGER_BIT | FLOAT_CHARGER_BIT))
+ *val = POWER_SUPPLY_USB_TYPE_DCP;
+ else /* SDP_CHARGER_BIT (or others) */
+ *val = POWER_SUPPLY_USB_TYPE_SDP;
+
+ return 0;
+}
+
+static int smb2_get_prop_status(struct smb2_chip *chip, int *val)
+{
+ unsigned char stat[2];
+ int usb_online = 0;
+ int rc;
+
+ rc = smb2_get_prop_usb_online(chip, &usb_online);
+ if (!usb_online) {
+ *val = POWER_SUPPLY_STATUS_DISCHARGING;
+ return rc;
+ }
+
+ rc = regmap_bulk_read(chip->regmap,
+ chip->base + BATTERY_CHARGER_STATUS_1, &stat, 2);
+ if (rc < 0) {
+ dev_err(chip->dev, "Failed to read charging status ret=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (stat[1] & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return 0;
+ }
+
+ stat[0] = stat[0] & BATTERY_CHARGER_STATUS_MASK;
+
+ switch (stat[0]) {
+ case TRICKLE_CHARGE:
+ case PRE_CHARGE:
+ case FAST_CHARGE:
+ case FULLON_CHARGE:
+ case TAPER_CHARGE:
+ *val = POWER_SUPPLY_STATUS_CHARGING;
+ return rc;
+ case DISABLE_CHARGE:
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return rc;
+ case TERMINATE_CHARGE:
+ *val = POWER_SUPPLY_STATUS_FULL;
+ return rc;
+ case INHIBIT_CHARGE:
+ default:
+ *val = POWER_SUPPLY_STATUS_UNKNOWN;
+ return rc;
+ }
+}
+
+static inline int smb2_get_current_limit(struct smb2_chip *chip,
+ unsigned int *val)
+{
+ int rc = regmap_read(chip->regmap, chip->base + ICL_STATUS, val);
+
+ if (rc >= 0)
+ *val *= CURRENT_SCALE_FACTOR;
+ return rc;
+}
+
+static int smb2_set_current_limit(struct smb2_chip *chip, unsigned int val)
+{
+ unsigned char val_raw;
+
+ if (val > 4800000) {
+ dev_err(chip->dev,
+ "Can't set current limit higher than 4800000uA");
+ return -EINVAL;
+ }
+ val_raw = val / CURRENT_SCALE_FACTOR;
+
+ return regmap_write(chip->regmap, chip->base + USBIN_CURRENT_LIMIT_CFG,
+ val_raw);
+}
+
+static void smb2_status_change_work(struct work_struct *work)
+{
+ unsigned int charger_type, current_ua;
+ int usb_online, count, rc;
+ struct smb2_chip *chip;
+
+ chip = container_of(work, struct smb2_chip, status_change_work.work);
+
+ smb2_get_prop_usb_online(chip, &usb_online);
+ if (!usb_online)
+ return;
+
+ for (count = 0; count < 3; count++) {
+ dev_dbg(chip->dev, "get charger type retry %d\n", count);
+ rc = smb2_apsd_get_charger_type(chip, &charger_type);
+ if (rc != -EAGAIN)
+ break;
+ msleep(100);
+ }
+
+ if (rc < 0 && rc != -EAGAIN) {
+ dev_err(chip->dev, "get charger type failed: %d\n", rc);
+ return;
+ }
+
+ if (rc < 0) {
+ rc = regmap_update_bits(chip->regmap, chip->base + CMD_APSD,
+ APSD_RERUN_BIT, APSD_RERUN_BIT);
+ schedule_delayed_work(&chip->status_change_work,
+ msecs_to_jiffies(1000));
+ dev_dbg(chip->dev, "get charger type failed, rerun apsd\n");
+ return;
+ }
+
+ switch (charger_type) {
+ case POWER_SUPPLY_USB_TYPE_CDP:
+ current_ua = CDP_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_USB_TYPE_DCP:
+ current_ua = DCP_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_USB_TYPE_SDP:
+ default:
+ current_ua = SDP_CURRENT_UA;
+ break;
+ }
+
+ smb2_set_current_limit(chip, current_ua);
+ power_supply_changed(chip->chg_psy);
+}
+
+static int smb2_get_iio_chan(struct smb2_chip *chip, struct iio_channel *chan,
+ int *val)
+{
+ int rc;
+ union power_supply_propval status;
+
+ rc = power_supply_get_property(chip->chg_psy, POWER_SUPPLY_PROP_STATUS,
+ &status);
+ if (rc < 0 || status.intval != POWER_SUPPLY_STATUS_CHARGING) {
+ *val = 0;
+ return 0;
+ }
+
+ if (IS_ERR(chan)) {
+		dev_err(chip->dev, "Failed to get IIO channel, err = %li",
+			PTR_ERR(chan));
+ return PTR_ERR(chan);
+ }
+
+ return iio_read_channel_processed(chan, val);
+}
+
+static int smb2_get_prop_health(struct smb2_chip *chip, int *val)
+{
+ int rc;
+ unsigned int stat;
+
+ rc = regmap_read(chip->regmap, chip->base + BATTERY_CHARGER_STATUS_2,
+ &stat);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read charger status rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (stat) {
+ case CHARGER_ERROR_STATUS_BAT_OV_BIT:
+ *val = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ return 0;
+ case BAT_TEMP_STATUS_TOO_COLD_BIT:
+ *val = POWER_SUPPLY_HEALTH_COLD;
+ return 0;
+ case BAT_TEMP_STATUS_TOO_HOT_BIT:
+ *val = POWER_SUPPLY_HEALTH_OVERHEAT;
+ return 0;
+ case BAT_TEMP_STATUS_COLD_SOFT_LIMIT_BIT:
+ *val = POWER_SUPPLY_HEALTH_COOL;
+ return 0;
+ case BAT_TEMP_STATUS_HOT_SOFT_LIMIT_BIT:
+ *val = POWER_SUPPLY_HEALTH_WARM;
+ return 0;
+ default:
+ *val = POWER_SUPPLY_HEALTH_GOOD;
+ return 0;
+ }
+}
+
+static int smb2_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb2_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "Qualcomm";
+ return 0;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = chip->name;
+ return 0;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ return smb2_get_current_limit(chip, &val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ val->intval = DCP_CURRENT_UA;
+ return 0;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return smb2_get_iio_chan(chip, chip->usb_in_i_chan,
+ &val->intval);
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ return smb2_get_iio_chan(chip, chip->usb_in_v_chan,
+ &val->intval);
+ case POWER_SUPPLY_PROP_ONLINE:
+ return smb2_get_prop_usb_online(chip, &val->intval);
+ case POWER_SUPPLY_PROP_STATUS:
+ return smb2_get_prop_status(chip, &val->intval);
+ case POWER_SUPPLY_PROP_HEALTH:
+ return smb2_get_prop_health(chip, &val->intval);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return smb2_apsd_get_charger_type(chip, &val->intval);
+ default:
+ dev_err(chip->dev, "invalid property: %d\n", psp);
+ return -EINVAL;
+ }
+}
+
+static int smb2_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smb2_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ return smb2_set_current_limit(chip, val->intval);
+ default:
+ dev_err(chip->dev, "No setter for property: %d\n", psp);
+ return -EINVAL;
+ }
+}
+
+static int smb2_property_is_writable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static irqreturn_t smb2_handle_batt_overvoltage(int irq, void *data)
+{
+ struct smb2_chip *chip = data;
+ unsigned int status;
+
+ regmap_read(chip->regmap, chip->base + BATTERY_CHARGER_STATUS_2,
+ &status);
+
+ if (status & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
+ /* The hardware stops charging automatically */
+ dev_err(chip->dev, "battery overvoltage detected\n");
+ power_supply_changed(chip->chg_psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smb2_handle_usb_plugin(int irq, void *data)
+{
+ struct smb2_chip *chip = data;
+
+ power_supply_changed(chip->chg_psy);
+
+ schedule_delayed_work(&chip->status_change_work,
+ msecs_to_jiffies(1500));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smb2_handle_usb_icl_change(int irq, void *data)
+{
+ struct smb2_chip *chip = data;
+
+ power_supply_changed(chip->chg_psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t smb2_handle_wdog_bark(int irq, void *data)
+{
+ struct smb2_chip *chip = data;
+ int rc;
+
+ power_supply_changed(chip->chg_psy);
+
+ rc = regmap_write(chip->regmap, BARK_BITE_WDOG_PET,
+ BARK_BITE_WDOG_PET_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't pet the dog rc=%d\n", rc);
+
+ return IRQ_HANDLED;
+}
+
+static const struct power_supply_desc smb2_psy_desc = {
+ .name = "pmi8998_charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = smb2_usb_types,
+ .num_usb_types = ARRAY_SIZE(smb2_usb_types),
+ .properties = smb2_properties,
+ .num_properties = ARRAY_SIZE(smb2_properties),
+ .get_property = smb2_get_property,
+ .set_property = smb2_set_property,
+ .property_is_writeable = smb2_property_is_writable,
+};
+
+/* Init sequence derived from vendor downstream driver */
+static const struct smb2_register smb2_init_seq[] = {
+ { .addr = AICL_RERUN_TIME_CFG, .mask = AICL_RERUN_TIME_MASK, .val = 0 },
+ /*
+ * By default configure us as an upstream facing port
+ * FIXME: This will be handled by the type-c driver
+ */
+ { .addr = TYPE_C_INTRPT_ENB_SOFTWARE_CTRL,
+ .mask = TYPEC_POWER_ROLE_CMD_MASK | VCONN_EN_SRC_BIT |
+ VCONN_EN_VALUE_BIT,
+ .val = VCONN_EN_SRC_BIT },
+ /*
+ * Disable Type-C factory mode and stay in Attached.SRC state when VCONN
+ * over-current happens
+ */
+ { .addr = TYPE_C_CFG,
+ .mask = FACTORY_MODE_DETECTION_EN_BIT | VCONN_OC_CFG_BIT,
+ .val = 0 },
+ /* Configure VBUS for software control */
+ { .addr = OTG_CFG, .mask = OTG_EN_SRC_CFG_BIT, .val = 0 },
+ /*
+ * Use VBAT to determine the recharge threshold when battery is full
+ * rather than the state of charge.
+ */
+ { .addr = FG_UPDATE_CFG_2_SEL,
+ .mask = SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT |
+ VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT,
+ .val = VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT },
+ /* Enable charging */
+ { .addr = USBIN_OPTIONS_1_CFG, .mask = HVDCP_EN_BIT, .val = 0 },
+ { .addr = CHARGING_ENABLE_CMD,
+ .mask = CHARGING_ENABLE_CMD_BIT,
+ .val = CHARGING_ENABLE_CMD_BIT },
+ /*
+ * Match downstream defaults
+ * CHG_EN_SRC_BIT - charger enable is controlled by software
+ * CHG_EN_POLARITY_BIT - polarity of charge enable pin when in HW control
+ * pulled low on OnePlus 6 and SHIFT6mq
+ * PRETOFAST_TRANSITION_CFG_BIT -
+ * BAT_OV_ECC_BIT -
+ * I_TERM_BIT - Current termination ?? 0 = enabled
+ * AUTO_RECHG_BIT - Enable automatic recharge when battery is full
+ * 0 = enabled
+ * EN_ANALOG_DROP_IN_VBATT_BIT
+ * CHARGER_INHIBIT_BIT - Inhibit charging based on battery voltage
+ * instead of ??
+ */
+ { .addr = CHGR_CFG2,
+ .mask = CHG_EN_SRC_BIT | CHG_EN_POLARITY_BIT |
+ PRETOFAST_TRANSITION_CFG_BIT | BAT_OV_ECC_BIT | I_TERM_BIT |
+ AUTO_RECHG_BIT | EN_ANALOG_DROP_IN_VBATT_BIT |
+ CHARGER_INHIBIT_BIT,
+ .val = CHARGER_INHIBIT_BIT },
+	/* STAT pin software override, match downstream. Parallel charging? */
+ { .addr = STAT_CFG,
+ .mask = STAT_SW_OVERRIDE_CFG_BIT,
+ .val = STAT_SW_OVERRIDE_CFG_BIT },
+	/* Set the default SDP charger type to a 500 mA USB 2.0 port */
+ { .addr = USBIN_ICL_OPTIONS,
+ .mask = USB51_MODE_BIT | USBIN_MODE_CHG_BIT,
+ .val = USB51_MODE_BIT },
+ /* Disable watchdog */
+ { .addr = SNARL_BARK_BITE_WD_CFG, .mask = 0xff, .val = 0 },
+ { .addr = WD_CFG,
+ .mask = WATCHDOG_TRIGGER_AFP_EN_BIT | WDOG_TIMER_EN_ON_PLUGIN_BIT |
+ BARK_WDOG_INT_EN_BIT,
+ .val = 0 },
+ /* These bits aren't documented anywhere */
+ { .addr = USBIN_5V_AICL_THRESHOLD_CFG,
+ .mask = USBIN_5V_AICL_THRESHOLD_CFG_MASK,
+ .val = 0x3 },
+ { .addr = USBIN_CONT_AICL_THRESHOLD_CFG,
+ .mask = USBIN_CONT_AICL_THRESHOLD_CFG_MASK,
+ .val = 0x3 },
+ /*
+	 * Enable Automatic Input Current Limit (AICL). When connected to a
+	 * wall charger this slowly ramps up the input current and stops
+	 * automatically when it detects the charger's current limit (voltage
+	 * drop?) or reaches the programmed limit.
+ */
+ { .addr = USBIN_AICL_OPTIONS_CFG,
+ .mask = USBIN_AICL_START_AT_MAX_BIT | USBIN_AICL_ADC_EN_BIT |
+ USBIN_AICL_EN_BIT | SUSPEND_ON_COLLAPSE_USBIN_BIT |
+ USBIN_HV_COLLAPSE_RESPONSE_BIT |
+ USBIN_LV_COLLAPSE_RESPONSE_BIT,
+ .val = USBIN_HV_COLLAPSE_RESPONSE_BIT |
+ USBIN_LV_COLLAPSE_RESPONSE_BIT | USBIN_AICL_EN_BIT },
+ /*
+	 * Set the pre-charge current to the default; the OnePlus 6 bootloader
+	 * sets this very conservatively.
+ */
+ { .addr = PRE_CHARGE_CURRENT_CFG,
+ .mask = PRE_CHARGE_CURRENT_SETTING_MASK,
+ .val = 500000 / CURRENT_SCALE_FACTOR },
+ /*
+ * This overrides all of the current limit options exposed to userspace
+ * and prevents the device from pulling more than ~1A. This is done
+ * to minimise potential fire hazard risks.
+ */
+ { .addr = FAST_CHARGE_CURRENT_CFG,
+ .mask = FAST_CHARGE_CURRENT_SETTING_MASK,
+ .val = 1000000 / CURRENT_SCALE_FACTOR },
+};
+
+static int smb2_init_hw(struct smb2_chip *chip)
+{
+ int rc, i;
+
+ for (i = 0; i < ARRAY_SIZE(smb2_init_seq); i++) {
+ dev_dbg(chip->dev, "%d: Writing 0x%02x to 0x%02x\n", i,
+ smb2_init_seq[i].val, smb2_init_seq[i].addr);
+ rc = regmap_update_bits(chip->regmap,
+ chip->base + smb2_init_seq[i].addr,
+ smb2_init_seq[i].mask,
+ smb2_init_seq[i].val);
+ if (rc < 0)
+ return dev_err_probe(chip->dev, rc,
+ "%s: init command %d failed\n",
+ __func__, i);
+ }
+
+ return 0;
+}
+
+static int smb2_init_irq(struct smb2_chip *chip, int *irq, const char *name,
+ irqreturn_t (*handler)(int irq, void *data))
+{
+ int irqnum;
+ int rc;
+
+ irqnum = platform_get_irq_byname(to_platform_device(chip->dev), name);
+ if (irqnum < 0)
+ return dev_err_probe(chip->dev, irqnum,
+ "Couldn't get irq %s byname\n", name);
+
+ rc = devm_request_threaded_irq(chip->dev, irqnum, NULL, handler,
+ IRQF_ONESHOT, name, chip);
+ if (rc < 0)
+ return dev_err_probe(chip->dev, rc, "Couldn't request irq %s\n",
+ name);
+
+ if (irq)
+ *irq = irqnum;
+
+ return 0;
+}
+
+static int smb2_probe(struct platform_device *pdev)
+{
+ struct power_supply_config supply_config = {};
+ struct power_supply_desc *desc;
+ struct smb2_chip *chip;
+ int rc, irq;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ chip->name = pdev->name;
+
+ chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->regmap)
+ return dev_err_probe(chip->dev, -ENODEV,
+ "failed to locate the regmap\n");
+
+ rc = device_property_read_u32(chip->dev, "reg", &chip->base);
+ if (rc < 0)
+ return dev_err_probe(chip->dev, rc,
+ "Couldn't read base address\n");
+
+ chip->usb_in_v_chan = devm_iio_channel_get(chip->dev, "usbin_v");
+ if (IS_ERR(chip->usb_in_v_chan))
+ return dev_err_probe(chip->dev, PTR_ERR(chip->usb_in_v_chan),
+ "Couldn't get usbin_v IIO channel\n");
+
+ chip->usb_in_i_chan = devm_iio_channel_get(chip->dev, "usbin_i");
+ if (IS_ERR(chip->usb_in_i_chan)) {
+ return dev_err_probe(chip->dev, PTR_ERR(chip->usb_in_i_chan),
+ "Couldn't get usbin_i IIO channel\n");
+ }
+
+ rc = smb2_init_hw(chip);
+ if (rc < 0)
+ return rc;
+
+ supply_config.drv_data = chip;
+ supply_config.of_node = pdev->dev.of_node;
+
+	desc = devm_kzalloc(chip->dev, sizeof(smb2_psy_desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	memcpy(desc, &smb2_psy_desc, sizeof(smb2_psy_desc));
+ desc->name =
+ devm_kasprintf(chip->dev, GFP_KERNEL, "%s-charger",
+ (const char *)device_get_match_data(chip->dev));
+
+ chip->chg_psy =
+ devm_power_supply_register(chip->dev, desc, &supply_config);
+ if (IS_ERR(chip->chg_psy))
+ return dev_err_probe(chip->dev, PTR_ERR(chip->chg_psy),
+ "failed to register power supply\n");
+
+ rc = power_supply_get_battery_info(chip->chg_psy, &chip->batt_info);
+ if (rc)
+ return dev_err_probe(chip->dev, rc,
+ "Failed to get battery info\n");
+
+ rc = devm_delayed_work_autocancel(chip->dev, &chip->status_change_work,
+ smb2_status_change_work);
+ if (rc)
+ return dev_err_probe(chip->dev, rc,
+ "Failed to init status change work\n");
+
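+	/*
+	 * Program the float (maximum battery) voltage from the battery's
+	 * design maximum; the register value is derived from a 3.4875 V base
+	 * in 7.5 mV steps, as encoded by the calculation below.
+	 */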
+ rc = (chip->batt_info->voltage_max_design_uv - 3487500) / 7500 + 1;
+ rc = regmap_update_bits(chip->regmap, chip->base + FLOAT_VOLTAGE_CFG,
+ FLOAT_VOLTAGE_SETTING_MASK, rc);
+ if (rc < 0)
+ return dev_err_probe(chip->dev, rc, "Couldn't set vbat max\n");
+
+ rc = smb2_init_irq(chip, &irq, "bat-ov", smb2_handle_batt_overvoltage);
+ if (rc < 0)
+ return rc;
+
+ rc = smb2_init_irq(chip, &chip->cable_irq, "usb-plugin",
+ smb2_handle_usb_plugin);
+ if (rc < 0)
+ return rc;
+
+ rc = smb2_init_irq(chip, &irq, "usbin-icl-change",
+ smb2_handle_usb_icl_change);
+ if (rc < 0)
+ return rc;
+ rc = smb2_init_irq(chip, &irq, "wdog-bark", smb2_handle_wdog_bark);
+ if (rc < 0)
+ return rc;
+
+ rc = dev_pm_set_wake_irq(chip->dev, chip->cable_irq);
+ if (rc < 0)
+ return dev_err_probe(chip->dev, rc, "Couldn't set wake irq\n");
+
+ platform_set_drvdata(pdev, chip);
+
+ /* Initialise charger state */
+ schedule_delayed_work(&chip->status_change_work, 0);
+
+ return 0;
+}
+
+static const struct of_device_id smb2_match_id_table[] = {
+ { .compatible = "qcom,pmi8998-charger", .data = "pmi8998" },
+ { .compatible = "qcom,pm660-charger", .data = "pm660" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, smb2_match_id_table);
+
+static struct platform_driver qcom_spmi_smb2 = {
+ .probe = smb2_probe,
+ .driver = {
+ .name = "qcom-pmi8998/pm660-charger",
+ .of_match_table = smb2_match_id_table,
+ },
+};
+
+module_platform_driver(qcom_spmi_smb2);
+
+MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm SMB2 Charger Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 1a2143641e66..8328bcea1a29 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -1134,7 +1134,7 @@ static int rk817_charger_probe(struct platform_device *pdev)
&bat_info);
if (ret) {
return dev_err_probe(dev, ret,
- "Unable to get battery info: %d\n", ret);
+ "Unable to get battery info\n");
}
if ((bat_info->charge_full_design_uah <= 0) ||
diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c
index 5c04cf305219..d90b96df8e73 100644
--- a/drivers/power/supply/rt5033_battery.c
+++ b/drivers/power/supply/rt5033_battery.c
@@ -6,11 +6,33 @@
* Author: Beomho Seo <beomho.seo@samsung.com>
*/
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <linux/regmap.h>
#include <linux/mfd/rt5033-private.h>
-#include <linux/mfd/rt5033.h>
+
+struct rt5033_battery {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct power_supply *psy;
+};
+
+static int rt5033_battery_get_status(struct i2c_client *client)
+{
+ struct rt5033_battery *battery = i2c_get_clientdata(client);
+ union power_supply_propval val;
+ int ret;
+
+ ret = power_supply_get_property_from_supplier(battery->psy,
+ POWER_SUPPLY_PROP_STATUS,
+ &val);
+ if (ret)
+ val.intval = POWER_SUPPLY_STATUS_UNKNOWN;
+
+ return val.intval;
+}
static int rt5033_battery_get_capacity(struct i2c_client *client)
{
@@ -84,6 +106,9 @@ static int rt5033_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = rt5033_battery_get_capacity(battery->client);
break;
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = rt5033_battery_get_status(battery->client);
+ break;
default:
return -EINVAL;
}
@@ -96,6 +121,7 @@ static enum power_supply_property rt5033_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_OCV,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_STATUS,
};
static const struct regmap_config rt5033_battery_regmap_config = {
@@ -117,7 +143,6 @@ static int rt5033_battery_probe(struct i2c_client *client)
struct i2c_adapter *adapter = client->adapter;
struct power_supply_config psy_cfg = {};
struct rt5033_battery *battery;
- u32 ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
return -EIO;
@@ -135,15 +160,14 @@ static int rt5033_battery_probe(struct i2c_client *client)
}
i2c_set_clientdata(client, battery);
+ psy_cfg.of_node = client->dev.of_node;
psy_cfg.drv_data = battery;
battery->psy = power_supply_register(&client->dev,
&rt5033_battery_desc, &psy_cfg);
- if (IS_ERR(battery->psy)) {
- dev_err(&client->dev, "Failed to register power supply\n");
- ret = PTR_ERR(battery->psy);
- return ret;
- }
+ if (IS_ERR(battery->psy))
+ return dev_err_probe(&client->dev, PTR_ERR(battery->psy),
+ "Failed to register power supply\n");
return 0;
}
@@ -172,7 +196,7 @@ static struct i2c_driver rt5033_battery_driver = {
.name = "rt5033-battery",
.of_match_table = rt5033_battery_of_match,
},
- .probe_new = rt5033_battery_probe,
+ .probe = rt5033_battery_probe,
.remove = rt5033_battery_remove,
.id_table = rt5033_battery_id,
};
diff --git a/drivers/power/supply/rt5033_charger.c b/drivers/power/supply/rt5033_charger.c
new file mode 100644
index 000000000000..5218dfbf5e1b
--- /dev/null
+++ b/drivers/power/supply/rt5033_charger.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Battery charger driver for RT5033
+ *
+ * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/mfd/rt5033-private.h>
+
+struct rt5033_charger_data {
+ unsigned int pre_uamp;
+ unsigned int pre_uvolt;
+ unsigned int const_uvolt;
+ unsigned int eoc_uamp;
+ unsigned int fast_uamp;
+};
+
+struct rt5033_charger {
+ struct device *dev;
+ struct regmap *regmap;
+ struct power_supply *psy;
+ struct rt5033_charger_data *chg;
+};
+
+static int rt5033_get_charger_state(struct rt5033_charger *charger)
+{
+ struct regmap *regmap = charger->regmap;
+ unsigned int reg_data;
+ int state;
+
+ if (!regmap)
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+
+ regmap_read(regmap, RT5033_REG_CHG_STAT, &reg_data);
+
+ switch (reg_data & RT5033_CHG_STAT_MASK) {
+ case RT5033_CHG_STAT_DISCHARGING:
+ state = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case RT5033_CHG_STAT_CHARGING:
+ state = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case RT5033_CHG_STAT_FULL:
+ state = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case RT5033_CHG_STAT_NOT_CHARGING:
+ state = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ state = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ return state;
+}
+
+static int rt5033_get_charger_type(struct rt5033_charger *charger)
+{
+ struct regmap *regmap = charger->regmap;
+ unsigned int reg_data;
+ int state;
+
+ regmap_read(regmap, RT5033_REG_CHG_STAT, &reg_data);
+
+ switch (reg_data & RT5033_CHG_STAT_TYPE_MASK) {
+ case RT5033_CHG_STAT_TYPE_FAST:
+ state = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case RT5033_CHG_STAT_TYPE_PRE:
+ state = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ default:
+ state = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ }
+
+ return state;
+}
+
+static int rt5033_get_charger_current_limit(struct rt5033_charger *charger)
+{
+ struct regmap *regmap = charger->regmap;
+ unsigned int state, reg_data, data;
+
+ regmap_read(regmap, RT5033_REG_CHG_CTRL5, &reg_data);
+
+ state = (reg_data & RT5033_CHGCTRL5_ICHG_MASK)
+ >> RT5033_CHGCTRL5_ICHG_SHIFT;
+
+ data = RT5033_CHARGER_FAST_CURRENT_MIN +
+ RT5033_CHARGER_FAST_CURRENT_STEP_NUM * state;
+
+ return data;
+}
+
+static int rt5033_get_charger_const_voltage(struct rt5033_charger *charger)
+{
+ struct regmap *regmap = charger->regmap;
+ unsigned int state, reg_data, data;
+
+ regmap_read(regmap, RT5033_REG_CHG_CTRL2, &reg_data);
+
+ state = (reg_data & RT5033_CHGCTRL2_CV_MASK)
+ >> RT5033_CHGCTRL2_CV_SHIFT;
+
+ data = RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN +
+ RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM * state;
+
+ return data;
+}
+
+static inline int rt5033_init_const_charge(struct rt5033_charger *charger)
+{
+ struct rt5033_charger_data *chg = charger->chg;
+ int ret;
+ unsigned int val;
+ u8 reg_data;
+
+ /* Set constant voltage mode */
+ if (chg->const_uvolt < RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN ||
+ chg->const_uvolt > RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX) {
+ dev_err(charger->dev,
+ "Value 'constant-charge-voltage-max-microvolt' out of range\n");
+ return -EINVAL;
+ }
+
+ if (chg->const_uvolt == RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN)
+ reg_data = 0x00;
+ else if (chg->const_uvolt == RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX)
+ reg_data = RT5033_CV_MAX_VOLTAGE;
+ else {
+ val = chg->const_uvolt;
+ val -= RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN;
+ val /= RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM;
+ reg_data = val;
+ }
+
+ ret = regmap_update_bits(charger->regmap, RT5033_REG_CHG_CTRL2,
+ RT5033_CHGCTRL2_CV_MASK,
+ reg_data << RT5033_CHGCTRL2_CV_SHIFT);
+ if (ret) {
+ dev_err(charger->dev, "Failed regmap update\n");
+ return -EINVAL;
+ }
+
+ /* Set end of charge current */
+ if (chg->eoc_uamp < RT5033_CHARGER_EOC_MIN ||
+ chg->eoc_uamp > RT5033_CHARGER_EOC_MAX) {
+ dev_err(charger->dev,
+ "Value 'charge-term-current-microamp' out of range\n");
+ return -EINVAL;
+ }
+
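+	/*
+	 * The EOC current field uses two step sizes:
+	 * RT5033_CHARGER_EOC_STEP_NUM1 below the reference current (codes
+	 * from 0x01) and RT5033_CHARGER_EOC_STEP_NUM2 above it (codes from
+	 * 0x04), which is what the branches below encode.
+	 */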
+ if (chg->eoc_uamp == RT5033_CHARGER_EOC_MIN)
+ reg_data = 0x01;
+ else if (chg->eoc_uamp == RT5033_CHARGER_EOC_MAX)
+ reg_data = 0x07;
+ else {
+ val = chg->eoc_uamp;
+ if (val < RT5033_CHARGER_EOC_REF) {
+ val -= RT5033_CHARGER_EOC_MIN;
+ val /= RT5033_CHARGER_EOC_STEP_NUM1;
+ reg_data = 0x01 + val;
+ } else if (val > RT5033_CHARGER_EOC_REF) {
+ val -= RT5033_CHARGER_EOC_REF;
+ val /= RT5033_CHARGER_EOC_STEP_NUM2;
+ reg_data = 0x04 + val;
+ } else {
+ reg_data = 0x04;
+ }
+ }
+
+ ret = regmap_update_bits(charger->regmap, RT5033_REG_CHG_CTRL4,
+ RT5033_CHGCTRL4_EOC_MASK, reg_data);
+ if (ret) {
+ dev_err(charger->dev, "Failed regmap update\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int rt5033_init_fast_charge(struct rt5033_charger *charger)
+{
+ struct rt5033_charger_data *chg = charger->chg;
+ int ret;
+ unsigned int val;
+ u8 reg_data;
+
+ /* Set limit input current */
+ ret = regmap_update_bits(charger->regmap, RT5033_REG_CHG_CTRL1,
+ RT5033_CHGCTRL1_IAICR_MASK, RT5033_AICR_2000_MODE);
+ if (ret) {
+ dev_err(charger->dev, "Failed regmap update\n");
+ return -EINVAL;
+ }
+
+ /* Set fast-charge mode charging current */
+ if (chg->fast_uamp < RT5033_CHARGER_FAST_CURRENT_MIN ||
+ chg->fast_uamp > RT5033_CHARGER_FAST_CURRENT_MAX) {
+ dev_err(charger->dev,
+ "Value 'constant-charge-current-max-microamp' out of range\n");
+ return -EINVAL;
+ }
+
+ if (chg->fast_uamp == RT5033_CHARGER_FAST_CURRENT_MIN)
+ reg_data = 0x00;
+ else if (chg->fast_uamp == RT5033_CHARGER_FAST_CURRENT_MAX)
+ reg_data = RT5033_CHG_MAX_CURRENT;
+ else {
+ val = chg->fast_uamp;
+ val -= RT5033_CHARGER_FAST_CURRENT_MIN;
+ val /= RT5033_CHARGER_FAST_CURRENT_STEP_NUM;
+ reg_data = val;
+ }
+
+ ret = regmap_update_bits(charger->regmap, RT5033_REG_CHG_CTRL5,
+ RT5033_CHGCTRL5_ICHG_MASK,
+ reg_data << RT5033_CHGCTRL5_ICHG_SHIFT);
+ if (ret) {
+ dev_err(charger->dev, "Failed regmap update\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int rt5033_init_pre_charge(struct rt5033_charger *charger)
+{
+ struct rt5033_charger_data *chg = charger->chg;
+ int ret;
+ unsigned int val;
+ u8 reg_data;
+
+ /* Set pre-charge threshold voltage */
+ if (chg->pre_uvolt < RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN ||
+ chg->pre_uvolt > RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX) {
+ dev_err(charger->dev,
+ "Value 'precharge-upper-limit-microvolt' out of range\n");
+ return -EINVAL;
+ }
+
+ if (chg->pre_uvolt == RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN)
+ reg_data = 0x00;
+ else if (chg->pre_uvolt == RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX)
+ reg_data = 0x0f;
+ else {
+ val = chg->pre_uvolt;
+ val -= RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN;
+ val /= RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM;
+ reg_data = val;
+ }
+
+ ret = regmap_update_bits(charger->regmap, RT5033_REG_CHG_CTRL5,
+ RT5033_CHGCTRL5_VPREC_MASK, reg_data);
+ if (ret) {
+ dev_err(charger->dev, "Failed regmap update\n");
+ return -EINVAL;
+ }
+
+ /* Set pre-charge mode charging current */
+ if (chg->pre_uamp < RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN ||
+ chg->pre_uamp > RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX) {
+ dev_err(charger->dev,
+ "Value 'precharge-current-microamp' out of range\n");
+ return -EINVAL;
+ }
+
+ if (chg->pre_uamp == RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN)
+ reg_data = 0x00;
+ else if (chg->pre_uamp == RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX)
+ reg_data = RT5033_CHG_MAX_PRE_CURRENT;
+ else {
+ val = chg->pre_uamp;
+ val -= RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN;
+ val /= RT5033_CHARGER_PRE_CURRENT_STEP_NUM;
+ reg_data = val;
+ }
+
+ ret = regmap_update_bits(charger->regmap, RT5033_REG_CHG_CTRL4,
+ RT5033_CHGCTRL4_IPREC_MASK,
+ reg_data << RT5033_CHGCTRL4_IPREC_SHIFT);
+ if (ret) {
+ dev_err(charger->dev, "Failed regmap update\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rt5033_charger_reg_init(struct rt5033_charger *charger)
+{
+ int ret = 0;
+
+ /* Enable charging termination */
+ ret = regmap_update_bits(charger->regmap, RT5033_REG_CHG_CTRL1,
+ RT5033_CHGCTRL1_TE_EN_MASK, RT5033_TE_ENABLE);
+ if (ret) {
+ dev_err(charger->dev, "Failed to enable charging termination.\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Disable minimum input voltage regulation (MIVR); this improves
+	 * charging performance.
+ */
+ ret = regmap_update_bits(charger->regmap, RT5033_REG_CHG_CTRL4,
+ RT5033_CHGCTRL4_MIVR_MASK, RT5033_CHARGER_MIVR_DISABLE);
+ if (ret) {
+ dev_err(charger->dev, "Failed to disable MIVR.\n");
+ return -EINVAL;
+ }
+
+ ret = rt5033_init_pre_charge(charger);
+ if (ret)
+ return ret;
+
+ ret = rt5033_init_fast_charge(charger);
+ if (ret)
+ return ret;
+
+ ret = rt5033_init_const_charge(charger);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static enum power_supply_property rt5033_charger_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int rt5033_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct rt5033_charger *charger = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = rt5033_get_charger_state(charger);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = rt5033_get_charger_type(charger);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = rt5033_get_charger_current_limit(charger);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ val->intval = rt5033_get_charger_const_voltage(charger);
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = RT5033_CHARGER_MODEL;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = RT5033_MANUFACTURER;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = (rt5033_get_charger_state(charger) ==
+ POWER_SUPPLY_STATUS_CHARGING);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct rt5033_charger_data *rt5033_charger_dt_init(
+ struct rt5033_charger *charger)
+{
+ struct rt5033_charger_data *chg;
+ struct power_supply_battery_info *info;
+ int ret;
+
+ chg = devm_kzalloc(charger->dev, sizeof(*chg), GFP_KERNEL);
+ if (!chg)
+ return ERR_PTR(-ENOMEM);
+
+ ret = power_supply_get_battery_info(charger->psy, &info);
+ if (ret)
+ return ERR_PTR(dev_err_probe(charger->dev, -EINVAL,
+ "missing battery info\n"));
+
+ /* Assign data. Validity will be checked in the init functions. */
+ chg->pre_uamp = info->precharge_current_ua;
+ chg->fast_uamp = info->constant_charge_current_max_ua;
+ chg->eoc_uamp = info->charge_term_current_ua;
+ chg->pre_uvolt = info->precharge_voltage_max_uv;
+ chg->const_uvolt = info->constant_charge_voltage_max_uv;
+
+ return chg;
+}
+
+static const struct power_supply_desc rt5033_charger_desc = {
+ .name = "rt5033-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = rt5033_charger_props,
+ .num_properties = ARRAY_SIZE(rt5033_charger_props),
+ .get_property = rt5033_charger_get_property,
+};
+
+static int rt5033_charger_probe(struct platform_device *pdev)
+{
+ struct rt5033_charger *charger;
+ struct power_supply_config psy_cfg = {};
+ int ret;
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, charger);
+ charger->dev = &pdev->dev;
+ charger->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+
+ psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.drv_data = charger;
+
+ charger->psy = devm_power_supply_register(&pdev->dev,
+ &rt5033_charger_desc,
+ &psy_cfg);
+ if (IS_ERR(charger->psy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(charger->psy),
+ "Failed to register power supply\n");
+
+ charger->chg = rt5033_charger_dt_init(charger);
+ if (IS_ERR_OR_NULL(charger->chg))
+ return PTR_ERR(charger->chg);
+
+ ret = rt5033_charger_reg_init(charger);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct platform_device_id rt5033_charger_id[] = {
+ { "rt5033-charger", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, rt5033_charger_id);
+
+static const struct of_device_id rt5033_charger_of_match[] = {
+ { .compatible = "richtek,rt5033-charger", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rt5033_charger_of_match);
+
+static struct platform_driver rt5033_charger_driver = {
+ .driver = {
+ .name = "rt5033-charger",
+ .of_match_table = rt5033_charger_of_match,
+ },
+ .probe = rt5033_charger_probe,
+ .id_table = rt5033_charger_id,
+};
+module_platform_driver(rt5033_charger_driver);
+
+MODULE_DESCRIPTION("Richtek RT5033 charger driver");
+MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
+MODULE_LICENSE("GPL v2");
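Since the charger probes as a platform device, it is expected to be instantiated as a cell of the parent RT5033 MFD driver; a hypothetical cell entry (names taken from the id and OF match tables above) could look like:

#include <linux/mfd/core.h>

/* Hypothetical cell entry; the actual list lives in the rt5033 MFD driver */
static const struct mfd_cell rt5033_charger_cell = {
	.name		= "rt5033-charger",
	.of_compatible	= "richtek,rt5033-charger",
};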
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index 0149e00f2bf8..ff4dcf77c788 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -1737,7 +1737,7 @@ MODULE_DEVICE_TABLE(acpi, rt9455_i2c_acpi_match);
#endif
static struct i2c_driver rt9455_driver = {
- .probe_new = rt9455_probe,
+ .probe = rt9455_probe,
.remove = rt9455_remove,
.id_table = rt9455_i2c_id_table,
.driver = {
diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
index ea33693b6977..683adb18253d 100644
--- a/drivers/power/supply/rt9467-charger.c
+++ b/drivers/power/supply/rt9467-charger.c
@@ -1192,7 +1192,7 @@ static int rt9467_charger_probe(struct i2c_client *i2c)
i2c_set_clientdata(i2c, data);
/* Default pull charge enable gpio to make 'CHG_EN' by SW control only */
- ceb_gpio = devm_gpiod_get_optional(dev, "charge-enable", GPIOD_OUT_LOW);
+ ceb_gpio = devm_gpiod_get_optional(dev, "charge-enable", GPIOD_OUT_HIGH);
if (IS_ERR(ceb_gpio))
return dev_err_probe(dev, PTR_ERR(ceb_gpio),
"Failed to config charge enable gpio\n");
@@ -1272,7 +1272,7 @@ static struct i2c_driver rt9467_charger_driver = {
.name = "rt9467-charger",
.of_match_table = rt9467_charger_of_match_table,
},
- .probe_new = rt9467_charger_probe,
+ .probe = rt9467_charger_probe,
};
module_i2c_driver(rt9467_charger_driver);
diff --git a/drivers/power/supply/rt9471.c b/drivers/power/supply/rt9471.c
index 1ea40876494b..868b0703d15c 100644
--- a/drivers/power/supply/rt9471.c
+++ b/drivers/power/supply/rt9471.c
@@ -919,7 +919,7 @@ static struct i2c_driver rt9471_driver = {
.name = "rt9471",
.of_match_table = rt9471_of_device_id,
},
- .probe_new = rt9471_probe,
+ .probe = rt9471_probe,
.shutdown = rt9471_shutdown,
};
module_i2c_driver(rt9471_driver);
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index c4a95b01463a..62a354e01204 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -1275,7 +1275,7 @@ static const struct of_device_id sbs_dt_ids[] = {
MODULE_DEVICE_TABLE(of, sbs_dt_ids);
static struct i2c_driver sbs_battery_driver = {
- .probe_new = sbs_probe,
+ .probe = sbs_probe,
.alert = sbs_alert,
.id_table = sbs_id,
.driver = {
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
index a14e89ac0369..f4adde449270 100644
--- a/drivers/power/supply/sbs-charger.c
+++ b/drivers/power/supply/sbs-charger.c
@@ -240,7 +240,7 @@ static const struct i2c_device_id sbs_id[] = {
MODULE_DEVICE_TABLE(i2c, sbs_id);
static struct i2c_driver sbs_driver = {
- .probe_new = sbs_probe,
+ .probe = sbs_probe,
.id_table = sbs_id,
.driver = {
.name = "sbs-charger",
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index bde977391fd4..9e4141cffbf9 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -409,7 +409,7 @@ static struct i2c_driver sbsm_driver = {
.name = "sbsm",
.of_match_table = of_match_ptr(sbsm_dt_ids),
},
- .probe_new = sbsm_probe,
+ .probe = sbsm_probe,
.alert = sbsm_alert,
.id_table = sbsm_ids
};
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index b5f038310282..c8392933ee28 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -1629,7 +1629,7 @@ static struct i2c_driver smb347_driver = {
.name = "smb347",
.of_match_table = smb3xx_of_match,
},
- .probe_new = smb347_probe,
+ .probe = smb347_probe,
.remove = smb347_remove,
.shutdown = smb347_shutdown,
.id_table = smb347_id,
diff --git a/drivers/power/supply/twl4030_madc_battery.c b/drivers/power/supply/twl4030_madc_battery.c
index 48649dcfe3a9..7fe029673b22 100644
--- a/drivers/power/supply/twl4030_madc_battery.c
+++ b/drivers/power/supply/twl4030_madc_battery.c
@@ -168,19 +168,13 @@ static int twl4030_madc_bat_get_property(struct power_supply *psy,
return 0;
}
-static void twl4030_madc_bat_ext_changed(struct power_supply *psy)
-{
- power_supply_changed(psy);
-}
-
static const struct power_supply_desc twl4030_madc_bat_desc = {
.name = "twl4030_battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
.properties = twl4030_madc_bat_props,
.num_properties = ARRAY_SIZE(twl4030_madc_bat_props),
.get_property = twl4030_madc_bat_get_property,
- .external_power_changed = twl4030_madc_bat_ext_changed,
-
+ .external_power_changed = power_supply_changed,
};
static int twl4030_cmp(const void *a, const void *b)
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index 836d44c9fb74..954feba6600b 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -680,7 +680,7 @@ static struct i2c_driver ucs1002_driver = {
.name = "ucs1002",
.of_match_table = ucs1002_of_match,
},
- .probe_new = ucs1002_probe,
+ .probe = ucs1002_probe,
};
module_i2c_driver(ucs1002_driver);
diff --git a/drivers/power/supply/ug3105_battery.c b/drivers/power/supply/ug3105_battery.c
index fbc966842509..ccc5c4d2e230 100644
--- a/drivers/power/supply/ug3105_battery.c
+++ b/drivers/power/supply/ug3105_battery.c
@@ -476,7 +476,7 @@ static struct i2c_driver ug3105_i2c_driver = {
.name = "ug3105",
.pm = &ug3105_pm_ops,
},
- .probe_new = ug3105_probe,
+ .probe = ug3105_probe,
.id_table = ug3105_id,
};
module_i2c_driver(ug3105_i2c_driver);
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 77219cdcd683..6e4d5456a885 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -358,6 +358,9 @@ static umode_t ptp_is_attribute_visible(struct kobject *kobj,
attr == &dev_attr_max_vclocks.attr) {
if (ptp->is_virtual_clock)
mode = 0;
+ } else if (attr == &dev_attr_max_phase_adjustment.attr) {
+ if (!info->adjphase || !info->getmaxphase)
+ mode = 0;
}
return mode;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 8df861b1f4a3..6210babb0741 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -405,6 +405,16 @@ config PWM_MEDIATEK
To compile this driver as a module, choose M here: the module
will be called pwm-mediatek.
+config PWM_MICROCHIP_CORE
+ tristate "Microchip corePWM PWM support"
+ depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ PWM driver for Microchip FPGA soft IP core.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-microchip-core.
+
config PWM_MXS
tristate "Freescale MXS PWM support"
depends on ARCH_MXS || COMPILE_TEST
@@ -493,6 +503,17 @@ config PWM_ROCKCHIP
Generic PWM framework driver for the PWM controller found on
Rockchip SoCs.
+config PWM_RZ_MTU3
+ tristate "Renesas RZ/G2L MTU3a PWM Timer support"
+ depends on RZ_MTU3 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This driver exposes the MTU3a PWM Timer controller found in Renesas
+	  RZ/G2L-like chips through the PWM API.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-rz-mtu3.
+
config PWM_SAMSUNG
tristate "Samsung PWM support"
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 19899b912e00..c822389c2a24 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o
obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o
obj-$(CONFIG_PWM_MESON) += pwm-meson.o
obj-$(CONFIG_PWM_MEDIATEK) += pwm-mediatek.o
+obj-$(CONFIG_PWM_MICROCHIP_CORE) += pwm-microchip-core.o
obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o
obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
obj-$(CONFIG_PWM_NTXEC) += pwm-ntxec.o
@@ -45,6 +46,7 @@ obj-$(CONFIG_PWM_RASPBERRYPI_POE) += pwm-raspberrypi-poe.o
obj-$(CONFIG_PWM_RCAR) += pwm-rcar.o
obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
+obj-$(CONFIG_PWM_RZ_MTU3) += pwm-rz-mtu3.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o
obj-$(CONFIG_PWM_SL28CPLD) += pwm-sl28cpld.o
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index 507ff0d5f7bd..583a7d69c741 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -190,7 +190,7 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
int err;
if (pdev->id < 1 || pdev->id > 31)
- return dev_err_probe(&pdev->dev, EINVAL, "Invalid device id %d\n", pdev->id);
+ return dev_err_probe(&pdev->dev, -EINVAL, "Invalid device id %d\n", pdev->id);
/*
* Nothing to be done in probe, this is required to get the
diff --git a/drivers/pwm/pwm-clk.c b/drivers/pwm/pwm-clk.c
index f1da99881adf..0ee4d2aee4df 100644
--- a/drivers/pwm/pwm-clk.c
+++ b/drivers/pwm/pwm-clk.c
@@ -89,7 +89,7 @@ static int pwm_clk_probe(struct platform_device *pdev)
if (!pcchip)
return -ENOMEM;
- pcchip->clk = devm_clk_get(&pdev->dev, NULL);
+ pcchip->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(pcchip->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(pcchip->clk),
"Failed to get clock\n");
@@ -98,15 +98,9 @@ static int pwm_clk_probe(struct platform_device *pdev)
pcchip->chip.ops = &pwm_clk_ops;
pcchip->chip.npwm = 1;
- ret = clk_prepare(pcchip->clk);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "Failed to prepare clock\n");
-
ret = pwmchip_add(&pcchip->chip);
- if (ret < 0) {
- clk_unprepare(pcchip->clk);
+ if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to add pwm chip\n");
- }
platform_set_drvdata(pdev, pcchip);
return 0;
@@ -120,8 +114,6 @@ static void pwm_clk_remove(struct platform_device *pdev)
if (pcchip->clk_enabled)
clk_disable(pcchip->clk);
-
- clk_unprepare(pcchip->clk);
}
static const struct of_device_id pwm_clk_dt_ids[] = {
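Both this pwm-clk hunk and the pwm-sifive hunk further down switch to devm_clk_get_prepared(), which returns a clock that is already prepared and is unprepared automatically on driver detach, so the explicit clk_prepare()/clk_unprepare() pairs and the error-path cleanup can go away. A minimal sketch of the pattern, with an invented driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Already prepared; devres calls clk_unprepare() on unbind. */
	clk = devm_clk_get_prepared(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed to get clock\n");

	/* Only the lightweight enable/disable remains to manage. */
	return clk_enable(clk);
}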
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 5e2b452ee5f2..98ab65c89685 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -397,6 +397,13 @@ static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
if (tpm->enable_count > 0)
return -EBUSY;
+ /*
+ * Force 'real_period' to zero so that the period update code runs
+ * after system resume, since suspend causes the period related
+ * registers to revert to their reset values.
+ */
+ tpm->real_period = 0;
+
clk_disable_unprepare(tpm->clk);
return 0;
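The comment added above documents why tpm->real_period is cleared on suspend: the driver caches the period it last programmed so the apply path can skip reprogramming when nothing changed, and that cache must be invalidated once the registers lose their contents. A generic sketch of the pattern, with invented names, not taken from the driver:

#include <linux/types.h>

/* Hypothetical state, for illustration only. */
struct example_pwm {
	u64 cached_period;	/* what we believe the hardware holds */
	u64 requested_period;
};

static void example_apply(struct example_pwm *p)
{
	/* Reprogram only when the cached value differs from the request. */
	if (p->cached_period != p->requested_period) {
		/* ... write the period registers here ... */
		p->cached_period = p->requested_period;
	}
}

static void example_suspend(struct example_pwm *p)
{
	/*
	 * Suspend resets the registers, so invalidate the cache; otherwise
	 * example_apply() would wrongly skip the rewrite after resume.
	 */
	p->cached_period = 0;
}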
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 5b5eeaff35da..7a51d210a877 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -38,6 +38,7 @@ struct pwm_mediatek_of_data {
unsigned int num_pwms;
bool pwm45_fixup;
bool has_ck_26m_sel;
+ const unsigned int *reg_offset;
};
/**
@@ -59,10 +60,14 @@ struct pwm_mediatek_chip {
const struct pwm_mediatek_of_data *soc;
};
-static const unsigned int pwm_mediatek_reg_offset[] = {
+static const unsigned int mtk_pwm_reg_offset_v1[] = {
0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
};
+static const unsigned int mtk_pwm_reg_offset_v2[] = {
+ 0x0080, 0x00c0, 0x0100, 0x0140, 0x0180, 0x01c0, 0x0200, 0x0240
+};
+
static inline struct pwm_mediatek_chip *
to_pwm_mediatek_chip(struct pwm_chip *chip)
{
@@ -111,7 +116,7 @@ static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip,
unsigned int num, unsigned int offset,
u32 value)
{
- writel(value, chip->regs + pwm_mediatek_reg_offset[num] + offset);
+ writel(value, chip->regs + chip->soc->reg_offset[num] + offset);
}
static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -285,60 +290,77 @@ static const struct pwm_mediatek_of_data mt2712_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
.has_ck_26m_sel = false,
+ .reg_offset = mtk_pwm_reg_offset_v1,
};
static const struct pwm_mediatek_of_data mt6795_pwm_data = {
.num_pwms = 7,
.pwm45_fixup = false,
.has_ck_26m_sel = false,
+ .reg_offset = mtk_pwm_reg_offset_v1,
};
static const struct pwm_mediatek_of_data mt7622_pwm_data = {
.num_pwms = 6,
.pwm45_fixup = false,
.has_ck_26m_sel = true,
+ .reg_offset = mtk_pwm_reg_offset_v1,
};
static const struct pwm_mediatek_of_data mt7623_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = true,
.has_ck_26m_sel = false,
+ .reg_offset = mtk_pwm_reg_offset_v1,
};
static const struct pwm_mediatek_of_data mt7628_pwm_data = {
.num_pwms = 4,
.pwm45_fixup = true,
.has_ck_26m_sel = false,
+ .reg_offset = mtk_pwm_reg_offset_v1,
};
static const struct pwm_mediatek_of_data mt7629_pwm_data = {
.num_pwms = 1,
.pwm45_fixup = false,
.has_ck_26m_sel = false,
+ .reg_offset = mtk_pwm_reg_offset_v1,
};
-static const struct pwm_mediatek_of_data mt8183_pwm_data = {
- .num_pwms = 4,
+static const struct pwm_mediatek_of_data mt7981_pwm_data = {
+ .num_pwms = 3,
.pwm45_fixup = false,
.has_ck_26m_sel = true,
+ .reg_offset = mtk_pwm_reg_offset_v2,
};
-static const struct pwm_mediatek_of_data mt8365_pwm_data = {
- .num_pwms = 3,
+static const struct pwm_mediatek_of_data mt7986_pwm_data = {
+ .num_pwms = 2,
.pwm45_fixup = false,
.has_ck_26m_sel = true,
+ .reg_offset = mtk_pwm_reg_offset_v1,
};
-static const struct pwm_mediatek_of_data mt7986_pwm_data = {
- .num_pwms = 2,
+static const struct pwm_mediatek_of_data mt8183_pwm_data = {
+ .num_pwms = 4,
+ .pwm45_fixup = false,
+ .has_ck_26m_sel = true,
+ .reg_offset = mtk_pwm_reg_offset_v1,
+};
+
+static const struct pwm_mediatek_of_data mt8365_pwm_data = {
+ .num_pwms = 3,
.pwm45_fixup = false,
.has_ck_26m_sel = true,
+ .reg_offset = mtk_pwm_reg_offset_v1,
};
static const struct pwm_mediatek_of_data mt8516_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = false,
.has_ck_26m_sel = true,
+ .reg_offset = mtk_pwm_reg_offset_v1,
};
static const struct of_device_id pwm_mediatek_of_match[] = {
@@ -348,6 +370,7 @@ static const struct of_device_id pwm_mediatek_of_match[] = {
{ .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
{ .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
{ .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data },
+ { .compatible = "mediatek,mt7981-pwm", .data = &mt7981_pwm_data },
{ .compatible = "mediatek,mt7986-pwm", .data = &mt7986_pwm_data },
{ .compatible = "mediatek,mt8183-pwm", .data = &mt8183_pwm_data },
{ .compatible = "mediatek,mt8365-pwm", .data = &mt8365_pwm_data },
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 5732300eb004..22f54db3ae8e 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -49,9 +49,9 @@
#define PWM_HIGH_MASK GENMASK(31, 16)
#define REG_MISC_AB 0x8
-#define MISC_B_CLK_EN BIT(23)
-#define MISC_A_CLK_EN BIT(15)
-#define MISC_CLK_DIV_MASK 0x7f
+#define MISC_B_CLK_EN_SHIFT 23
+#define MISC_A_CLK_EN_SHIFT 15
+#define MISC_CLK_DIV_WIDTH 7
#define MISC_B_CLK_DIV_SHIFT 16
#define MISC_A_CLK_DIV_SHIFT 8
#define MISC_B_CLK_SEL_SHIFT 6
@@ -61,37 +61,39 @@
#define MISC_A_EN BIT(0)
#define MESON_NUM_PWMS 2
+#define MESON_MAX_MUX_PARENTS 4
static struct meson_pwm_channel_data {
u8 reg_offset;
u8 clk_sel_shift;
u8 clk_div_shift;
- u32 clk_en_mask;
+ u8 clk_en_shift;
u32 pwm_en_mask;
} meson_pwm_per_channel_data[MESON_NUM_PWMS] = {
{
.reg_offset = REG_PWM_A,
.clk_sel_shift = MISC_A_CLK_SEL_SHIFT,
.clk_div_shift = MISC_A_CLK_DIV_SHIFT,
- .clk_en_mask = MISC_A_CLK_EN,
+ .clk_en_shift = MISC_A_CLK_EN_SHIFT,
.pwm_en_mask = MISC_A_EN,
},
{
.reg_offset = REG_PWM_B,
.clk_sel_shift = MISC_B_CLK_SEL_SHIFT,
.clk_div_shift = MISC_B_CLK_DIV_SHIFT,
- .clk_en_mask = MISC_B_CLK_EN,
+ .clk_en_shift = MISC_B_CLK_EN_SHIFT,
.pwm_en_mask = MISC_B_EN,
}
};
struct meson_pwm_channel {
+ unsigned long rate;
unsigned int hi;
unsigned int lo;
- u8 pre_div;
- struct clk *clk_parent;
struct clk_mux mux;
+ struct clk_divider div;
+ struct clk_gate gate;
struct clk *clk;
};
@@ -124,16 +126,6 @@ static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
struct device *dev = chip->dev;
int err;
- if (channel->clk_parent) {
- err = clk_set_parent(channel->clk, channel->clk_parent);
- if (err < 0) {
- dev_err(dev, "failed to set parent %s for %s: %d\n",
- __clk_get_name(channel->clk_parent),
- __clk_get_name(channel->clk), err);
- return err;
- }
- }
-
err = clk_prepare_enable(channel->clk);
if (err < 0) {
dev_err(dev, "failed to enable clock %s: %d\n",
@@ -156,8 +148,9 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
- unsigned int duty, period, pre_div, cnt, duty_cnt;
+ unsigned int cnt, duty_cnt;
unsigned long fin_freq;
+ u64 duty, period, freq;
duty = state->duty_cycle;
period = state->period;
@@ -171,7 +164,11 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
if (state->polarity == PWM_POLARITY_INVERSED)
duty = period - duty;
- fin_freq = clk_get_rate(channel->clk);
+ freq = div64_u64(NSEC_PER_SEC * 0xffffULL, period);
+ if (freq > ULONG_MAX)
+ freq = ULONG_MAX;
+
+ fin_freq = clk_round_rate(channel->clk, freq);
if (fin_freq == 0) {
dev_err(meson->chip.dev, "invalid source clock frequency\n");
return -EINVAL;
@@ -179,46 +176,31 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
- pre_div = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * 0xffffLL);
- if (pre_div > MISC_CLK_DIV_MASK) {
- dev_err(meson->chip.dev, "unable to get period pre_div\n");
- return -EINVAL;
- }
-
- cnt = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * (pre_div + 1));
+ cnt = div_u64(fin_freq * period, NSEC_PER_SEC);
if (cnt > 0xffff) {
dev_err(meson->chip.dev, "unable to get period cnt\n");
return -EINVAL;
}
- dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period,
- pre_div, cnt);
+ dev_dbg(meson->chip.dev, "period=%llu cnt=%u\n", period, cnt);
if (duty == period) {
- channel->pre_div = pre_div;
channel->hi = cnt;
channel->lo = 0;
} else if (duty == 0) {
- channel->pre_div = pre_div;
channel->hi = 0;
channel->lo = cnt;
} else {
- /* Then check is we can have the duty with the same pre_div */
- duty_cnt = div64_u64(fin_freq * (u64)duty,
- NSEC_PER_SEC * (pre_div + 1));
- if (duty_cnt > 0xffff) {
- dev_err(meson->chip.dev, "unable to get duty cycle\n");
- return -EINVAL;
- }
+ duty_cnt = div_u64(fin_freq * duty, NSEC_PER_SEC);
- dev_dbg(meson->chip.dev, "duty=%u pre_div=%u duty_cnt=%u\n",
- duty, pre_div, duty_cnt);
+ dev_dbg(meson->chip.dev, "duty=%llu duty_cnt=%u\n", duty, duty_cnt);
- channel->pre_div = pre_div;
channel->hi = duty_cnt;
channel->lo = cnt - duty_cnt;
}
+ channel->rate = fin_freq;
+
return 0;
}
@@ -228,16 +210,15 @@ static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
struct meson_pwm_channel_data *channel_data;
unsigned long flags;
u32 value;
+ int err;
channel_data = &meson_pwm_per_channel_data[pwm->hwpwm];
- spin_lock_irqsave(&meson->lock, flags);
+ err = clk_set_rate(channel->clk, channel->rate);
+ if (err)
+ dev_err(meson->chip.dev, "setting clock rate failed\n");
- value = readl(meson->base + REG_MISC_AB);
- value &= ~(MISC_CLK_DIV_MASK << channel_data->clk_div_shift);
- value |= channel->pre_div << channel_data->clk_div_shift;
- value |= channel_data->clk_en_mask;
- writel(value, meson->base + REG_MISC_AB);
+ spin_lock_irqsave(&meson->lock, flags);
value = FIELD_PREP(PWM_HIGH_MASK, channel->hi) |
FIELD_PREP(PWM_LOW_MASK, channel->lo);
@@ -276,16 +257,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
/*
* This IP block revision doesn't have an "always high"
* setting which we can use for "inverted disabled".
- * Instead we achieve this using the same settings
- * that we use a pre_div of 0 (to get the shortest
- * possible duration for one "count") and
- * "period == duty_cycle". This results in a signal
+ * Instead we achieve this by setting mux parent with
+ * highest rate and minimum divider value, resulting
+ * in the shortest possible duration for one "count"
+ * and "period == duty_cycle". This results in a signal
* which is LOW for one "count", while being HIGH for
* the rest of the period (so the signal is HIGH for slightly
* less than 100% of the period, but this is the best
* we can achieve).
*/
- channel->pre_div = 0;
+ channel->rate = ULONG_MAX;
channel->hi = ~0;
channel->lo = 0;
@@ -304,13 +285,12 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
-static unsigned int meson_pwm_cnt_to_ns(struct pwm_chip *chip,
- struct pwm_device *pwm, u32 cnt)
+static u64 meson_pwm_cnt_to_ns(struct pwm_chip *chip, struct pwm_device *pwm,
+ u32 cnt)
{
struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel;
unsigned long fin_freq;
- u32 fin_ns;
/* to_meson_pwm() can only be used after .get_state() is called */
channel = &meson->channels[pwm->hwpwm];
@@ -319,9 +299,7 @@ static unsigned int meson_pwm_cnt_to_ns(struct pwm_chip *chip,
if (fin_freq == 0)
return 0;
- fin_ns = div_u64(NSEC_PER_SEC, fin_freq);
-
- return cnt * fin_ns * (channel->pre_div + 1);
+ return div64_ul(NSEC_PER_SEC * (u64)cnt, fin_freq);
}
static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -330,7 +308,7 @@ static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel_data *channel_data;
struct meson_pwm_channel *channel;
- u32 value, tmp;
+ u32 value;
if (!state)
return 0;
@@ -339,30 +317,14 @@ static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
channel_data = &meson_pwm_per_channel_data[pwm->hwpwm];
value = readl(meson->base + REG_MISC_AB);
-
- tmp = channel_data->pwm_en_mask | channel_data->clk_en_mask;
- state->enabled = (value & tmp) == tmp;
-
- tmp = value >> channel_data->clk_div_shift;
- channel->pre_div = FIELD_GET(MISC_CLK_DIV_MASK, tmp);
+ state->enabled = value & channel_data->pwm_en_mask;
value = readl(meson->base + channel_data->reg_offset);
-
channel->lo = FIELD_GET(PWM_LOW_MASK, value);
channel->hi = FIELD_GET(PWM_HIGH_MASK, value);
- if (channel->lo == 0) {
- state->period = meson_pwm_cnt_to_ns(chip, pwm, channel->hi);
- state->duty_cycle = state->period;
- } else if (channel->lo >= channel->hi) {
- state->period = meson_pwm_cnt_to_ns(chip, pwm,
- channel->lo + channel->hi);
- state->duty_cycle = meson_pwm_cnt_to_ns(chip, pwm,
- channel->hi);
- } else {
- state->period = 0;
- state->duty_cycle = 0;
- }
+ state->period = meson_pwm_cnt_to_ns(chip, pwm, channel->lo + channel->hi);
+ state->duty_cycle = meson_pwm_cnt_to_ns(chip, pwm, channel->hi);
state->polarity = PWM_POLARITY_NORMAL;
@@ -378,7 +340,7 @@ static const struct pwm_ops meson_pwm_ops = {
};
static const char * const pwm_meson8b_parent_names[] = {
- "xtal", "vid_pll", "fclk_div4", "fclk_div3"
+ "xtal", NULL, "fclk_div4", "fclk_div3"
};
static const struct meson_pwm_data pwm_meson8b_data = {
@@ -386,15 +348,6 @@ static const struct meson_pwm_data pwm_meson8b_data = {
.num_parents = ARRAY_SIZE(pwm_meson8b_parent_names),
};
-static const char * const pwm_gxbb_parent_names[] = {
- "xtal", "hdmi_pll", "fclk_div4", "fclk_div3"
-};
-
-static const struct meson_pwm_data pwm_gxbb_data = {
- .parent_names = pwm_gxbb_parent_names,
- .num_parents = ARRAY_SIZE(pwm_gxbb_parent_names),
-};
-
/*
* Only the 2 first inputs of the GXBB AO PWMs are valid
* The last 2 are grounded
@@ -444,15 +397,6 @@ static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
.num_parents = ARRAY_SIZE(pwm_g12a_ao_cd_parent_names),
};
-static const char * const pwm_g12a_ee_parent_names[] = {
- "xtal", "hdmi_pll", "fclk_div4", "fclk_div3"
-};
-
-static const struct meson_pwm_data pwm_g12a_ee_data = {
- .parent_names = pwm_g12a_ee_parent_names,
- .num_parents = ARRAY_SIZE(pwm_g12a_ee_parent_names),
-};
-
static const struct of_device_id meson_pwm_matches[] = {
{
.compatible = "amlogic,meson8b-pwm",
@@ -460,7 +404,7 @@ static const struct of_device_id meson_pwm_matches[] = {
},
{
.compatible = "amlogic,meson-gxbb-pwm",
- .data = &pwm_gxbb_data
+ .data = &pwm_meson8b_data
},
{
.compatible = "amlogic,meson-gxbb-ao-pwm",
@@ -476,7 +420,7 @@ static const struct of_device_id meson_pwm_matches[] = {
},
{
.compatible = "amlogic,meson-g12a-ee-pwm",
- .data = &pwm_g12a_ee_data
+ .data = &pwm_meson8b_data
},
{
.compatible = "amlogic,meson-g12a-ao-pwm-ab",
@@ -492,21 +436,28 @@ MODULE_DEVICE_TABLE(of, meson_pwm_matches);
static int meson_pwm_init_channels(struct meson_pwm *meson)
{
+ struct clk_parent_data mux_parent_data[MESON_MAX_MUX_PARENTS] = {};
struct device *dev = meson->chip.dev;
- struct clk_init_data init;
unsigned int i;
char name[255];
int err;
+ for (i = 0; i < meson->data->num_parents; i++) {
+ mux_parent_data[i].index = -1;
+ mux_parent_data[i].name = meson->data->parent_names[i];
+ }
+
for (i = 0; i < meson->chip.npwm; i++) {
struct meson_pwm_channel *channel = &meson->channels[i];
+ struct clk_parent_data div_parent = {}, gate_parent = {};
+ struct clk_init_data init = {};
snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i);
init.name = name;
init.ops = &clk_mux_ops;
init.flags = 0;
- init.parent_names = meson->data->parent_names;
+ init.parent_data = mux_parent_data;
init.num_parents = meson->data->num_parents;
channel->mux.reg = meson->base + REG_MISC_AB;
@@ -518,18 +469,63 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
channel->mux.table = NULL;
channel->mux.hw.init = &init;
- channel->clk = devm_clk_register(dev, &channel->mux.hw);
- if (IS_ERR(channel->clk)) {
- err = PTR_ERR(channel->clk);
+ err = devm_clk_hw_register(dev, &channel->mux.hw);
+ if (err) {
+ dev_err(dev, "failed to register %s: %d\n", name, err);
+ return err;
+ }
+
+ snprintf(name, sizeof(name), "%s#div%u", dev_name(dev), i);
+
+ init.name = name;
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ div_parent.index = -1;
+ div_parent.hw = &channel->mux.hw;
+ init.parent_data = &div_parent;
+ init.num_parents = 1;
+
+ channel->div.reg = meson->base + REG_MISC_AB;
+ channel->div.shift = meson_pwm_per_channel_data[i].clk_div_shift;
+ channel->div.width = MISC_CLK_DIV_WIDTH;
+ channel->div.hw.init = &init;
+ channel->div.flags = 0;
+ channel->div.lock = &meson->lock;
+
+ err = devm_clk_hw_register(dev, &channel->div.hw);
+ if (err) {
dev_err(dev, "failed to register %s: %d\n", name, err);
return err;
}
- snprintf(name, sizeof(name), "clkin%u", i);
+ snprintf(name, sizeof(name), "%s#gate%u", dev_name(dev), i);
- channel->clk_parent = devm_clk_get_optional(dev, name);
- if (IS_ERR(channel->clk_parent))
- return PTR_ERR(channel->clk_parent);
+ init.name = name;
+ init.ops = &clk_gate_ops;
+ init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED;
+ gate_parent.index = -1;
+ gate_parent.hw = &channel->div.hw;
+ init.parent_data = &gate_parent;
+ init.num_parents = 1;
+
+ channel->gate.reg = meson->base + REG_MISC_AB;
+ channel->gate.bit_idx = meson_pwm_per_channel_data[i].clk_en_shift;
+ channel->gate.hw.init = &init;
+ channel->gate.flags = 0;
+ channel->gate.lock = &meson->lock;
+
+ err = devm_clk_hw_register(dev, &channel->gate.hw);
+ if (err) {
+ dev_err(dev, "failed to register %s: %d\n", name, err);
+ return err;
+ }
+
+ channel->clk = devm_clk_hw_get_clk(dev, &channel->gate.hw, NULL);
+ if (IS_ERR(channel->clk)) {
+ err = PTR_ERR(channel->clk);
+ dev_err(dev, "failed to register %s: %d\n", name, err);
+ return err;
+ }
}
return 0;
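For orientation, the pwm-meson rework above replaces the hand-managed pre_div with a per-channel clock chain built from common clock framework primitives: mux (MISC_AB clk_sel bits) -> divider (MISC_AB clk_div bits) -> gate (MISC_AB clk_en bit) -> channel->clk. As a worked example with invented numbers: for a requested period of 5 ms the apply path first asks for freq = NSEC_PER_SEC * 0xffff / period, about 13.107 MHz, the highest input rate at which the 16-bit counter still covers the period. If clk_round_rate() on that chain can only deliver, say, 12.5 MHz, then cnt = fin_freq * period / NSEC_PER_SEC = 62500, which fits in 16 bits; meson_pwm_enable() later programs the chosen rate with clk_set_rate() (adjusting mux and divider through the CCF), and the gate bit is toggled by clk_prepare_enable() in .request.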
diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
new file mode 100644
index 000000000000..8750b57684a9
--- /dev/null
+++ b/drivers/pwm/pwm-microchip-core.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * corePWM driver for Microchip "soft" FPGA IP cores.
+ *
+ * Copyright (c) 2021-2023 Microchip Corporation. All rights reserved.
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ * Documentation:
+ * https://www.microsemi.com/document-portal/doc_download/1245275-corepwm-hb
+ *
+ * Limitations:
+ * - If the IP block is configured without "shadow registers", all register
+ * writes will take effect immediately, causing glitches on the output.
+ * If shadow registers *are* enabled, setting the "SYNC_UPDATE" register
+ * notifies the core that it needs to update the registers defining the
+ * waveform from the contents of the "shadow registers". Otherwise, changes
+ * will take effect immediately, even for those channels.
+ * As setting the period/duty cycle takes 4 register writes, there is a window
+ * in which this races against the start of a new period.
+ * - The IP block has no concept of a duty cycle, only rising/falling edges of
+ * the waveform. Unfortunately, if the rising & falling edges registers have
+ * the same value written to them the IP block will do whichever of a rising
+ * or a falling edge is possible. I.E. a 50% waveform at twice the requested
+ * period. Therefore to get a 0% waveform, the output is set to the max high/low
+ * time depending on polarity.
+ * If the duty cycle is 0%, and the requested period is less than the
+ * available period resolution, this will manifest as a ~100% waveform (with
+ * some output glitches) rather than 50%.
+ * - The PWM period is set for the whole IP block not per channel. The driver
+ * will only change the period if no other PWM output is enabled.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define MCHPCOREPWM_PRESCALE_MAX 0xff
+#define MCHPCOREPWM_PERIOD_STEPS_MAX 0xfe
+#define MCHPCOREPWM_PERIOD_MAX 0xff00
+
+#define MCHPCOREPWM_PRESCALE 0x00
+#define MCHPCOREPWM_PERIOD 0x04
+#define MCHPCOREPWM_EN(i) (0x08 + 0x04 * (i)) /* 0x08, 0x0c */
+#define MCHPCOREPWM_POSEDGE(i) (0x10 + 0x08 * (i)) /* 0x10, 0x18, ..., 0x88 */
+#define MCHPCOREPWM_NEGEDGE(i) (0x14 + 0x08 * (i)) /* 0x14, 0x1c, ..., 0x8c */
+#define MCHPCOREPWM_SYNC_UPD 0xe4
+#define MCHPCOREPWM_TIMEOUT_MS 100u
+
+struct mchp_core_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ void __iomem *base;
+ struct mutex lock; /* protects the shared period */
+ ktime_t update_timestamp;
+ u32 sync_update_mask;
+ u16 channel_enabled;
+};
+
+static inline struct mchp_core_pwm_chip *to_mchp_core_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct mchp_core_pwm_chip, chip);
+}
+
+static void mchp_core_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
+ bool enable, u64 period)
+{
+ struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
+ u8 channel_enable, reg_offset, shift;
+
+ /*
+ * There are two adjacent 8 bit control regs, the lower reg controls
+ * 0-7 and the upper reg 8-15. Check if the pwm is in the upper reg
+ * and if so, offset by the bus width.
+ */
+ reg_offset = MCHPCOREPWM_EN(pwm->hwpwm >> 3);
+ shift = pwm->hwpwm & 7;
+
+ channel_enable = readb_relaxed(mchp_core_pwm->base + reg_offset);
+ channel_enable &= ~(1 << shift);
+ channel_enable |= (enable << shift);
+
+ writel_relaxed(channel_enable, mchp_core_pwm->base + reg_offset);
+ mchp_core_pwm->channel_enabled &= ~BIT(pwm->hwpwm);
+ mchp_core_pwm->channel_enabled |= enable << pwm->hwpwm;
+
+ /*
+ * The updated values will not appear on the bus until they have been
+ * applied to the waveform at the beginning of the next period.
+ * This is a NO-OP if the channel does not have shadow registers.
+ */
+ if (mchp_core_pwm->sync_update_mask & (1 << pwm->hwpwm))
+ mchp_core_pwm->update_timestamp = ktime_add_ns(ktime_get(), period);
+}
+
+static void mchp_core_pwm_wait_for_sync_update(struct mchp_core_pwm_chip *mchp_core_pwm,
+ unsigned int channel)
+{
+ /*
+ * If a shadow register is used for this PWM channel, and iff there is
+ * a pending update to the waveform, we must wait for it to be applied
+ * before attempting to read its state. Reading the registers yields
+ * the currently implemented settings & the new ones are only readable
+ * once the current period has ended.
+ */
+
+ if (mchp_core_pwm->sync_update_mask & (1 << channel)) {
+ ktime_t current_time = ktime_get();
+ s64 remaining_ns;
+ u32 delay_us;
+
+ remaining_ns = ktime_to_ns(ktime_sub(mchp_core_pwm->update_timestamp,
+ current_time));
+
+ /*
+ * If the update has gone through, don't bother waiting for
+ * obvious reasons. Otherwise wait around for an appropriate
+ * amount of time for the update to go through.
+ */
+ if (remaining_ns <= 0)
+ return;
+
+ delay_us = DIV_ROUND_UP_ULL(remaining_ns, NSEC_PER_USEC);
+ fsleep(delay_us);
+ }
+}
+
+static u64 mchp_core_pwm_calc_duty(const struct pwm_state *state, u64 clk_rate,
+ u8 prescale, u8 period_steps)
+{
+ u64 duty_steps, tmp;
+
+ /*
+ * Calculate the duty cycle in multiples of the prescaled period:
+ * duty_steps = duty_in_ns / step_in_ns
+ * step_in_ns = ((prescale + 1) * NSEC_PER_SEC) / clk_rate
+ * The code below is rearranged slightly to only divide once.
+ */
+ tmp = (((u64)prescale) + 1) * NSEC_PER_SEC;
+ duty_steps = mul_u64_u64_div_u64(state->duty_cycle, clk_rate, tmp);
+
+ return duty_steps;
+}
+
+static void mchp_core_pwm_apply_duty(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state, u64 duty_steps,
+ u16 period_steps)
+{
+ struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
+ u8 posedge, negedge;
+ u8 first_edge = 0, second_edge = duty_steps;
+
+ /*
+ * Setting posedge == negedge doesn't yield a constant output,
+ * so that's an unsuitable setting to model duty_steps = 0.
+ * In that case set the unwanted edge to a value that never
+ * triggers.
+ */
+ if (duty_steps == 0)
+ first_edge = period_steps + 1;
+
+ if (state->polarity == PWM_POLARITY_INVERSED) {
+ negedge = first_edge;
+ posedge = second_edge;
+ } else {
+ posedge = first_edge;
+ negedge = second_edge;
+ }
+
+ /*
+ * Set the sync bit which ensures that periods that already started are
+ * completed unaltered. At each counter reset event the values are
+ * updated from the shadow registers.
+ */
+ writel_relaxed(posedge, mchp_core_pwm->base + MCHPCOREPWM_POSEDGE(pwm->hwpwm));
+ writel_relaxed(negedge, mchp_core_pwm->base + MCHPCOREPWM_NEGEDGE(pwm->hwpwm));
+}
+
+static int mchp_core_pwm_calc_period(const struct pwm_state *state, unsigned long clk_rate,
+ u16 *prescale, u16 *period_steps)
+{
+ u64 tmp;
+
+ /*
+ * Calculate the period cycles and prescale values.
+ * The registers are each 8 bits wide & multiplied to compute the period
+ * using the formula:
+ * (prescale + 1) * (period_steps + 1)
+ * period = -------------------------------------
+ * clk_rate
+ * so the maximum period that can be generated is 0x10000 times the
+ * period of the input clock.
+ * However, due to the design of the "hardware", it is not possible to
+ * attain a 100% duty cycle if the full range of period_steps is used.
+ * Therefore period_steps is restricted to 0xfe and the maximum multiple
+ * of the clock period attainable is (0xff + 1) * (0xfe + 1) = 0xff00
+ *
+ * The prescale and period_steps registers operate similarly to
+ * CLK_DIVIDER_ONE_BASED, where the value used by the hardware is that
+ * in the register plus one.
+ * It's therefore not possible to set a period lower than 1/clk_rate, so
+ * if tmp is 0, abort. Without aborting, we will set a period that is
+ * greater than that requested and, more importantly, will trigger the
+ * neg-/pos-edge issue described in the limitations.
+ */
+ tmp = mul_u64_u64_div_u64(state->period, clk_rate, NSEC_PER_SEC);
+ if (tmp >= MCHPCOREPWM_PERIOD_MAX) {
+ *prescale = MCHPCOREPWM_PRESCALE_MAX;
+ *period_steps = MCHPCOREPWM_PERIOD_STEPS_MAX;
+
+ return 0;
+ }
+
+ /*
+ * There are multiple strategies that could be used to choose the
+ * prescale & period_steps values.
+ * Here the idea is to pick values so that the selection of duty cycles
+ * is as finegrain as possible, while also keeping the period less than
+ * that requested.
+ *
+ * A simple way to satisfy the first condition is to always set
+ * period_steps to its maximum value. This neatly also satisfies the
+ * second condition too, since using the maximum value of period_steps
+ * to calculate prescale actually calculates its upper bound.
+ * Integer division will ensure a round down, so the period will thereby
+ * always be less than that requested.
+ *
+ * The downside of this approach is a significant degree of inaccuracy,
+ * especially as tmp approaches integer multiples of
+ * MCHPCOREPWM_PERIOD_STEPS_MAX.
+ *
+ * As we must produce a period less than that requested, and for the
+ * sake of creating a simple algorithm, disallow small values of tmp
+ * that would need special handling.
+ */
+ if (tmp < MCHPCOREPWM_PERIOD_STEPS_MAX + 1)
+ return -EINVAL;
+
+ /*
+ * This "optimal" value for prescale is be calculated using the maximum
+ * permitted value of period_steps, 0xfe.
+ *
+ * period * clk_rate
+ * prescale = ------------------------- - 1
+ * NSEC_PER_SEC * (0xfe + 1)
+ *
+ *
+ * period * clk_rate
+ * ------------------- was precomputed as `tmp`
+ * NSEC_PER_SEC
+ */
+ *prescale = ((u16)tmp) / (MCHPCOREPWM_PERIOD_STEPS_MAX + 1) - 1;
+
+ /*
+ * period_steps can be computed from prescale:
+ * period * clk_rate
+ * period_steps = ----------------------------- - 1
+ * NSEC_PER_SEC * (prescale + 1)
+ *
+ * However, in this approximation, we simply use the maximum value that
+ * was used to compute prescale.
+ */
+ *period_steps = MCHPCOREPWM_PERIOD_STEPS_MAX;
+
+ return 0;
+}
+
+static int mchp_core_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
+ bool period_locked;
+ unsigned long clk_rate;
+ u64 duty_steps;
+ u16 prescale, period_steps;
+ int ret;
+
+ if (!state->enabled) {
+ mchp_core_pwm_enable(chip, pwm, false, pwm->state.period);
+ return 0;
+ }
+
+ /*
+ * If clk_rate is too big, the following multiplication might overflow.
+ * However this is implausible, as the fabric of current FPGAs cannot
+ * provide clocks at a rate high enough.
+ */
+ clk_rate = clk_get_rate(mchp_core_pwm->clk);
+ if (clk_rate >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ ret = mchp_core_pwm_calc_period(state, clk_rate, &prescale, &period_steps);
+ if (ret)
+ return ret;
+
+ /*
+ * If the only thing that has changed is the duty cycle or the polarity,
+ * we can shortcut the calculations and just compute/apply the new duty
+ * cycle pos & neg edges
+ * As all the channels share the same period, do not allow it to be
+ * changed if any other channels are enabled.
+ * If the period is locked, it may not be possible to use a period
+ * less than that requested. In that case, we just abort.
+ */
+ period_locked = mchp_core_pwm->channel_enabled & ~(1 << pwm->hwpwm);
+
+ if (period_locked) {
+ u16 hw_prescale;
+ u16 hw_period_steps;
+
+ hw_prescale = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_PRESCALE);
+ hw_period_steps = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_PERIOD);
+
+ if ((period_steps + 1) * (prescale + 1) <
+ (hw_period_steps + 1) * (hw_prescale + 1))
+ return -EINVAL;
+
+ /*
+ * It is possible that something could have set the period_steps
+ * register to 0xff, which would prevent us from setting a 100%
+ * or 0% relative duty cycle, as explained above in
+ * mchp_core_pwm_calc_period().
+ * The period is locked and we cannot change this, so we abort.
+ */
+ if (hw_period_steps == MCHPCOREPWM_PERIOD_STEPS_MAX)
+ return -EINVAL;
+
+ prescale = hw_prescale;
+ period_steps = hw_period_steps;
+ }
+
+ duty_steps = mchp_core_pwm_calc_duty(state, clk_rate, prescale, period_steps);
+
+ /*
+ * Because the period is not per channel, it is possible that the
+ * requested duty cycle is longer than the period, in which case cap it
+ * to the period, IOW a 100% duty cycle.
+ */
+ if (duty_steps > period_steps)
+ duty_steps = period_steps + 1;
+
+ if (!period_locked) {
+ writel_relaxed(prescale, mchp_core_pwm->base + MCHPCOREPWM_PRESCALE);
+ writel_relaxed(period_steps, mchp_core_pwm->base + MCHPCOREPWM_PERIOD);
+ }
+
+ mchp_core_pwm_apply_duty(chip, pwm, state, duty_steps, period_steps);
+
+ mchp_core_pwm_enable(chip, pwm, true, pwm->state.period);
+
+ return 0;
+}
+
+static int mchp_core_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
+ int ret;
+
+ mutex_lock(&mchp_core_pwm->lock);
+
+ mchp_core_pwm_wait_for_sync_update(mchp_core_pwm, pwm->hwpwm);
+
+ ret = mchp_core_pwm_apply_locked(chip, pwm, state);
+
+ mutex_unlock(&mchp_core_pwm->lock);
+
+ return ret;
+}
+
+static int mchp_core_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
+ u64 rate;
+ u16 prescale, period_steps;
+ u8 duty_steps, posedge, negedge;
+
+ mutex_lock(&mchp_core_pwm->lock);
+
+ mchp_core_pwm_wait_for_sync_update(mchp_core_pwm, pwm->hwpwm);
+
+ if (mchp_core_pwm->channel_enabled & (1 << pwm->hwpwm))
+ state->enabled = true;
+ else
+ state->enabled = false;
+
+ rate = clk_get_rate(mchp_core_pwm->clk);
+
+ /*
+ * Calculating the period:
+ * The registers are each 8 bits wide & multiplied to compute the period
+ * using the formula:
+ * (prescale + 1) * (period_steps + 1)
+ * period = -------------------------------------
+ * clk_rate
+ *
+ * Note:
+ * The prescale and period_steps registers operate similarly to
+ * CLK_DIVIDER_ONE_BASED, where the value used by the hardware is that
+ * in the register plus one.
+ */
+ prescale = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_PRESCALE);
+ period_steps = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_PERIOD);
+
+ state->period = (period_steps + 1) * (prescale + 1);
+ state->period *= NSEC_PER_SEC;
+ state->period = DIV64_U64_ROUND_UP(state->period, rate);
+
+ posedge = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_POSEDGE(pwm->hwpwm));
+ negedge = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_NEGEDGE(pwm->hwpwm));
+
+ mutex_unlock(&mchp_core_pwm->lock);
+
+ if (negedge == posedge) {
+ state->duty_cycle = state->period;
+ state->period *= 2;
+ } else {
+ duty_steps = abs((s16)posedge - (s16)negedge);
+ state->duty_cycle = duty_steps * (prescale + 1) * NSEC_PER_SEC;
+ state->duty_cycle = DIV64_U64_ROUND_UP(state->duty_cycle, rate);
+ }
+
+ state->polarity = negedge < posedge ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
+
+ return 0;
+}
+
+static const struct pwm_ops mchp_core_pwm_ops = {
+ .apply = mchp_core_pwm_apply,
+ .get_state = mchp_core_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id mchp_core_of_match[] = {
+ {
+ .compatible = "microchip,corepwm-rtl-v4",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mchp_core_of_match);
+
+static int mchp_core_pwm_probe(struct platform_device *pdev)
+{
+ struct mchp_core_pwm_chip *mchp_core_pwm;
+ struct resource *regs;
+ int ret;
+
+ mchp_core_pwm = devm_kzalloc(&pdev->dev, sizeof(*mchp_core_pwm), GFP_KERNEL);
+ if (!mchp_core_pwm)
+ return -ENOMEM;
+
+ mchp_core_pwm->base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
+ if (IS_ERR(mchp_core_pwm->base))
+ return PTR_ERR(mchp_core_pwm->base);
+
+ mchp_core_pwm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(mchp_core_pwm->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mchp_core_pwm->clk),
+ "failed to get PWM clock\n");
+
+ if (of_property_read_u32(pdev->dev.of_node, "microchip,sync-update-mask",
+ &mchp_core_pwm->sync_update_mask))
+ mchp_core_pwm->sync_update_mask = 0;
+
+ mutex_init(&mchp_core_pwm->lock);
+
+ mchp_core_pwm->chip.dev = &pdev->dev;
+ mchp_core_pwm->chip.ops = &mchp_core_pwm_ops;
+ mchp_core_pwm->chip.npwm = 16;
+
+ mchp_core_pwm->channel_enabled = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_EN(0));
+ mchp_core_pwm->channel_enabled |=
+ readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_EN(1)) << 8;
+
+ /*
+ * Enable synchronous update mode for all channels for which shadow
+ * registers have been synthesised.
+ */
+ writel_relaxed(1U, mchp_core_pwm->base + MCHPCOREPWM_SYNC_UPD);
+ mchp_core_pwm->update_timestamp = ktime_get();
+
+ ret = devm_pwmchip_add(&pdev->dev, &mchp_core_pwm->chip);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to add pwmchip\n");
+
+ return 0;
+}
+
+static struct platform_driver mchp_core_pwm_driver = {
+ .driver = {
+ .name = "mchp-core-pwm",
+ .of_match_table = mchp_core_of_match,
+ },
+ .probe = mchp_core_pwm_probe,
+};
+module_platform_driver(mchp_core_pwm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("corePWM driver for Microchip FPGAs");
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 79e321e96f56..2401b6733241 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -79,14 +79,11 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
- if (!state->enabled) {
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
- 0x0);
-
- if (mdp->enabled) {
- clk_disable_unprepare(mdp->clk_mm);
- clk_disable_unprepare(mdp->clk_main);
- }
+ if (!state->enabled && mdp->enabled) {
+ mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN,
+ mdp->data->enable_mask, 0x0);
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
mdp->enabled = false;
return 0;
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 3ed5a48ca581..3038a68412a7 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -665,7 +665,7 @@ static struct i2c_driver pca9685_i2c_driver = {
.of_match_table = of_match_ptr(pca9685_dt_ids),
.pm = &pca9685_pwm_pm,
},
- .probe_new = pca9685_pwm_probe,
+ .probe = pca9685_pwm_probe,
.remove = pca9685_pwm_remove,
.id_table = pca9685_id,
};
diff --git a/drivers/pwm/pwm-rz-mtu3.c b/drivers/pwm/pwm-rz-mtu3.c
new file mode 100644
index 000000000000..bed8bd671e37
--- /dev/null
+++ b/drivers/pwm/pwm-rz-mtu3.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L MTU3a PWM Timer driver
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Hardware manual for this IP can be found here
+ * https://www.renesas.com/eu/en/document/mah/rzg2l-group-rzg2lc-group-users-manual-hardware-0?language=en
+ *
+ * Limitations:
+ * - When PWM is disabled, the output is driven to Hi-Z.
+ * - While the hardware supports both polarities, the driver (for now)
+ * only handles normal polarity.
+ * - HW uses one counter and two match components to configure duty_cycle
+ * and period.
+ * - Multi-Function Timer Pulse Unit (a.k.a MTU) has 7 HW channels for PWM
+ * operations. (The channels are MTU{0..4, 6, 7}.)
+ * - MTU{1, 2} channels have a single IO, whereas all other HW channels have
+ * 2 IOs.
+ * - Each IO is modelled as an independent PWM channel.
+ * - rz_mtu3_channel_io_map table is used to map the PWM channel to the
+ * corresponding HW channel as there are differences in the number of IOs
+ * between HW channels.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/limits.h>
+#include <linux/mfd/rz-mtu3.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pwm.h>
+#include <linux/time.h>
+
+#define RZ_MTU3_MAX_PWM_CHANNELS 12
+#define RZ_MTU3_MAX_HW_CHANNELS 7
+
+/**
+ * struct rz_mtu3_channel_io_map - MTU3 pwm channel map
+ *
+ * @base_pwm_number: First PWM of a channel
+ * @num_channel_ios: number of IOs on the HW channel.
+ */
+struct rz_mtu3_channel_io_map {
+ u8 base_pwm_number;
+ u8 num_channel_ios;
+};
+
+/**
+ * struct rz_mtu3_pwm_channel - MTU3 pwm channel data
+ *
+ * @mtu: MTU3 channel data
+ * @map: MTU3 pwm channel map
+ */
+struct rz_mtu3_pwm_channel {
+ struct rz_mtu3_channel *mtu;
+ const struct rz_mtu3_channel_io_map *map;
+};
+
+/**
+ * struct rz_mtu3_pwm_chip - MTU3 pwm private data
+ *
+ * @chip: MTU3 pwm chip data
+ * @clk: MTU3 module clock
+ * @lock: Lock to prevent concurrent access to the usage count
+ * @rate: MTU3 clock rate
+ * @user_count: MTU3 usage count
+ * @enable_count: MTU3 enable count
+ * @prescale: MTU3 prescale
+ * @channel_data: MTU3 pwm channel data
+ */
+
+struct rz_mtu3_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ struct mutex lock;
+ unsigned long rate;
+ u32 user_count[RZ_MTU3_MAX_HW_CHANNELS];
+ u32 enable_count[RZ_MTU3_MAX_HW_CHANNELS];
+ u8 prescale[RZ_MTU3_MAX_HW_CHANNELS];
+ struct rz_mtu3_pwm_channel channel_data[RZ_MTU3_MAX_HW_CHANNELS];
+};
+
+/*
+ * The MTU channels are {0..4, 6, 7} and the number of IOs on the MTU1
+ * and MTU2 channels is 1, compared to 2 on the others.
+ */
+static const struct rz_mtu3_channel_io_map channel_map[] = {
+ { 0, 2 }, { 2, 1 }, { 3, 1 }, { 4, 2 }, { 6, 2 }, { 8, 2 }, { 10, 2 }
+};
+
+static inline struct rz_mtu3_pwm_chip *to_rz_mtu3_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct rz_mtu3_pwm_chip, chip);
+}
+
+static void rz_mtu3_pwm_read_tgr_registers(struct rz_mtu3_pwm_channel *priv,
+ u16 reg_pv_offset, u16 *pv_val,
+ u16 reg_dc_offset, u16 *dc_val)
+{
+ *pv_val = rz_mtu3_16bit_ch_read(priv->mtu, reg_pv_offset);
+ *dc_val = rz_mtu3_16bit_ch_read(priv->mtu, reg_dc_offset);
+}
+
+static void rz_mtu3_pwm_write_tgr_registers(struct rz_mtu3_pwm_channel *priv,
+ u16 reg_pv_offset, u16 pv_val,
+ u16 reg_dc_offset, u16 dc_val)
+{
+ rz_mtu3_16bit_ch_write(priv->mtu, reg_pv_offset, pv_val);
+ rz_mtu3_16bit_ch_write(priv->mtu, reg_dc_offset, dc_val);
+}
+
+static u8 rz_mtu3_pwm_calculate_prescale(struct rz_mtu3_pwm_chip *rz_mtu3,
+ u64 period_cycles)
+{
+ u32 prescaled_period_cycles;
+ u8 prescale;
+
+ /*
+ * Supported prescale values are 1, 4, 16 and 64.
+ * TODO: Support prescale values 2, 8, 32, 256 and 1024.
+ */
+ prescaled_period_cycles = period_cycles >> 16;
+ if (prescaled_period_cycles >= 16)
+ prescale = 3;
+ else
+ prescale = (fls(prescaled_period_cycles) + 1) / 2;
+
+ return prescale;
+}
+
+static struct rz_mtu3_pwm_channel *
+rz_mtu3_get_channel(struct rz_mtu3_pwm_chip *rz_mtu3_pwm, u32 hwpwm)
+{
+ struct rz_mtu3_pwm_channel *priv = rz_mtu3_pwm->channel_data;
+ unsigned int ch;
+
+ for (ch = 0; ch < RZ_MTU3_MAX_HW_CHANNELS; ch++, priv++) {
+ if (priv->map->base_pwm_number + priv->map->num_channel_ios > hwpwm)
+ break;
+ }
+
+ return priv;
+}
+
+static bool rz_mtu3_pwm_is_ch_enabled(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
+ u32 hwpwm)
+{
+ struct rz_mtu3_pwm_channel *priv;
+ bool is_channel_en;
+ u8 val;
+
+ priv = rz_mtu3_get_channel(rz_mtu3_pwm, hwpwm);
+ is_channel_en = rz_mtu3_is_enabled(priv->mtu);
+ if (!is_channel_en)
+ return false;
+
+ if (priv->map->base_pwm_number == hwpwm)
+ val = rz_mtu3_8bit_ch_read(priv->mtu, RZ_MTU3_TIORH);
+ else
+ val = rz_mtu3_8bit_ch_read(priv->mtu, RZ_MTU3_TIORL);
+
+ return val & RZ_MTU3_TIOR_IOA;
+}
+
+static int rz_mtu3_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
+ struct rz_mtu3_pwm_channel *priv;
+ bool is_mtu3_channel_available;
+ u32 ch;
+
+ priv = rz_mtu3_get_channel(rz_mtu3_pwm, pwm->hwpwm);
+ ch = priv - rz_mtu3_pwm->channel_data;
+
+ mutex_lock(&rz_mtu3_pwm->lock);
+ /*
+ * Each channel must be requested only once, so if the channel
+ * serves two PWMs and the other is already requested, skip over
+ * rz_mtu3_request_channel()
+ */
+ if (!rz_mtu3_pwm->user_count[ch]) {
+ is_mtu3_channel_available = rz_mtu3_request_channel(priv->mtu);
+ if (!is_mtu3_channel_available) {
+ mutex_unlock(&rz_mtu3_pwm->lock);
+ return -EBUSY;
+ }
+ }
+
+ rz_mtu3_pwm->user_count[ch]++;
+ mutex_unlock(&rz_mtu3_pwm->lock);
+
+ return 0;
+}
+
+static void rz_mtu3_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
+ struct rz_mtu3_pwm_channel *priv;
+ u32 ch;
+
+ priv = rz_mtu3_get_channel(rz_mtu3_pwm, pwm->hwpwm);
+ ch = priv - rz_mtu3_pwm->channel_data;
+
+ mutex_lock(&rz_mtu3_pwm->lock);
+ rz_mtu3_pwm->user_count[ch]--;
+ if (!rz_mtu3_pwm->user_count[ch])
+ rz_mtu3_release_channel(priv->mtu);
+
+ mutex_unlock(&rz_mtu3_pwm->lock);
+}
+
+static int rz_mtu3_pwm_enable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
+ struct pwm_device *pwm)
+{
+ struct rz_mtu3_pwm_channel *priv;
+ u32 ch;
+ u8 val;
+ int rc;
+
+ rc = pm_runtime_resume_and_get(rz_mtu3_pwm->chip.dev);
+ if (rc)
+ return rc;
+
+ priv = rz_mtu3_get_channel(rz_mtu3_pwm, pwm->hwpwm);
+ ch = priv - rz_mtu3_pwm->channel_data;
+ val = RZ_MTU3_TIOR_OC_IOB_TOGGLE | RZ_MTU3_TIOR_OC_IOA_H_COMP_MATCH;
+
+ rz_mtu3_8bit_ch_write(priv->mtu, RZ_MTU3_TMDR1, RZ_MTU3_TMDR1_MD_PWMMODE1);
+ if (priv->map->base_pwm_number == pwm->hwpwm)
+ rz_mtu3_8bit_ch_write(priv->mtu, RZ_MTU3_TIORH, val);
+ else
+ rz_mtu3_8bit_ch_write(priv->mtu, RZ_MTU3_TIORL, val);
+
+ mutex_lock(&rz_mtu3_pwm->lock);
+ if (!rz_mtu3_pwm->enable_count[ch])
+ rz_mtu3_enable(priv->mtu);
+
+ rz_mtu3_pwm->enable_count[ch]++;
+ mutex_unlock(&rz_mtu3_pwm->lock);
+
+ return 0;
+}
+
+static void rz_mtu3_pwm_disable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
+ struct pwm_device *pwm)
+{
+ struct rz_mtu3_pwm_channel *priv;
+ u32 ch;
+
+ priv = rz_mtu3_get_channel(rz_mtu3_pwm, pwm->hwpwm);
+ ch = priv - rz_mtu3_pwm->channel_data;
+
+ /* Disable output pins of MTU3 channel */
+ if (priv->map->base_pwm_number == pwm->hwpwm)
+ rz_mtu3_8bit_ch_write(priv->mtu, RZ_MTU3_TIORH, RZ_MTU3_TIOR_OC_RETAIN);
+ else
+ rz_mtu3_8bit_ch_write(priv->mtu, RZ_MTU3_TIORL, RZ_MTU3_TIOR_OC_RETAIN);
+
+ mutex_lock(&rz_mtu3_pwm->lock);
+ rz_mtu3_pwm->enable_count[ch]--;
+ if (!rz_mtu3_pwm->enable_count[ch])
+ rz_mtu3_disable(priv->mtu);
+
+ mutex_unlock(&rz_mtu3_pwm->lock);
+
+ pm_runtime_put_sync(rz_mtu3_pwm->chip.dev);
+}
+
+static int rz_mtu3_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
+ int rc;
+
+ rc = pm_runtime_resume_and_get(chip->dev);
+ if (rc)
+ return rc;
+
+ state->enabled = rz_mtu3_pwm_is_ch_enabled(rz_mtu3_pwm, pwm->hwpwm);
+ if (state->enabled) {
+ struct rz_mtu3_pwm_channel *priv;
+ u8 prescale, val;
+ u16 dc, pv;
+ u64 tmp;
+
+ priv = rz_mtu3_get_channel(rz_mtu3_pwm, pwm->hwpwm);
+ if (priv->map->base_pwm_number == pwm->hwpwm)
+ rz_mtu3_pwm_read_tgr_registers(priv, RZ_MTU3_TGRA, &pv,
+ RZ_MTU3_TGRB, &dc);
+ else
+ rz_mtu3_pwm_read_tgr_registers(priv, RZ_MTU3_TGRC, &pv,
+ RZ_MTU3_TGRD, &dc);
+
+ val = rz_mtu3_8bit_ch_read(priv->mtu, RZ_MTU3_TCR);
+ prescale = FIELD_GET(RZ_MTU3_TCR_TPCS, val);
+
+ /* With prescale <= 7 and pv <= 0xffff this doesn't overflow. */
+ tmp = NSEC_PER_SEC * (u64)pv << (2 * prescale);
+ state->period = DIV_ROUND_UP_ULL(tmp, rz_mtu3_pwm->rate);
+ tmp = NSEC_PER_SEC * (u64)dc << (2 * prescale);
+ state->duty_cycle = DIV_ROUND_UP_ULL(tmp, rz_mtu3_pwm->rate);
+
+ if (state->duty_cycle > state->period)
+ state->duty_cycle = state->period;
+ }
+
+ state->polarity = PWM_POLARITY_NORMAL;
+ pm_runtime_put(chip->dev);
+
+ return 0;
+}
+
+static u16 rz_mtu3_pwm_calculate_pv_or_dc(u64 period_or_duty_cycle, u8 prescale)
+{
+ return min(period_or_duty_cycle >> (2 * prescale), (u64)U16_MAX);
+}
+
+static int rz_mtu3_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
+ struct rz_mtu3_pwm_channel *priv;
+ u64 period_cycles;
+ u64 duty_cycles;
+ u8 prescale;
+ u16 pv, dc;
+ u8 val;
+ u32 ch;
+
+ priv = rz_mtu3_get_channel(rz_mtu3_pwm, pwm->hwpwm);
+ ch = priv - rz_mtu3_pwm->channel_data;
+
+ period_cycles = mul_u64_u32_div(state->period, rz_mtu3_pwm->rate,
+ NSEC_PER_SEC);
+ prescale = rz_mtu3_pwm_calculate_prescale(rz_mtu3_pwm, period_cycles);
+
+ /*
+ * The prescaler is shared by multiple channels, so the prescale value
+ * can NOT be modified when there are multiple channels in use with
+ * different settings. Only modify the prescaler if the other PWM is
+ * off; otherwise reuse the current value, provided it is not larger
+ * than the one we want to set.
+ */
+ if (rz_mtu3_pwm->enable_count[ch] > 1) {
+ if (rz_mtu3_pwm->prescale[ch] > prescale)
+ return -EBUSY;
+
+ prescale = rz_mtu3_pwm->prescale[ch];
+ }
+
+ pv = rz_mtu3_pwm_calculate_pv_or_dc(period_cycles, prescale);
+
+ duty_cycles = mul_u64_u32_div(state->duty_cycle, rz_mtu3_pwm->rate,
+ NSEC_PER_SEC);
+ dc = rz_mtu3_pwm_calculate_pv_or_dc(duty_cycles, prescale);
+
+ /*
+ * If the PWM channel is disabled, make sure to turn on the clock
+ * before writing the register.
+ */
+ if (!pwm->state.enabled) {
+ int rc;
+
+ rc = pm_runtime_resume_and_get(chip->dev);
+ if (rc)
+ return rc;
+ }
+
+ val = RZ_MTU3_TCR_CKEG_RISING | prescale;
+
+ /* Counter must be stopped while updating TCR register */
+ if (rz_mtu3_pwm->prescale[ch] != prescale && rz_mtu3_pwm->enable_count[ch])
+ rz_mtu3_disable(priv->mtu);
+
+ if (priv->map->base_pwm_number == pwm->hwpwm) {
+ rz_mtu3_8bit_ch_write(priv->mtu, RZ_MTU3_TCR,
+ RZ_MTU3_TCR_CCLR_TGRA | val);
+ rz_mtu3_pwm_write_tgr_registers(priv, RZ_MTU3_TGRA, pv,
+ RZ_MTU3_TGRB, dc);
+ } else {
+ rz_mtu3_8bit_ch_write(priv->mtu, RZ_MTU3_TCR,
+ RZ_MTU3_TCR_CCLR_TGRC | val);
+ rz_mtu3_pwm_write_tgr_registers(priv, RZ_MTU3_TGRC, pv,
+ RZ_MTU3_TGRD, dc);
+ }
+
+ if (rz_mtu3_pwm->prescale[ch] != prescale) {
+ /*
+ * The prescaler is shared by multiple channels; we cache the
+ * prescaler value from the first enabled channel and use the same
+ * value for both channels.
+ */
+ rz_mtu3_pwm->prescale[ch] = prescale;
+
+ if (rz_mtu3_pwm->enable_count[ch])
+ rz_mtu3_enable(priv->mtu);
+ }
+
+ /* If the PWM is not enabled, turn the clock off again to save power. */
+ if (!pwm->state.enabled)
+ pm_runtime_put(chip->dev);
+
+ return 0;
+}
+
+static int rz_mtu3_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
+ bool enabled = pwm->state.enabled;
+ int ret;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (enabled)
+ rz_mtu3_pwm_disable(rz_mtu3_pwm, pwm);
+
+ return 0;
+ }
+
+ mutex_lock(&rz_mtu3_pwm->lock);
+ ret = rz_mtu3_pwm_config(chip, pwm, state);
+ mutex_unlock(&rz_mtu3_pwm->lock);
+ if (ret)
+ return ret;
+
+ if (!enabled)
+ ret = rz_mtu3_pwm_enable(rz_mtu3_pwm, pwm);
+
+ return ret;
+}
+
+static const struct pwm_ops rz_mtu3_pwm_ops = {
+ .request = rz_mtu3_pwm_request,
+ .free = rz_mtu3_pwm_free,
+ .get_state = rz_mtu3_pwm_get_state,
+ .apply = rz_mtu3_pwm_apply,
+ .owner = THIS_MODULE,
+};
+
+static int rz_mtu3_pwm_pm_runtime_suspend(struct device *dev)
+{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rz_mtu3_pwm->clk);
+
+ return 0;
+}
+
+static int rz_mtu3_pwm_pm_runtime_resume(struct device *dev)
+{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(rz_mtu3_pwm->clk);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(rz_mtu3_pwm_pm_ops,
+ rz_mtu3_pwm_pm_runtime_suspend,
+ rz_mtu3_pwm_pm_runtime_resume, NULL);
+
+static void rz_mtu3_pwm_pm_disable(void *data)
+{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = data;
+
+ clk_rate_exclusive_put(rz_mtu3_pwm->clk);
+ pm_runtime_disable(rz_mtu3_pwm->chip.dev);
+ pm_runtime_set_suspended(rz_mtu3_pwm->chip.dev);
+}
+
+static int rz_mtu3_pwm_probe(struct platform_device *pdev)
+{
+ struct rz_mtu3 *parent_ddata = dev_get_drvdata(pdev->dev.parent);
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm;
+ struct device *dev = &pdev->dev;
+ unsigned int i, j = 0;
+ int ret;
+
+ rz_mtu3_pwm = devm_kzalloc(&pdev->dev, sizeof(*rz_mtu3_pwm), GFP_KERNEL);
+ if (!rz_mtu3_pwm)
+ return -ENOMEM;
+
+ rz_mtu3_pwm->clk = parent_ddata->clk;
+
+ for (i = 0; i < RZ_MTU_NUM_CHANNELS; i++) {
+ if (i == RZ_MTU3_CHAN_5 || i == RZ_MTU3_CHAN_8)
+ continue;
+
+ rz_mtu3_pwm->channel_data[j].mtu = &parent_ddata->channels[i];
+ rz_mtu3_pwm->channel_data[j].mtu->dev = dev;
+ rz_mtu3_pwm->channel_data[j].map = &channel_map[j];
+ j++;
+ }
+
+ mutex_init(&rz_mtu3_pwm->lock);
+ platform_set_drvdata(pdev, rz_mtu3_pwm);
+ ret = clk_prepare_enable(rz_mtu3_pwm->clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Clock enable failed\n");
+
+ clk_rate_exclusive_get(rz_mtu3_pwm->clk);
+
+ rz_mtu3_pwm->rate = clk_get_rate(rz_mtu3_pwm->clk);
+ /*
+ * Refuse clk rates > 1 GHz to prevent overflow later for computing
+ * period and duty cycle.
+ */
+ if (rz_mtu3_pwm->rate > NSEC_PER_SEC) {
+ ret = -EINVAL;
+ clk_rate_exclusive_put(rz_mtu3_pwm->clk);
+ goto disable_clock;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ rz_mtu3_pwm->chip.dev = &pdev->dev;
+ ret = devm_add_action_or_reset(&pdev->dev, rz_mtu3_pwm_pm_disable,
+ rz_mtu3_pwm);
+ if (ret < 0)
+ return ret;
+
+ rz_mtu3_pwm->chip.ops = &rz_mtu3_pwm_ops;
+ rz_mtu3_pwm->chip.npwm = RZ_MTU3_MAX_PWM_CHANNELS;
+ ret = devm_pwmchip_add(&pdev->dev, &rz_mtu3_pwm->chip);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
+
+ pm_runtime_idle(&pdev->dev);
+
+ return 0;
+
+disable_clock:
+ clk_disable_unprepare(rz_mtu3_pwm->clk);
+ return ret;
+}
+
+static struct platform_driver rz_mtu3_pwm_driver = {
+ .driver = {
+ .name = "pwm-rz-mtu3",
+ .pm = pm_ptr(&rz_mtu3_pwm_pm_ops),
+ },
+ .probe = rz_mtu3_pwm_probe,
+};
+module_platform_driver(rz_mtu3_pwm_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_ALIAS("platform:pwm-rz-mtu3");
+MODULE_DESCRIPTION("Renesas RZ/G2L MTU3a PWM Timer Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 5b0574f635f6..ae49d67ab2b1 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -244,12 +244,12 @@ static int pwm_sifive_probe(struct platform_device *pdev)
if (IS_ERR(ddata->regs))
return PTR_ERR(ddata->regs);
- ddata->clk = devm_clk_get(dev, NULL);
+ ddata->clk = devm_clk_get_prepared(dev, NULL);
if (IS_ERR(ddata->clk))
return dev_err_probe(dev, PTR_ERR(ddata->clk),
"Unable to find controller clock\n");
- ret = clk_prepare_enable(ddata->clk);
+ ret = clk_enable(ddata->clk);
if (ret) {
dev_err(dev, "failed to enable clock for pwm: %d\n", ret);
return ret;
@@ -308,7 +308,6 @@ disable_clk:
clk_disable(ddata->clk);
--enabled_clks;
}
- clk_unprepare(ddata->clk);
return ret;
}
@@ -327,8 +326,6 @@ static void pwm_sifive_remove(struct platform_device *dev)
if (pwm->state.enabled)
clk_disable(ddata->clk);
}
-
- clk_unprepare(ddata->clk);
}
static const struct of_device_id pwm_sifive_of_match[] = {
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 1a106ec32939..8d1254761e4d 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -424,6 +424,13 @@ static int pwm_class_resume_npwm(struct device *parent, unsigned int npwm)
if (!export)
continue;
+ /* If pwmchip was not enabled before suspend, do nothing. */
+ if (!export->suspend.enabled) {
+ /* release lock taken in pwm_class_get_state */
+ mutex_unlock(&export->lock);
+ continue;
+ }
+
state.enabled = export->suspend.enabled;
ret = pwm_class_apply_state(export, pwm, &state);
if (ret < 0)
@@ -448,7 +455,17 @@ static int pwm_class_suspend(struct device *parent)
if (!export)
continue;
+ /*
+ * If pwmchip was not enabled before suspend, save
+ * state for resume time and do nothing else.
+ */
export->suspend = state;
+ if (!state.enabled) {
+ /* release lock taken in pwm_class_get_state */
+ mutex_unlock(&export->lock);
+ continue;
+ }
+
state.enabled = false;
ret = pwm_class_apply_state(export, pwm, &state);
if (ret < 0) {
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 2c2405024ace..823f8e6e4801 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -556,6 +556,17 @@ config REGULATOR_MAX597X
The MAX5970/5978 is a smart switch with no output regulation, but
fault protection and voltage and current monitoring capabilities.
+config REGULATOR_MAX77541
+ tristate "Analog Devices MAX77541/77540 Regulator"
+ depends on MFD_MAX77541
+ help
+ This driver controls Analog Devices MAX77541/77540 regulators
+ via the I2C bus. Both the MAX77540 and MAX77541 are dual-phase
+ high-efficiency buck converters. Say Y here to
+ enable the regulator driver.
+ Say M here if you want to include support for the regulator as a
+ module.
+
config REGULATOR_MAX77620
tristate "Maxim 77620/MAX20024 voltage regulator"
depends on MFD_MAX77620 || COMPILE_TEST
@@ -1036,6 +1047,7 @@ config REGULATOR_QCOM_USB_VBUS
config REGULATOR_RAA215300
tristate "Renesas RAA215300 driver"
select REGMAP_I2C
+ depends on COMMON_CLK
depends on I2C
help
Support for the Renesas RAA215300 PMIC.
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index ebfa75379c20..15e0d614ff66 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_REGULATOR_LTC3676) += ltc3676.o
obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX597X) += max597x-regulator.o
+obj-$(CONFIG_REGULATOR_MAX77541) += max77541-regulator.o
obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
diff --git a/drivers/regulator/max77541-regulator.c b/drivers/regulator/max77541-regulator.c
new file mode 100644
index 000000000000..2976f9cb3e26
--- /dev/null
+++ b/drivers/regulator/max77541-regulator.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022 Analog Devices, Inc.
+ * ADI Regulator driver for the MAX77540 and MAX77541
+ */
+
+#include <linux/mfd/max77541.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+static const struct regulator_ops max77541_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_pickable_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_pickable_regmap,
+};
+
+static const struct linear_range max77540_buck_ranges[] = {
+ /* Ranges when VOLT_SEL bits are 0x00 */
+ REGULATOR_LINEAR_RANGE(500000, 0x00, 0x8B, 5000),
+ REGULATOR_LINEAR_RANGE(1200000, 0x8C, 0xFF, 0),
+ /* Ranges when VOLT_SEL bits are 0x40 */
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x8B, 10000),
+ REGULATOR_LINEAR_RANGE(2400000, 0x8C, 0xFF, 0),
+ /* Ranges when VOLT_SEL bits are 0x80 */
+ REGULATOR_LINEAR_RANGE(2000000, 0x00, 0x9F, 20000),
+ REGULATOR_LINEAR_RANGE(5200000, 0xA0, 0xFF, 0),
+};
+
+static const struct linear_range max77541_buck_ranges[] = {
+ /* Ranges when VOLT_SEL bits are 0x00 */
+ REGULATOR_LINEAR_RANGE(300000, 0x00, 0xB3, 5000),
+ REGULATOR_LINEAR_RANGE(1200000, 0xB4, 0xFF, 0),
+ /* Ranges when VOLT_SEL bits are 0x40 */
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x8B, 10000),
+ REGULATOR_LINEAR_RANGE(2400000, 0x8C, 0xFF, 0),
+ /* Ranges when VOLT_SEL bits are 0x80 */
+ REGULATOR_LINEAR_RANGE(2000000, 0x00, 0x9F, 20000),
+ REGULATOR_LINEAR_RANGE(5200000, 0xA0, 0xFF, 0),
+};
+
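+/* VOLT_SEL register values, one per entry in the *_buck_ranges tables above */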
+static const unsigned int max77541_buck_volt_range_sel[] = {
+ 0x00, 0x00, 0x40, 0x40, 0x80, 0x80,
+};
+
+enum max77541_regulators {
+ MAX77541_BUCK1 = 1,
+ MAX77541_BUCK2,
+};
+
+#define MAX77540_BUCK(_id, _ops) \
+ { .id = MAX77541_BUCK ## _id, \
+ .name = "buck"#_id, \
+ .of_match = "buck"#_id, \
+ .regulators_node = "regulators", \
+ .enable_reg = MAX77541_REG_EN_CTRL, \
+ .enable_mask = MAX77541_BIT_M ## _id ## _EN, \
+ .ops = &(_ops), \
+ .type = REGULATOR_VOLTAGE, \
+ .linear_ranges = max77540_buck_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(max77540_buck_ranges), \
+ .vsel_reg = MAX77541_REG_M ## _id ## _VOUT, \
+ .vsel_mask = MAX77541_BITS_MX_VOUT, \
+ .vsel_range_reg = MAX77541_REG_M ## _id ## _CFG1, \
+ .vsel_range_mask = MAX77541_BITS_MX_CFG1_RNG, \
+ .linear_range_selectors = max77541_buck_volt_range_sel, \
+ .owner = THIS_MODULE, \
+ }
+
+#define MAX77541_BUCK(_id, _ops) \
+ { .id = MAX77541_BUCK ## _id, \
+ .name = "buck"#_id, \
+ .of_match = "buck"#_id, \
+ .regulators_node = "regulators", \
+ .enable_reg = MAX77541_REG_EN_CTRL, \
+ .enable_mask = MAX77541_BIT_M ## _id ## _EN, \
+ .ops = &(_ops), \
+ .type = REGULATOR_VOLTAGE, \
+ .linear_ranges = max77541_buck_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(max77541_buck_ranges), \
+ .vsel_reg = MAX77541_REG_M ## _id ## _VOUT, \
+ .vsel_mask = MAX77541_BITS_MX_VOUT, \
+ .vsel_range_reg = MAX77541_REG_M ## _id ## _CFG1, \
+ .vsel_range_mask = MAX77541_BITS_MX_CFG1_RNG, \
+ .linear_range_selectors = max77541_buck_volt_range_sel, \
+ .owner = THIS_MODULE, \
+ }
+
+static const struct regulator_desc max77540_regulators_desc[] = {
+ MAX77540_BUCK(1, max77541_buck_ops),
+ MAX77540_BUCK(2, max77541_buck_ops),
+};
+
+static const struct regulator_desc max77541_regulators_desc[] = {
+ MAX77541_BUCK(1, max77541_buck_ops),
+ MAX77541_BUCK(2, max77541_buck_ops),
+};
+
+static int max77541_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_config config = {};
+ const struct regulator_desc *desc;
+ struct device *dev = &pdev->dev;
+ struct regulator_dev *rdev;
+ struct max77541 *max77541 = dev_get_drvdata(dev->parent);
+ unsigned int i;
+
+ config.dev = dev->parent;
+
+ switch (max77541->id) {
+ case MAX77540:
+ desc = max77540_regulators_desc;
+ break;
+ case MAX77541:
+ desc = max77541_regulators_desc;
+ break;
+ default:
+ return -EINVAL;
+ }
+
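+ /* Register every buck regulator described for the matched device */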
+ for (i = 0; i < MAX77541_MAX_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev, &desc[i], &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register regulator\n");
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id max77541_regulator_platform_id[] = {
+ { "max77540-regulator" },
+ { "max77541-regulator" },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max77541_regulator_platform_id);
+
+static struct platform_driver max77541_regulator_driver = {
+ .driver = {
+ .name = "max77541-regulator",
+ },
+ .probe = max77541_regulator_probe,
+ .id_table = max77541_regulator_platform_id,
+};
+module_platform_driver(max77541_regulator_driver);
+
+MODULE_AUTHOR("Okan Sahin <Okan.Sahin@analog.com>");
+MODULE_DESCRIPTION("MAX77540/MAX77541 regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 768217f0f5cd..9041a0e07fb2 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -357,7 +357,7 @@ free_mem:
return ret;
}
-static int da8xx_rproc_remove(struct platform_device *pdev)
+static void da8xx_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct da8xx_rproc *drproc = rproc->priv;
@@ -374,8 +374,6 @@ static int da8xx_rproc_remove(struct platform_device *pdev)
rproc_free(rproc);
if (dev->of_node)
of_reserved_mem_device_release(dev);
-
- return 0;
}
static const struct of_device_id davinci_rproc_of_match[] __maybe_unused = {
@@ -386,7 +384,7 @@ MODULE_DEVICE_TABLE(of, davinci_rproc_of_match);
static struct platform_driver da8xx_rproc_driver = {
.probe = da8xx_rproc_probe,
- .remove = da8xx_rproc_remove,
+ .remove_new = da8xx_rproc_remove,
.driver = {
.name = "davinci-rproc",
.of_match_table = of_match_ptr(davinci_rproc_of_match),
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index cab06dbf37fb..d95fa5586189 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -1161,7 +1161,7 @@ err_put_rproc:
return ret;
}
-static int imx_dsp_rproc_remove(struct platform_device *pdev)
+static void imx_dsp_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct imx_dsp_rproc *priv = rproc->priv;
@@ -1170,8 +1170,6 @@ static int imx_dsp_rproc_remove(struct platform_device *pdev)
rproc_del(rproc);
imx_dsp_detach_pm_domains(priv);
rproc_free(rproc);
-
- return 0;
}
/* pm runtime functions */
@@ -1243,7 +1241,7 @@ out:
release_firmware(fw);
}
-static __maybe_unused int imx_dsp_suspend(struct device *dev)
+static int imx_dsp_suspend(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct imx_dsp_rproc *priv = rproc->priv;
@@ -1278,7 +1276,7 @@ out:
return pm_runtime_force_suspend(dev);
}
-static __maybe_unused int imx_dsp_resume(struct device *dev)
+static int imx_dsp_resume(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
int ret = 0;
@@ -1312,9 +1310,8 @@ err:
}
static const struct dev_pm_ops imx_dsp_rproc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(imx_dsp_suspend, imx_dsp_resume)
- SET_RUNTIME_PM_OPS(imx_dsp_runtime_suspend,
- imx_dsp_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(imx_dsp_suspend, imx_dsp_resume)
+ RUNTIME_PM_OPS(imx_dsp_runtime_suspend, imx_dsp_runtime_resume, NULL)
};
static const struct of_device_id imx_dsp_rproc_of_match[] = {
@@ -1328,11 +1325,11 @@ MODULE_DEVICE_TABLE(of, imx_dsp_rproc_of_match);
static struct platform_driver imx_dsp_rproc_driver = {
.probe = imx_dsp_rproc_probe,
- .remove = imx_dsp_rproc_remove,
+ .remove_new = imx_dsp_rproc_remove,
.driver = {
.name = "imx-dsp-rproc",
.of_match_table = imx_dsp_rproc_of_match,
- .pm = &imx_dsp_rproc_pm_ops,
+ .pm = pm_ptr(&imx_dsp_rproc_pm_ops),
},
};
module_platform_driver(imx_dsp_rproc_driver);
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 0ab840dc7e97..f9874fc5a80f 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -1112,7 +1112,7 @@ err_put_rproc:
return ret;
}
-static int imx_rproc_remove(struct platform_device *pdev)
+static void imx_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct imx_rproc *priv = rproc->priv;
@@ -1123,8 +1123,6 @@ static int imx_rproc_remove(struct platform_device *pdev)
imx_rproc_free_mbox(rproc);
destroy_workqueue(priv->workqueue);
rproc_free(rproc);
-
- return 0;
}
static const struct of_device_id imx_rproc_of_match[] = {
@@ -1145,7 +1143,7 @@ MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
static struct platform_driver imx_rproc_driver = {
.probe = imx_rproc_probe,
- .remove = imx_rproc_remove,
+ .remove_new = imx_rproc_remove,
.driver = {
.name = "imx-rproc",
.of_match_table = imx_rproc_of_match,
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 95b39741925d..7e57b90bcaf8 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -476,7 +476,7 @@ free_rproc:
return ret;
}
-static int keystone_rproc_remove(struct platform_device *pdev)
+static void keystone_rproc_remove(struct platform_device *pdev)
{
struct keystone_rproc *ksproc = platform_get_drvdata(pdev);
@@ -486,8 +486,6 @@ static int keystone_rproc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
rproc_free(ksproc->rproc);
of_reserved_mem_device_release(&pdev->dev);
-
- return 0;
}
static const struct of_device_id keystone_rproc_of_match[] = {
@@ -501,7 +499,7 @@ MODULE_DEVICE_TABLE(of, keystone_rproc_of_match);
static struct platform_driver keystone_rproc_driver = {
.probe = keystone_rproc_probe,
- .remove = keystone_rproc_remove,
+ .remove_new = keystone_rproc_remove,
.driver = {
.name = "keystone-rproc",
.of_match_table = keystone_rproc_of_match,
diff --git a/drivers/remoteproc/meson_mx_ao_arc.c b/drivers/remoteproc/meson_mx_ao_arc.c
index 462cddab6518..f6744b538323 100644
--- a/drivers/remoteproc/meson_mx_ao_arc.c
+++ b/drivers/remoteproc/meson_mx_ao_arc.c
@@ -228,15 +228,13 @@ err_free_genpool:
return ret;
}
-static int meson_mx_ao_arc_rproc_remove(struct platform_device *pdev)
+static void meson_mx_ao_arc_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
rproc_del(rproc);
gen_pool_free(priv->sram_pool, priv->sram_va, priv->sram_size);
-
- return 0;
}
static const struct of_device_id meson_mx_ao_arc_rproc_match[] = {
@@ -248,7 +246,7 @@ MODULE_DEVICE_TABLE(of, meson_mx_ao_arc_rproc_match);
static struct platform_driver meson_mx_ao_arc_rproc_driver = {
.probe = meson_mx_ao_arc_rproc_probe,
- .remove = meson_mx_ao_arc_rproc_remove,
+ .remove_new = meson_mx_ao_arc_rproc_remove,
.driver = {
.name = "meson-mx-ao-arc-rproc",
.of_match_table = meson_mx_ao_arc_rproc_match,
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index e1d93e63d7df..dcc94ee2458d 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -913,7 +913,7 @@ release_dev_mem:
return ret;
}
-static int scp_remove(struct platform_device *pdev)
+static void scp_remove(struct platform_device *pdev)
{
struct mtk_scp *scp = platform_get_drvdata(pdev);
int i;
@@ -925,8 +925,6 @@ static int scp_remove(struct platform_device *pdev)
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_destroy(&scp->ipi_desc[i].lock);
mutex_destroy(&scp->send_lock);
-
- return 0;
}
static const struct mtk_scp_of_data mt8183_of_data = {
@@ -1003,7 +1001,7 @@ MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
static struct platform_driver mtk_scp_driver = {
.probe = scp_probe,
- .remove = scp_remove,
+ .remove_new = scp_remove,
.driver = {
.name = "mtk-scp",
.of_match_table = mtk_scp_of_match,
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 430fab0266ed..82ed90f03d91 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -1363,15 +1363,13 @@ free_rproc:
return ret;
}
-static int omap_rproc_remove(struct platform_device *pdev)
+static void omap_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
rproc_del(rproc);
rproc_free(rproc);
of_reserved_mem_device_release(&pdev->dev);
-
- return 0;
}
static const struct dev_pm_ops omap_rproc_pm_ops = {
@@ -1382,7 +1380,7 @@ static const struct dev_pm_ops omap_rproc_pm_ops = {
static struct platform_driver omap_rproc_driver = {
.probe = omap_rproc_probe,
- .remove = omap_rproc_remove,
+ .remove_new = omap_rproc_remove,
.driver = {
.name = "omap-rproc",
.pm = &omap_rproc_pm_ops,
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 54f5ce302e7a..2874c8d324f7 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -1056,14 +1056,12 @@ static int pru_rproc_probe(struct platform_device *pdev)
return 0;
}
-static int pru_rproc_remove(struct platform_device *pdev)
+static void pru_rproc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rproc *rproc = platform_get_drvdata(pdev);
dev_dbg(dev, "%s: removing rproc %s\n", __func__, rproc->name);
-
- return 0;
}
static const struct pru_private_data pru_data = {
@@ -1111,7 +1109,7 @@ static struct platform_driver pru_rproc_driver = {
.suppress_bind_attrs = true,
},
.probe = pru_rproc_probe,
- .remove = pru_rproc_remove,
+ .remove_new = pru_rproc_remove,
};
module_platform_driver(pru_rproc_driver);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index d546ab9dc141..6777a3bd6226 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -763,7 +763,7 @@ free_rproc:
return ret;
}
-static int adsp_remove(struct platform_device *pdev)
+static void adsp_remove(struct platform_device *pdev)
{
struct qcom_adsp *adsp = platform_get_drvdata(pdev);
@@ -775,8 +775,6 @@ static int adsp_remove(struct platform_device *pdev)
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
rproc_free(adsp->rproc);
-
- return 0;
}
static const struct adsp_pil_data adsp_resource_init = {
@@ -859,7 +857,7 @@ MODULE_DEVICE_TABLE(of, adsp_of_match);
static struct platform_driver adsp_pil_driver = {
.probe = adsp_probe,
- .remove = adsp_remove,
+ .remove_new = adsp_remove,
.driver = {
.name = "qcom_q6v5_adsp",
.of_match_table = adsp_of_match,
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 8e15e4f85de1..70bffc9f33f6 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -2110,7 +2110,7 @@ free_rproc:
return ret;
}
-static int q6v5_remove(struct platform_device *pdev)
+static void q6v5_remove(struct platform_device *pdev)
{
struct q6v5 *qproc = platform_get_drvdata(pdev);
struct rproc *rproc = qproc->rproc;
@@ -2128,8 +2128,6 @@ static int q6v5_remove(struct platform_device *pdev)
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
rproc_free(rproc);
-
- return 0;
}
static const struct rproc_hexagon_res sc7180_mss = {
@@ -2482,7 +2480,7 @@ MODULE_DEVICE_TABLE(of, q6v5_of_match);
static struct platform_driver q6v5_driver = {
.probe = q6v5_probe,
- .remove = q6v5_remove,
+ .remove_new = q6v5_remove,
.driver = {
.name = "qcom-q6v5-mss",
.of_match_table = q6v5_of_match,
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index e34d82b18a67..3153d82037e7 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -105,7 +105,7 @@ struct qcom_adsp {
struct qcom_scm_pas_metadata dtb_pas_metadata;
};
-void adsp_segment_dump(struct rproc *rproc, struct rproc_dump_segment *segment,
+static void adsp_segment_dump(struct rproc *rproc, struct rproc_dump_segment *segment,
void *dest, size_t offset, size_t size)
{
struct qcom_adsp *adsp = rproc->priv;
@@ -754,7 +754,7 @@ free_rproc:
return ret;
}
-static int adsp_remove(struct platform_device *pdev)
+static void adsp_remove(struct platform_device *pdev)
{
struct qcom_adsp *adsp = platform_get_drvdata(pdev);
@@ -769,8 +769,6 @@ static int adsp_remove(struct platform_device *pdev)
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
device_init_wakeup(adsp->dev, false);
rproc_free(adsp->rproc);
-
- return 0;
}
static const struct adsp_data adsp_resource_init = {
@@ -1232,7 +1230,7 @@ MODULE_DEVICE_TABLE(of, adsp_of_match);
static struct platform_driver adsp_driver = {
.probe = adsp_probe,
- .remove = adsp_remove,
+ .remove_new = adsp_remove,
.driver = {
.name = "qcom_q6v5_pas",
.of_match_table = adsp_of_match,
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index ba24d745b2d6..b437044aa126 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -1074,7 +1074,7 @@ free_rproc:
return ret;
}
-static int q6v5_wcss_remove(struct platform_device *pdev)
+static void q6v5_wcss_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct q6v5_wcss *wcss = rproc->priv;
@@ -1082,8 +1082,6 @@ static int q6v5_wcss_remove(struct platform_device *pdev)
qcom_q6v5_deinit(&wcss->q6v5);
rproc_del(rproc);
rproc_free(rproc);
-
- return 0;
}
static const struct wcss_data wcss_ipq8074_res_init = {
@@ -1117,7 +1115,7 @@ MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match);
static struct platform_driver q6v5_wcss_driver = {
.probe = q6v5_wcss_probe,
- .remove = q6v5_wcss_remove,
+ .remove_new = q6v5_wcss_remove,
.driver = {
.name = "qcom-q6v5-wcss-pil",
.of_match_table = q6v5_wcss_of_match,
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 0fc317265064..1ed0647bc962 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -666,7 +666,7 @@ free_rproc:
return ret;
}
-static int wcnss_remove(struct platform_device *pdev)
+static void wcnss_remove(struct platform_device *pdev)
{
struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);
@@ -678,8 +678,6 @@ static int wcnss_remove(struct platform_device *pdev)
qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
wcnss_release_pds(wcnss);
rproc_free(wcnss->rproc);
-
- return 0;
}
static const struct of_device_id wcnss_of_match[] = {
@@ -693,7 +691,7 @@ MODULE_DEVICE_TABLE(of, wcnss_of_match);
static struct platform_driver wcnss_driver = {
.probe = wcnss_probe,
- .remove = wcnss_remove,
+ .remove_new = wcnss_remove,
.driver = {
.name = "qcom-wcnss-pil",
.of_match_table = wcnss_of_match,
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
index 1ff2a73ade90..90e8769d5624 100644
--- a/drivers/remoteproc/rcar_rproc.c
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -197,13 +197,11 @@ pm_disable:
return ret;
}
-static int rcar_rproc_remove(struct platform_device *pdev)
+static void rcar_rproc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
pm_runtime_disable(dev);
-
- return 0;
}
static const struct of_device_id rcar_rproc_of_match[] = {
@@ -215,7 +213,7 @@ MODULE_DEVICE_TABLE(of, rcar_rproc_of_match);
static struct platform_driver rcar_rproc_driver = {
.probe = rcar_rproc_probe,
- .remove = rcar_rproc_remove,
+ .remove_new = rcar_rproc_remove,
.driver = {
.name = "rcar-rproc",
.of_match_table = rcar_rproc_of_match,
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 0e95525c1158..83d76915a6ad 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -569,7 +569,7 @@ unwind_vring_allocations:
return ret;
}
-static int rproc_virtio_remove(struct platform_device *pdev)
+static void rproc_virtio_remove(struct platform_device *pdev)
{
struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
struct rproc *rproc = rvdev->rproc;
@@ -588,14 +588,12 @@ static int rproc_virtio_remove(struct platform_device *pdev)
dma_release_coherent_memory(&pdev->dev);
put_device(&rproc->dev);
-
- return 0;
}
/* Platform driver */
static struct platform_driver rproc_virtio_driver = {
.probe = rproc_virtio_probe,
- .remove = rproc_virtio_remove,
+ .remove_new = rproc_virtio_remove,
.driver = {
.name = "rproc-virtio",
},
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index 3f1b8963639f..e3ce01d98b4c 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -448,7 +448,7 @@ free_rproc:
return ret;
}
-static int st_rproc_remove(struct platform_device *pdev)
+static void st_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct st_rproc *ddata = rproc->priv;
@@ -462,13 +462,11 @@ static int st_rproc_remove(struct platform_device *pdev)
mbox_free_channel(ddata->mbox_chan[i]);
rproc_free(rproc);
-
- return 0;
}
static struct platform_driver st_rproc_driver = {
.probe = st_rproc_probe,
- .remove = st_rproc_remove,
+ .remove_new = st_rproc_remove,
.driver = {
.name = "st-rproc",
.of_match_table = of_match_ptr(st_rproc_match),
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index e432febf4337..cf073bac79f7 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -79,6 +79,7 @@ struct stm32_mbox {
struct stm32_rproc {
struct reset_control *rst;
+ struct reset_control *hold_boot_rst;
struct stm32_syscon hold_boot;
struct stm32_syscon pdds;
struct stm32_syscon m4_state;
@@ -88,7 +89,7 @@ struct stm32_rproc {
struct stm32_rproc_mem *rmems;
struct stm32_mbox mb[MBOX_NB_MBX];
struct workqueue_struct *workqueue;
- bool secured_soc;
+ bool hold_boot_smc;
void __iomem *rsc_va;
};
@@ -413,13 +414,28 @@ static int stm32_rproc_set_hold_boot(struct rproc *rproc, bool hold)
struct arm_smccc_res smc_res;
int val, err;
+ /*
+ * Three ways to manage the hold boot:
+ * - using SCMI: the hold boot is managed as a reset,
+ * - using Linux (no SCMI): the hold boot is managed as a syscon register,
+ * - using an SMC call (deprecated): use the SMC reset interface.
+ */
+
val = hold ? HOLD_BOOT : RELEASE_BOOT;
- if (IS_ENABLED(CONFIG_HAVE_ARM_SMCCC) && ddata->secured_soc) {
+ if (ddata->hold_boot_rst) {
+ /* Use the SCMI reset controller */
+ if (!hold)
+ err = reset_control_deassert(ddata->hold_boot_rst);
+ else
+ err = reset_control_assert(ddata->hold_boot_rst);
+ } else if (IS_ENABLED(CONFIG_HAVE_ARM_SMCCC) && ddata->hold_boot_smc) {
+ /* Use the SMC call */
arm_smccc_smc(STM32_SMC_RCC, STM32_SMC_REG_WRITE,
hold_boot.reg, val, 0, 0, 0, 0, &smc_res);
err = smc_res.a0;
} else {
+ /* Use syscon */
err = regmap_update_bits(hold_boot.map, hold_boot.reg,
hold_boot.mask, val);
}
@@ -717,34 +733,52 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev,
dev_info(dev, "wdg irq registered\n");
}
- ddata->rst = devm_reset_control_get_by_index(dev, 0);
+ ddata->rst = devm_reset_control_get_optional(dev, "mcu_rst");
+ if (!ddata->rst) {
+ /* Try legacy fallback method: get it by index */
+ ddata->rst = devm_reset_control_get_by_index(dev, 0);
+ }
if (IS_ERR(ddata->rst))
return dev_err_probe(dev, PTR_ERR(ddata->rst),
"failed to get mcu_reset\n");
/*
- * if platform is secured the hold boot bit must be written by
- * smc call and read normally.
- * if not secure the hold boot bit could be read/write normally
+ * Three ways to manage the hold boot:
+ * - using SCMI: the hold boot is managed as a reset.
+ * The DT "reset-names" property must then list two items:
+ * reset-names = "mcu_rst", "hold_boot";
+ * - using an SMC call (deprecated): use the SMC reset interface.
+ * The DT "reset-names" property is optional; "st,syscfg-tz" is required.
+ * - default (no SCMI, no SMC): the hold boot is managed as a syscon register.
+ * The DT "reset-names" property is optional; "st,syscfg-holdboot" is required.
*/
- err = stm32_rproc_get_syscon(np, "st,syscfg-tz", &tz);
- if (err) {
- dev_err(dev, "failed to get tz syscfg\n");
- return err;
- }
- err = regmap_read(tz.map, tz.reg, &tzen);
- if (err) {
- dev_err(dev, "failed to read tzen\n");
- return err;
+ ddata->hold_boot_rst = devm_reset_control_get_optional(dev, "hold_boot");
+ if (IS_ERR(ddata->hold_boot_rst))
+ return dev_err_probe(dev, PTR_ERR(ddata->hold_boot_rst),
+ "failed to get hold_boot reset\n");
+
+ if (!ddata->hold_boot_rst && IS_ENABLED(CONFIG_HAVE_ARM_SMCCC)) {
+ /* Manage the MCU_BOOT using SMC call */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-tz", &tz);
+ if (!err) {
+ err = regmap_read(tz.map, tz.reg, &tzen);
+ if (err) {
+ dev_err(dev, "failed to read tzen\n");
+ return err;
+ }
+ ddata->hold_boot_smc = tzen & tz.mask;
+ }
}
- ddata->secured_soc = tzen & tz.mask;
- err = stm32_rproc_get_syscon(np, "st,syscfg-holdboot",
- &ddata->hold_boot);
- if (err) {
- dev_err(dev, "failed to get hold boot\n");
- return err;
+ if (!ddata->hold_boot_rst && !ddata->hold_boot_smc) {
+ /* Default: manage the hold boot through the syscon controller */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-holdboot",
+ &ddata->hold_boot);
+ if (err) {
+ dev_err(dev, "failed to get hold boot\n");
+ return err;
+ }
}
err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds);
@@ -867,7 +901,7 @@ free_rproc:
return ret;
}
-static int stm32_rproc_remove(struct platform_device *pdev)
+static void stm32_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct stm32_rproc *ddata = rproc->priv;
@@ -885,8 +919,6 @@ static int stm32_rproc_remove(struct platform_device *pdev)
device_init_wakeup(dev, false);
}
rproc_free(rproc);
-
- return 0;
}
static int __maybe_unused stm32_rproc_suspend(struct device *dev)
@@ -916,7 +948,7 @@ static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
static struct platform_driver stm32_rproc_driver = {
.probe = stm32_rproc_probe,
- .remove = stm32_rproc_remove,
+ .remove_new = stm32_rproc_remove,
.driver = {
.name = "stm32-rproc",
.pm = &stm32_rproc_pm_ops,
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index a0c204cb0979..120dc7d2dac1 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -223,7 +223,7 @@ err:
return ret;
}
-static int wkup_m3_rproc_remove(struct platform_device *pdev)
+static void wkup_m3_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
@@ -231,8 +231,6 @@ static int wkup_m3_rproc_remove(struct platform_device *pdev)
rproc_free(rproc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -253,7 +251,7 @@ static const struct dev_pm_ops wkup_m3_rproc_pm_ops = {
static struct platform_driver wkup_m3_rproc_driver = {
.probe = wkup_m3_rproc_probe,
- .remove = wkup_m3_rproc_remove,
+ .remove_new = wkup_m3_rproc_remove,
.driver = {
.name = "wkup_m3_rproc",
.of_match_table = wkup_m3_rproc_of_match,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index ffca9a8bb878..05f4b2d66290 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -956,6 +956,7 @@ comment "Platform RTC drivers"
config RTC_DRV_CMOS
tristate "PC-style 'CMOS'"
depends on X86 || ARM || PPC || MIPS || SPARC64
+ depends on HAS_IOPORT || MACH_DECSTATION
default y if X86
select RTC_MC146818_LIB
help
@@ -976,6 +977,7 @@ config RTC_DRV_CMOS
config RTC_DRV_ALPHA
bool "Alpha PC-style CMOS"
depends on ALPHA
+ depends on HAS_IOPORT
select RTC_MC146818_LIB
default y
help
@@ -1193,7 +1195,7 @@ config RTC_DRV_MSM6242
config RTC_DRV_BQ4802
tristate "TI BQ4802"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_IOPORT
help
If you say Y here you will get support for the TI
BQ4802 RTC chip.
@@ -1685,6 +1687,19 @@ config RTC_DRV_JZ4740
This driver can also be built as a module. If so, the module
will be called rtc-jz4740.
+config RTC_DRV_LOONGSON
+ tristate "Loongson On-chip RTC"
+ depends on MACH_LOONGSON32 || MACH_LOONGSON64 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This is a driver for the Loongson on-chip Counter0 (Time-Of-Year
+ counter) to be used as an RTC.
+ It can be found on Loongson-1 series CPUs, Loongson-2K series CPUs
+ and Loongson LS7A bridge chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-loongson.
+
config RTC_DRV_LPC24XX
tristate "NXP RTC for LPC178x/18xx/408x/43xx"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -1726,16 +1741,6 @@ config RTC_DRV_TEGRA
This driver can also be built as a module. If so, the module
will be called rtc-tegra.
-config RTC_DRV_LOONGSON1
- tristate "loongson1 RTC support"
- depends on MACH_LOONGSON32
- help
- This is a driver for the loongson1 on-chip Counter0 (Time-Of-Year
- counter) to be used as a RTC.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-ls1x.
-
config RTC_DRV_MXC
tristate "Freescale MXC Real Time Clock"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index ea445d1ebb17..fd209883ee2e 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -78,7 +78,7 @@ obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
obj-$(CONFIG_RTC_DRV_ISL12026) += rtc-isl12026.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
-obj-$(CONFIG_RTC_DRV_LOONGSON1) += rtc-ls1x.o
+obj-$(CONFIG_RTC_DRV_LOONGSON) += rtc-loongson.o
obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o
obj-$(CONFIG_RTC_DRV_LPC24XX) += rtc-lpc24xx.o
obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index f2b0971d2c65..100062001831 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -944,7 +944,7 @@ static struct i2c_driver abb5zes3_driver = {
.pm = &abb5zes3_rtc_pm_ops,
.of_match_table = of_match_ptr(abb5zes3_dt_match),
},
- .probe_new = abb5zes3_probe,
+ .probe = abb5zes3_probe,
.id_table = abb5zes3_id,
};
module_i2c_driver(abb5zes3_driver);
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index 34611f6dedcb..04e1b8e93bc1 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -455,7 +455,7 @@ static const struct hwmon_channel_info abeoz9_temp = {
.config = abeoz9_temp_config,
};
-static const struct hwmon_channel_info *abeoz9_info[] = {
+static const struct hwmon_channel_info * const abeoz9_info[] = {
&abeoz9_chip,
&abeoz9_temp,
NULL
@@ -584,7 +584,7 @@ static struct i2c_driver abeoz9_driver = {
.name = "rtc-ab-eoz9",
.of_match_table = of_match_ptr(abeoz9_dt_match),
},
- .probe_new = abeoz9_probe,
+ .probe = abeoz9_probe,
.id_table = abeoz9_id,
};
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index f34a2e59cac7..e08d3181bd2a 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -992,7 +992,7 @@ static struct i2c_driver abx80x_driver = {
.name = "rtc-abx80x",
.of_match_table = of_match_ptr(abx80x_of_match),
},
- .probe_new = abx80x_probe,
+ .probe = abx80x_probe,
.id_table = abx80x_id,
};
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 967ddc6bf76d..591e42391747 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -320,7 +320,7 @@ static struct i2c_driver bq32k_driver = {
.name = "bq32k",
.of_match_table = of_match_ptr(bq32k_of_match),
},
- .probe_new = bq32k_probe,
+ .probe = bq32k_probe,
.remove = bq32k_remove,
.id_table = bq32k_id,
};
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index e86ba84df6cb..cb5acecc11aa 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -2011,7 +2011,7 @@ static struct i2c_driver ds1307_driver = {
.name = "rtc-ds1307",
.of_match_table = ds1307_of_match,
},
- .probe_new = ds1307_probe,
+ .probe = ds1307_probe,
.id_table = ds1307_id,
};
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 7f089f066163..4a5005cb23f5 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -572,7 +572,7 @@ static struct i2c_driver ds1374_driver = {
.of_match_table = of_match_ptr(ds1374_of_match),
.pm = &ds1374_pm,
},
- .probe_new = ds1374_probe,
+ .probe = ds1374_probe,
.remove = ds1374_remove,
.id_table = ds1374_id,
};
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index a3bb2cd9c881..641799f30baa 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -149,7 +149,7 @@ static struct i2c_driver ds1672_driver = {
.name = "rtc-ds1672",
.of_match_table = of_match_ptr(ds1672_of_match),
},
- .probe_new = ds1672_probe,
+ .probe = ds1672_probe,
.id_table = ds1672_id,
};
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index dd31a60c1fc6..89d7b085f721 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -359,7 +359,7 @@ static const struct hwmon_channel_info ds3232_hwmon_temp = {
.config = ds3232_hwmon_temp_config,
};
-static const struct hwmon_channel_info *ds3232_hwmon_info[] = {
+static const struct hwmon_channel_info * const ds3232_hwmon_info[] = {
&ds3232_hwmon_chip,
&ds3232_hwmon_temp,
NULL
@@ -603,7 +603,7 @@ static struct i2c_driver ds3232_driver = {
.of_match_table = of_match_ptr(ds3232_of_match),
.pm = &ds3232_pm_ops,
},
- .probe_new = ds3232_i2c_probe,
+ .probe = ds3232_i2c_probe,
.id_table = ds3232_id,
};
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index 53f9f9391a5f..fc772eae5da5 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -147,7 +147,7 @@ static struct i2c_driver em3027_driver = {
.name = "rtc-em3027",
.of_match_table = of_match_ptr(em3027_of_match),
},
- .probe_new = em3027_probe,
+ .probe = em3027_probe,
.id_table = em3027_id,
};
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index f59bb81f23c0..400ce4ad0c49 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -517,7 +517,7 @@ static struct i2c_driver fm3130_driver = {
.driver = {
.name = "rtc-fm3130",
},
- .probe_new = fm3130_probe,
+ .probe = fm3130_probe,
.id_table = fm3130_id,
};
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 7d5a298a9a3b..b018535c842b 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -576,7 +576,7 @@ static struct i2c_driver hym8563_driver = {
.pm = &hym8563_pm_ops,
.of_match_table = hym8563_dt_idtable,
},
- .probe_new = hym8563_probe,
+ .probe = hym8563_probe,
.id_table = hym8563_id,
};
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index e68a79b5e00e..a613257d1574 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -89,7 +89,7 @@ static int isl12022_hwmon_read(struct device *dev,
return -EOPNOTSUPP;
}
-static const struct hwmon_channel_info *isl12022_hwmon_info[] = {
+static const struct hwmon_channel_info * const isl12022_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL
};
@@ -262,7 +262,7 @@ static struct i2c_driver isl12022_driver = {
.name = "rtc-isl12022",
.of_match_table = isl12022_dt_match,
},
- .probe_new = isl12022_probe,
+ .probe = isl12022_probe,
.id_table = isl12022_id,
};
diff --git a/drivers/rtc/rtc-isl12026.c b/drivers/rtc/rtc-isl12026.c
index 1bfca39079d4..5abff5d348ac 100644
--- a/drivers/rtc/rtc-isl12026.c
+++ b/drivers/rtc/rtc-isl12026.c
@@ -490,7 +490,7 @@ static struct i2c_driver isl12026_driver = {
.name = "rtc-isl12026",
.of_match_table = isl12026_dt_match,
},
- .probe_new = isl12026_probe_new,
+ .probe = isl12026_probe_new,
.remove = isl12026_remove,
};
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 73cc6aaf9b8b..b0712b4e3648 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -6,6 +6,7 @@
*/
#include <linux/bcd.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -68,42 +69,60 @@
static struct i2c_driver isl1208_driver;
-/* ISL1208 various variants */
-enum isl1208_id {
- TYPE_ISL1208 = 0,
- TYPE_ISL1209,
- TYPE_ISL1218,
- TYPE_ISL1219,
- ISL_LAST_ID
-};
-
/* Chip capabilities table */
-static const struct isl1208_config {
- const char name[8];
+struct isl1208_config {
unsigned int nvmem_length;
unsigned has_tamper:1;
unsigned has_timestamp:1;
-} isl1208_configs[] = {
- [TYPE_ISL1208] = { "isl1208", 2, false, false },
- [TYPE_ISL1209] = { "isl1209", 2, true, false },
- [TYPE_ISL1218] = { "isl1218", 8, false, false },
- [TYPE_ISL1219] = { "isl1219", 2, true, true },
+ unsigned has_inverted_osc_bit:1;
+};
+
+static const struct isl1208_config config_isl1208 = {
+ .nvmem_length = 2,
+ .has_tamper = false,
+ .has_timestamp = false
+};
+
+static const struct isl1208_config config_isl1209 = {
+ .nvmem_length = 2,
+ .has_tamper = true,
+ .has_timestamp = false
+};
+
+static const struct isl1208_config config_isl1218 = {
+ .nvmem_length = 8,
+ .has_tamper = false,
+ .has_timestamp = false
+};
+
+static const struct isl1208_config config_isl1219 = {
+ .nvmem_length = 2,
+ .has_tamper = true,
+ .has_timestamp = true
+};
+
+static const struct isl1208_config config_raa215300_a0 = {
+ .nvmem_length = 2,
+ .has_tamper = false,
+ .has_timestamp = false,
+ .has_inverted_osc_bit = true
};
static const struct i2c_device_id isl1208_id[] = {
- { "isl1208", TYPE_ISL1208 },
- { "isl1209", TYPE_ISL1209 },
- { "isl1218", TYPE_ISL1218 },
- { "isl1219", TYPE_ISL1219 },
+ { "isl1208", .driver_data = (kernel_ulong_t)&config_isl1208 },
+ { "isl1209", .driver_data = (kernel_ulong_t)&config_isl1209 },
+ { "isl1218", .driver_data = (kernel_ulong_t)&config_isl1218 },
+ { "isl1219", .driver_data = (kernel_ulong_t)&config_isl1219 },
+ { "raa215300_a0", .driver_data = (kernel_ulong_t)&config_raa215300_a0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, isl1208_id);
static const __maybe_unused struct of_device_id isl1208_of_match[] = {
- { .compatible = "isil,isl1208", .data = &isl1208_configs[TYPE_ISL1208] },
- { .compatible = "isil,isl1209", .data = &isl1208_configs[TYPE_ISL1209] },
- { .compatible = "isil,isl1218", .data = &isl1208_configs[TYPE_ISL1218] },
- { .compatible = "isil,isl1219", .data = &isl1208_configs[TYPE_ISL1219] },
+ { .compatible = "isil,isl1208", .data = &config_isl1208 },
+ { .compatible = "isil,isl1209", .data = &config_isl1209 },
+ { .compatible = "isil,isl1218", .data = &config_isl1218 },
+ { .compatible = "isil,isl1219", .data = &config_isl1219 },
{ }
};
MODULE_DEVICE_TABLE(of, isl1208_of_match);
@@ -166,6 +185,20 @@ isl1208_i2c_validate_client(struct i2c_client *client)
return 0;
}
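+/* Update the XTOSCB oscillator-select bit in the status register if it does not already match */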
+static int isl1208_set_xtoscb(struct i2c_client *client, int sr, int xtosb_val)
+{
+ /* Do nothing if the bit is already set to the desired value */
+ if ((sr & ISL1208_REG_SR_XTOSCB) == xtosb_val)
+ return 0;
+
+ if (xtosb_val)
+ sr |= ISL1208_REG_SR_XTOSCB;
+ else
+ sr &= ~ISL1208_REG_SR_XTOSCB;
+
+ return i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
+}
+
static int
isl1208_i2c_get_sr(struct i2c_client *client)
{
@@ -502,7 +535,6 @@ isl1208_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
return 0;
}
-
static int
isl1208_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
@@ -797,11 +829,25 @@ static int isl1208_setup_irq(struct i2c_client *client, int irq)
}
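+/* Return 1 if the named clock is described for this device, 0 if it is absent, or a negative error code */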
static int
+isl1208_clk_present(struct i2c_client *client, const char *name)
+{
+ struct clk *clk;
+
+ clk = devm_clk_get_optional(&client->dev, name);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return !!clk;
+}
+
+static int
isl1208_probe(struct i2c_client *client)
{
- int rc = 0;
struct isl1208_state *isl1208;
int evdet_irq = -1;
+ int xtosb_val = 0;
+ int rc = 0;
+ int sr;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -823,9 +869,22 @@ isl1208_probe(struct i2c_client *client)
} else {
const struct i2c_device_id *id = i2c_match_id(isl1208_id, client);
- if (id->driver_data >= ISL_LAST_ID)
+ if (!id)
return -ENODEV;
- isl1208->config = &isl1208_configs[id->driver_data];
+ isl1208->config = (struct isl1208_config *)id->driver_data;
+ }
+
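+ /* Prefer the "xin" crystal; if it is absent, fall back to "clkin" and request XTOSCB to be set */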
+ rc = isl1208_clk_present(client, "xin");
+ if (rc < 0)
+ return rc;
+
+ if (!rc) {
+ rc = isl1208_clk_present(client, "clkin");
+ if (rc < 0)
+ return rc;
+
+ if (rc)
+ xtosb_val = 1;
}
isl1208->rtc = devm_rtc_allocate_device(&client->dev);
@@ -839,13 +898,20 @@ isl1208_probe(struct i2c_client *client)
isl1208->nvmem_config.size = isl1208->config->nvmem_length;
isl1208->nvmem_config.priv = isl1208;
- rc = isl1208_i2c_get_sr(client);
- if (rc < 0) {
+ sr = isl1208_i2c_get_sr(client);
+ if (sr < 0) {
dev_err(&client->dev, "reading status failed\n");
- return rc;
+ return sr;
}
- if (rc & ISL1208_REG_SR_RTCF)
+ if (isl1208->config->has_inverted_osc_bit)
+ xtosb_val = !xtosb_val;
+
+ rc = isl1208_set_xtoscb(client, sr, xtosb_val);
+ if (rc)
+ return rc;
+
+ if (sr & ISL1208_REG_SR_RTCF)
dev_warn(&client->dev, "rtc power failure detected, "
"please set clock.\n");
@@ -908,7 +974,7 @@ static struct i2c_driver isl1208_driver = {
.name = "rtc-isl1208",
.of_match_table = of_match_ptr(isl1208_of_match),
},
- .probe_new = isl1208_probe,
+ .probe = isl1208_probe,
.id_table = isl1208_id,
};
diff --git a/drivers/rtc/rtc-loongson.c b/drivers/rtc/rtc-loongson.c
new file mode 100644
index 000000000000..e8ffc1ab90b0
--- /dev/null
+++ b/drivers/rtc/rtc-loongson.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Loongson RTC driver
+ *
+ * Maintained out-of-tree by Huacai Chen <chenhuacai@kernel.org>.
+ * Rewritten for mainline by WANG Xuerui <git@xen0n.name>.
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/acpi.h>
+
+/* Time Of Year (TOY) counter registers */
+#define TOY_TRIM_REG 0x20 /* Must be initialized to 0 */
+#define TOY_WRITE0_REG 0x24 /* TOY low 32-bits value (write-only) */
+#define TOY_WRITE1_REG 0x28 /* TOY high 32-bits value (write-only) */
+#define TOY_READ0_REG 0x2c /* TOY low 32-bits value (read-only) */
+#define TOY_READ1_REG 0x30 /* TOY high 32-bits value (read-only) */
+#define TOY_MATCH0_REG 0x34 /* TOY timing interrupt 0 */
+#define TOY_MATCH1_REG 0x38 /* TOY timing interrupt 1 */
+#define TOY_MATCH2_REG 0x3c /* TOY timing interrupt 2 */
+
+/* RTC counter registers */
+#define RTC_CTRL_REG 0x40 /* TOY and RTC control register */
+#define RTC_TRIM_REG 0x60 /* Must be initialized to 0 */
+#define RTC_WRITE0_REG 0x64 /* RTC counters value (write-only) */
+#define RTC_READ0_REG 0x68 /* RTC counters value (read-only) */
+#define RTC_MATCH0_REG 0x6c /* RTC timing interrupt 0 */
+#define RTC_MATCH1_REG 0x70 /* RTC timing interrupt 1 */
+#define RTC_MATCH2_REG 0x74 /* RTC timing interrupt 2 */
+
+/* bitmask of TOY_WRITE0_REG */
+#define TOY_MON GENMASK(31, 26)
+#define TOY_DAY GENMASK(25, 21)
+#define TOY_HOUR GENMASK(20, 16)
+#define TOY_MIN GENMASK(15, 10)
+#define TOY_SEC GENMASK(9, 4)
+#define TOY_MSEC GENMASK(3, 0)
+
+/* bitmask of TOY_MATCH0/1/2_REG */
+#define TOY_MATCH_YEAR GENMASK(31, 26)
+#define TOY_MATCH_MON GENMASK(25, 22)
+#define TOY_MATCH_DAY GENMASK(21, 17)
+#define TOY_MATCH_HOUR GENMASK(16, 12)
+#define TOY_MATCH_MIN GENMASK(11, 6)
+#define TOY_MATCH_SEC GENMASK(5, 0)
+
+/* bitmask of RTC_CTRL_REG */
+#define RTC_ENABLE BIT(13) /* 1: RTC counters enable */
+#define TOY_ENABLE BIT(11) /* 1: TOY counters enable */
+#define OSC_ENABLE BIT(8) /* 1: 32.768k crystal enable */
+#define TOY_ENABLE_MASK (TOY_ENABLE | OSC_ENABLE)
+
+/* PM domain registers */
+#define PM1_STS_REG 0x0c /* Power management 1 status register */
+#define RTC_STS BIT(10) /* RTC status */
+#define PM1_EN_REG 0x10 /* Power management 1 enable register */
+#define RTC_EN BIT(10) /* RTC event enable */
+
+/*
+ * According to the LS1C manual, RTC_CTRL and alarm-related registers are not defined.
+ * Accessing the relevant registers will cause the system to hang.
+ */
+#define LS1C_RTC_CTRL_WORKAROUND BIT(0)
+
+struct loongson_rtc_config {
+ u32 pm_offset; /* Offset of PM domain, for RTC alarm wakeup */
+ u32 flags; /* Workaround bits */
+};
+
+struct loongson_rtc_priv {
+ spinlock_t lock; /* protects PM registers access */
+ u32 fix_year; /* RTC alarm year compensation value */
+ struct rtc_device *rtcdev;
+ struct regmap *regmap;
+ void __iomem *pm_base; /* PM domain base, for RTC alarm wakeup */
+ const struct loongson_rtc_config *config;
+};
+
+static const struct loongson_rtc_config ls1b_rtc_config = {
+ .pm_offset = 0,
+ .flags = 0,
+};
+
+static const struct loongson_rtc_config ls1c_rtc_config = {
+ .pm_offset = 0,
+ .flags = LS1C_RTC_CTRL_WORKAROUND,
+};
+
+static const struct loongson_rtc_config generic_rtc_config = {
+ .pm_offset = 0x100,
+ .flags = 0,
+};
+
+static const struct loongson_rtc_config ls2k1000_rtc_config = {
+ .pm_offset = 0x800,
+ .flags = 0,
+};
+
+static const struct regmap_config loongson_rtc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+/* RTC alarm irq handler */
+static irqreturn_t loongson_rtc_isr(int irq, void *id)
+{
+ struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
+
+ rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+/* For ACPI fixed event handler */
+static u32 loongson_rtc_handler(void *id)
+{
+ struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
+
+ spin_lock(&priv->lock);
+ /* Disable RTC alarm wakeup and interrupt */
+ writel(readl(priv->pm_base + PM1_EN_REG) & ~RTC_EN,
+ priv->pm_base + PM1_EN_REG);
+
+ /* Clear RTC interrupt status */
+ writel(RTC_STS, priv->pm_base + PM1_STS_REG);
+ spin_unlock(&priv->lock);
+
+ /*
+ * TOY_MATCH0_REG should be cleared to 0 here,
+ * otherwise the interrupt cannot be cleared.
+ */
+ return regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
+}
+
+static int loongson_rtc_set_enabled(struct device *dev)
+{
+ struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
+
+ if (priv->config->flags & LS1C_RTC_CTRL_WORKAROUND)
+ return 0;
+
+ /* Enable RTC TOY counters and crystal */
+ return regmap_update_bits(priv->regmap, RTC_CTRL_REG, TOY_ENABLE_MASK,
+ TOY_ENABLE_MASK);
+}
+
+static bool loongson_rtc_get_enabled(struct device *dev)
+{
+ int ret;
+ u32 ctrl_data;
+ struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
+
+ if (priv->config->flags & LS1C_RTC_CTRL_WORKAROUND)
+ return true;
+
+ ret = regmap_read(priv->regmap, RTC_CTRL_REG, &ctrl_data);
+ if (ret < 0)
+ return false;
+
+ return ctrl_data & TOY_ENABLE_MASK;
+}
+
+static int loongson_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ int ret;
+ u32 rtc_data[2];
+ struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
+
+ if (!loongson_rtc_get_enabled(dev))
+ return -EINVAL;
+
+ ret = regmap_bulk_read(priv->regmap, TOY_READ0_REG, rtc_data,
+ ARRAY_SIZE(rtc_data));
+ if (ret < 0)
+ return ret;
+
+ tm->tm_sec = FIELD_GET(TOY_SEC, rtc_data[0]);
+ tm->tm_min = FIELD_GET(TOY_MIN, rtc_data[0]);
+ tm->tm_hour = FIELD_GET(TOY_HOUR, rtc_data[0]);
+ tm->tm_mday = FIELD_GET(TOY_DAY, rtc_data[0]);
+ tm->tm_mon = FIELD_GET(TOY_MON, rtc_data[0]) - 1;
+ tm->tm_year = rtc_data[1];
+
+ /* Cache the year base (a multiple of 64) used for RTC alarm year compensation. */
+ priv->fix_year = tm->tm_year / 64 * 64;
+ return 0;
+}
+
+static int loongson_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ int ret;
+ u32 rtc_data[2];
+ struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
+
+ rtc_data[0] = FIELD_PREP(TOY_SEC, tm->tm_sec)
+ | FIELD_PREP(TOY_MIN, tm->tm_min)
+ | FIELD_PREP(TOY_HOUR, tm->tm_hour)
+ | FIELD_PREP(TOY_DAY, tm->tm_mday)
+ | FIELD_PREP(TOY_MON, tm->tm_mon + 1);
+ rtc_data[1] = tm->tm_year;
+
+ ret = regmap_bulk_write(priv->regmap, TOY_WRITE0_REG, rtc_data,
+ ARRAY_SIZE(rtc_data));
+ if (ret < 0)
+ return ret;
+
+ return loongson_rtc_set_enabled(dev);
+}
+
+static int loongson_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ int ret;
+ u32 alarm_data;
+ struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
+
+ ret = regmap_read(priv->regmap, TOY_MATCH0_REG, &alarm_data);
+ if (ret < 0)
+ return ret;
+
+ alrm->time.tm_sec = FIELD_GET(TOY_MATCH_SEC, alarm_data);
+ alrm->time.tm_min = FIELD_GET(TOY_MATCH_MIN, alarm_data);
+ alrm->time.tm_hour = FIELD_GET(TOY_MATCH_HOUR, alarm_data);
+ alrm->time.tm_mday = FIELD_GET(TOY_MATCH_DAY, alarm_data);
+ alrm->time.tm_mon = FIELD_GET(TOY_MATCH_MON, alarm_data) - 1;
+ /*
+ * This is a hardware bug: the year field of SYS_TOYMATCH is only 6 bits,
+ * making it impossible to save year values larger than 64.
+ *
+ * SYS_TOYMATCH is used to match the alarm time value and determine if
+ * an alarm is triggered, so we must keep the lower 6 bits of the year
+ * value constant during the value conversion.
+ *
+ * In summary, we need to manually add 64 (or a multiple of 64) to the
+ * year value to avoid the invalid alarm prompt at startup.
+ */
+ alrm->time.tm_year = FIELD_GET(TOY_MATCH_YEAR, alarm_data) + priv->fix_year;
+
+ alrm->enabled = !!(readl(priv->pm_base + PM1_EN_REG) & RTC_EN);
+ return 0;
+}
+
+static int loongson_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ u32 val;
+ struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
+
+ spin_lock(&priv->lock);
+ val = readl(priv->pm_base + PM1_EN_REG);
+ /* Enable RTC alarm wakeup */
+ writel(enabled ? val | RTC_EN : val & ~RTC_EN,
+ priv->pm_base + PM1_EN_REG);
+ spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int loongson_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ int ret;
+ u32 alarm_data;
+ struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
+
+ alarm_data = FIELD_PREP(TOY_MATCH_SEC, alrm->time.tm_sec)
+ | FIELD_PREP(TOY_MATCH_MIN, alrm->time.tm_min)
+ | FIELD_PREP(TOY_MATCH_HOUR, alrm->time.tm_hour)
+ | FIELD_PREP(TOY_MATCH_DAY, alrm->time.tm_mday)
+ | FIELD_PREP(TOY_MATCH_MON, alrm->time.tm_mon + 1)
+ | FIELD_PREP(TOY_MATCH_YEAR, alrm->time.tm_year - priv->fix_year);
+
+ ret = regmap_write(priv->regmap, TOY_MATCH0_REG, alarm_data);
+ if (ret < 0)
+ return ret;
+
+ return loongson_rtc_alarm_irq_enable(dev, alrm->enabled);
+}
+
+static const struct rtc_class_ops loongson_rtc_ops = {
+ .read_time = loongson_rtc_read_time,
+ .set_time = loongson_rtc_set_time,
+ .read_alarm = loongson_rtc_read_alarm,
+ .set_alarm = loongson_rtc_set_alarm,
+ .alarm_irq_enable = loongson_rtc_alarm_irq_enable,
+};
+
+static int loongson_rtc_probe(struct platform_device *pdev)
+{
+ int ret, alarm_irq;
+ void __iomem *regs;
+ struct loongson_rtc_priv *priv;
+ struct device *dev = &pdev->dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "devm_platform_ioremap_resource failed\n");
+
+ priv->regmap = devm_regmap_init_mmio(dev, regs,
+ &loongson_rtc_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap),
+ "devm_regmap_init_mmio failed\n");
+
+ priv->config = device_get_match_data(dev);
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->rtcdev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(priv->rtcdev))
+ return dev_err_probe(dev, PTR_ERR(priv->rtcdev),
+ "devm_rtc_allocate_device failed\n");
+
+ /* Get RTC alarm irq */
+ alarm_irq = platform_get_irq(pdev, 0);
+ if (alarm_irq > 0) {
+ ret = devm_request_irq(dev, alarm_irq, loongson_rtc_isr,
+ 0, "loongson-alarm", priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Unable to request irq %d\n",
+ alarm_irq);
+
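+ /* The PM1 status/enable registers sit pm_offset bytes below the RTC register block */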
+ priv->pm_base = regs - priv->config->pm_offset;
+ device_init_wakeup(dev, 1);
+
+ if (has_acpi_companion(dev))
+ acpi_install_fixed_event_handler(ACPI_EVENT_RTC,
+ loongson_rtc_handler, priv);
+ } else {
+ /* Loongson-1C RTC does not support alarm */
+ clear_bit(RTC_FEATURE_ALARM, priv->rtcdev->features);
+ }
+
+ /* Loongson RTC does not support UIE */
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, priv->rtcdev->features);
+ priv->rtcdev->ops = &loongson_rtc_ops;
+ priv->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ priv->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
+
+ return devm_rtc_register_device(priv->rtcdev);
+}
+
+static void loongson_rtc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
+
+ if (!test_bit(RTC_FEATURE_ALARM, priv->rtcdev->features))
+ return;
+
+ if (has_acpi_companion(dev))
+ acpi_remove_fixed_event_handler(ACPI_EVENT_RTC,
+ loongson_rtc_handler);
+
+ device_init_wakeup(dev, 0);
+ loongson_rtc_alarm_irq_enable(dev, 0);
+}
+
+static const struct of_device_id loongson_rtc_of_match[] = {
+ { .compatible = "loongson,ls1b-rtc", .data = &ls1b_rtc_config },
+ { .compatible = "loongson,ls1c-rtc", .data = &ls1c_rtc_config },
+ { .compatible = "loongson,ls7a-rtc", .data = &generic_rtc_config },
+ { .compatible = "loongson,ls2k1000-rtc", .data = &ls2k1000_rtc_config },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, loongson_rtc_of_match);
+
+static const struct acpi_device_id loongson_rtc_acpi_match[] = {
+ { "LOON0001", .driver_data = (kernel_ulong_t)&generic_rtc_config },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, loongson_rtc_acpi_match);
+
+static struct platform_driver loongson_rtc_driver = {
+ .probe = loongson_rtc_probe,
+ .remove_new = loongson_rtc_remove,
+ .driver = {
+ .name = "loongson-rtc",
+ .of_match_table = loongson_rtc_of_match,
+ .acpi_match_table = loongson_rtc_acpi_match,
+ },
+};
+module_platform_driver(loongson_rtc_driver);
+
+MODULE_DESCRIPTION("Loongson RTC driver");
+MODULE_AUTHOR("Binbin Zhou <zhoubinbin@loongson.cn>");
+MODULE_AUTHOR("WANG Xuerui <git@xen0n.name>");
+MODULE_AUTHOR("Huacai Chen <chenhuacai@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ls1x.c b/drivers/rtc/rtc-ls1x.c
deleted file mode 100644
index 5af26dc5c2a3..000000000000
--- a/drivers/rtc/rtc-ls1x.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2011 Zhao Zhang <zhzhl555@gmail.com>
- *
- * Derived from driver/rtc/rtc-au1xxx.c
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/rtc.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <loongson1.h>
-
-#define LS1X_RTC_REG_OFFSET (LS1X_RTC_BASE + 0x20)
-#define LS1X_RTC_REGS(x) \
- ((void __iomem *)KSEG1ADDR(LS1X_RTC_REG_OFFSET + (x)))
-
-/*RTC programmable counters 0 and 1*/
-#define SYS_COUNTER_CNTRL (LS1X_RTC_REGS(0x20))
-#define SYS_CNTRL_ERS (1 << 23)
-#define SYS_CNTRL_RTS (1 << 20)
-#define SYS_CNTRL_RM2 (1 << 19)
-#define SYS_CNTRL_RM1 (1 << 18)
-#define SYS_CNTRL_RM0 (1 << 17)
-#define SYS_CNTRL_RS (1 << 16)
-#define SYS_CNTRL_BP (1 << 14)
-#define SYS_CNTRL_REN (1 << 13)
-#define SYS_CNTRL_BRT (1 << 12)
-#define SYS_CNTRL_TEN (1 << 11)
-#define SYS_CNTRL_BTT (1 << 10)
-#define SYS_CNTRL_E0 (1 << 8)
-#define SYS_CNTRL_ETS (1 << 7)
-#define SYS_CNTRL_32S (1 << 5)
-#define SYS_CNTRL_TTS (1 << 4)
-#define SYS_CNTRL_TM2 (1 << 3)
-#define SYS_CNTRL_TM1 (1 << 2)
-#define SYS_CNTRL_TM0 (1 << 1)
-#define SYS_CNTRL_TS (1 << 0)
-
-/* Programmable Counter 0 Registers */
-#define SYS_TOYTRIM (LS1X_RTC_REGS(0))
-#define SYS_TOYWRITE0 (LS1X_RTC_REGS(4))
-#define SYS_TOYWRITE1 (LS1X_RTC_REGS(8))
-#define SYS_TOYREAD0 (LS1X_RTC_REGS(0xC))
-#define SYS_TOYREAD1 (LS1X_RTC_REGS(0x10))
-#define SYS_TOYMATCH0 (LS1X_RTC_REGS(0x14))
-#define SYS_TOYMATCH1 (LS1X_RTC_REGS(0x18))
-#define SYS_TOYMATCH2 (LS1X_RTC_REGS(0x1C))
-
-/* Programmable Counter 1 Registers */
-#define SYS_RTCTRIM (LS1X_RTC_REGS(0x40))
-#define SYS_RTCWRITE0 (LS1X_RTC_REGS(0x44))
-#define SYS_RTCREAD0 (LS1X_RTC_REGS(0x48))
-#define SYS_RTCMATCH0 (LS1X_RTC_REGS(0x4C))
-#define SYS_RTCMATCH1 (LS1X_RTC_REGS(0x50))
-#define SYS_RTCMATCH2 (LS1X_RTC_REGS(0x54))
-
-#define LS1X_SEC_OFFSET (4)
-#define LS1X_MIN_OFFSET (10)
-#define LS1X_HOUR_OFFSET (16)
-#define LS1X_DAY_OFFSET (21)
-#define LS1X_MONTH_OFFSET (26)
-
-
-#define LS1X_SEC_MASK (0x3f)
-#define LS1X_MIN_MASK (0x3f)
-#define LS1X_HOUR_MASK (0x1f)
-#define LS1X_DAY_MASK (0x1f)
-#define LS1X_MONTH_MASK (0x3f)
-#define LS1X_YEAR_MASK (0xffffffff)
-
-#define ls1x_get_sec(t) (((t) >> LS1X_SEC_OFFSET) & LS1X_SEC_MASK)
-#define ls1x_get_min(t) (((t) >> LS1X_MIN_OFFSET) & LS1X_MIN_MASK)
-#define ls1x_get_hour(t) (((t) >> LS1X_HOUR_OFFSET) & LS1X_HOUR_MASK)
-#define ls1x_get_day(t) (((t) >> LS1X_DAY_OFFSET) & LS1X_DAY_MASK)
-#define ls1x_get_month(t) (((t) >> LS1X_MONTH_OFFSET) & LS1X_MONTH_MASK)
-
-#define RTC_CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
-
-static int ls1x_rtc_read_time(struct device *dev, struct rtc_time *rtm)
-{
- unsigned long v;
- time64_t t;
-
- v = readl(SYS_TOYREAD0);
- t = readl(SYS_TOYREAD1);
-
- memset(rtm, 0, sizeof(struct rtc_time));
- t = mktime64((t & LS1X_YEAR_MASK), ls1x_get_month(v),
- ls1x_get_day(v), ls1x_get_hour(v),
- ls1x_get_min(v), ls1x_get_sec(v));
- rtc_time64_to_tm(t, rtm);
-
- return 0;
-}
-
-static int ls1x_rtc_set_time(struct device *dev, struct rtc_time *rtm)
-{
- unsigned long v, t, c;
- int ret = -ETIMEDOUT;
-
- v = ((rtm->tm_mon + 1) << LS1X_MONTH_OFFSET)
- | (rtm->tm_mday << LS1X_DAY_OFFSET)
- | (rtm->tm_hour << LS1X_HOUR_OFFSET)
- | (rtm->tm_min << LS1X_MIN_OFFSET)
- | (rtm->tm_sec << LS1X_SEC_OFFSET);
-
- writel(v, SYS_TOYWRITE0);
- c = 0x10000;
- /* add timeout check counter, for more safe */
- while ((readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TS) && --c)
- usleep_range(1000, 3000);
-
- if (!c) {
- dev_err(dev, "set time timeout!\n");
- goto err;
- }
-
- t = rtm->tm_year + 1900;
- writel(t, SYS_TOYWRITE1);
- c = 0x10000;
- while ((readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TS) && --c)
- usleep_range(1000, 3000);
-
- if (!c) {
- dev_err(dev, "set time timeout!\n");
- goto err;
- }
- return 0;
-err:
- return ret;
-}
-
-static const struct rtc_class_ops ls1x_rtc_ops = {
- .read_time = ls1x_rtc_read_time,
- .set_time = ls1x_rtc_set_time,
-};
-
-static int ls1x_rtc_probe(struct platform_device *pdev)
-{
- struct rtc_device *rtcdev;
- unsigned long v;
-
- v = readl(SYS_COUNTER_CNTRL);
- if (!(v & RTC_CNTR_OK)) {
- dev_err(&pdev->dev, "rtc counters not working\n");
- return -ENODEV;
- }
-
- /* set to 1 HZ if needed */
- if (readl(SYS_TOYTRIM) != 32767) {
- v = 0x100000;
- while ((readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TTS) && --v)
- usleep_range(1000, 3000);
-
- if (!v) {
- dev_err(&pdev->dev, "time out\n");
- return -ETIMEDOUT;
- }
- writel(32767, SYS_TOYTRIM);
- }
- /* this loop coundn't be endless */
- while (readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TTS)
- usleep_range(1000, 3000);
-
- rtcdev = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtcdev))
- return PTR_ERR(rtcdev);
-
- platform_set_drvdata(pdev, rtcdev);
- rtcdev->ops = &ls1x_rtc_ops;
- rtcdev->range_min = RTC_TIMESTAMP_BEGIN_1900;
- rtcdev->range_max = RTC_TIMESTAMP_END_2099;
-
- return devm_rtc_register_device(rtcdev);
-}
-
-static struct platform_driver ls1x_rtc_driver = {
- .driver = {
- .name = "ls1x-rtc",
- },
- .probe = ls1x_rtc_probe,
-};
-
-module_platform_driver(ls1x_rtc_driver);
-
-MODULE_AUTHOR("zhao zhang <zhzhl555@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index c1963f7c424d..3cc5151e0986 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -1013,7 +1013,7 @@ static struct i2c_driver m41t80_driver = {
.of_match_table = of_match_ptr(m41t80_of_match),
.pm = &m41t80_pm,
},
- .probe_new = m41t80_probe,
+ .probe = m41t80_probe,
.remove = m41t80_remove,
.id_table = m41t80_id,
};
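The one-line .probe_new to .probe renames in this and the following RTC hunks all reflect the same I2C core change: the .probe member now takes the single-argument callback, so drivers move back from the transitional .probe_new field. A hedged sketch of what such a driver looks like after the conversion; the "example" names are placeholders, not code from this series.

#include <linux/i2c.h>
#include <linux/module.h>

static int example_probe(struct i2c_client *client)
{
	/* one-argument prototype; no struct i2c_device_id parameter */
	dev_info(&client->dev, "bound at 0x%02x\n", client->addr);
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example-rtc", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example-rtc",
	},
	.probe = example_probe,	/* was .probe_new before the rename */
	.id_table = example_id,
};
module_i2c_driver(example_driver);
MODULE_LICENSE("GPL");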
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 0a33851cc51f..31b910e4d91a 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -224,7 +224,7 @@ static struct i2c_driver max6900_driver = {
.driver = {
.name = "rtc-max6900",
},
- .probe_new = max6900_probe,
+ .probe = max6900_probe,
.id_table = max6900_id,
};
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
index 0a3b14c95d90..a4e3f924837e 100644
--- a/drivers/rtc/rtc-nct3018y.c
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -540,7 +540,7 @@ static struct i2c_driver nct3018y_driver = {
.name = "rtc-nct3018y",
.of_match_table = of_match_ptr(nct3018y_of_match),
},
- .probe_new = nct3018y_probe,
+ .probe = nct3018y_probe,
.id_table = nct3018y_id,
};
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 87f4fc9df68b..ee03b04b74ba 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -923,7 +923,7 @@ static struct i2c_driver pcf2127_i2c_driver = {
.name = "rtc-pcf2127-i2c",
.of_match_table = of_match_ptr(pcf2127_of_match),
},
- .probe_new = pcf2127_i2c_probe,
+ .probe = pcf2127_i2c_probe,
.id_table = pcf2127_i2c_id,
};
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 71a456355981..e517abfaee2a 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -681,7 +681,7 @@ static struct i2c_driver pcf85063_driver = {
.name = "rtc-pcf85063",
.of_match_table = of_match_ptr(pcf85063_of_match),
},
- .probe_new = pcf85063_probe,
+ .probe = pcf85063_probe,
.id_table = pcf85063_ids,
};
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index e7115ebef707..d1efde3e7a80 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -488,7 +488,7 @@ static struct i2c_driver pcf8523_driver = {
.name = "rtc-pcf8523",
.of_match_table = pcf8523_of_match,
},
- .probe_new = pcf8523_probe,
+ .probe = pcf8523_probe,
.id_table = pcf8523_id,
};
module_i2c_driver(pcf8523_driver);
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index 8958eadf1c3e..65b8b1338dbb 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -475,7 +475,7 @@ static struct i2c_driver pcf85363_driver = {
.name = "pcf85363",
.of_match_table = of_match_ptr(dev_ids),
},
- .probe_new = pcf85363_probe,
+ .probe = pcf85363_probe,
};
module_i2c_driver(pcf85363_driver);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 7e720472213c..ea82b89d8929 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -612,7 +612,7 @@ static struct i2c_driver pcf8563_driver = {
.name = "rtc-pcf8563",
.of_match_table = of_match_ptr(pcf8563_of_match),
},
- .probe_new = pcf8563_probe,
+ .probe = pcf8563_probe,
.id_table = pcf8563_id,
};
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 87074d178274..a7e0fc360b6a 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -306,7 +306,7 @@ static struct i2c_driver pcf8583_driver = {
.driver = {
.name = "pcf8583",
},
- .probe_new = pcf8583_probe,
+ .probe = pcf8583_probe,
.id_table = pcf8583_id,
};
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index b4c5d016eca3..a5a6c8772ecd 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -921,7 +921,7 @@ static struct i2c_driver rs5c372_driver = {
.name = "rtc-rs5c372",
.of_match_table = of_match_ptr(rs5c372_of_match),
},
- .probe_new = rs5c372_probe,
+ .probe = rs5c372_probe,
.remove = rs5c372_remove,
.id_table = rs5c372_id,
};
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index ec5d7a614e2d..076e56f4e01a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -994,13 +994,20 @@ static const __maybe_unused struct of_device_id rv3028_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rv3028_of_match);
+static const struct i2c_device_id rv3028_id_table[] = {
+ { .name = "rv3028", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rv3028_id_table);
+
static struct i2c_driver rv3028_driver = {
.driver = {
.name = "rtc-rv3028",
.acpi_match_table = rv3028_i2c_acpi_match,
.of_match_table = of_match_ptr(rv3028_of_match),
},
- .probe_new = rv3028_probe,
+ .id_table = rv3028_id_table,
+ .probe = rv3028_probe,
};
module_i2c_driver(rv3028_driver);
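Besides the probe rename, the rv3028 hunk adds an i2c_device_id table, which gives the driver an "i2c:" modalias for module autoloading and lets the chip be instantiated by name. A hedged illustration of the latter, assuming board code; the bus number is a placeholder, while 0x52 is the usual RV-3028 slave address.

#include <linux/i2c.h>

static struct i2c_board_info rv3028_board_info __initdata = {
	I2C_BOARD_INFO("rv3028", 0x52),
};

/* typically registered early from platform init code, e.g.: */
/* i2c_register_board_info(1, &rv3028_board_info, 1); */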
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 0852f6709a85..4a81feeb00ff 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -824,7 +824,7 @@ static struct i2c_driver rv3029_driver = {
.name = "rv3029",
.of_match_table = of_match_ptr(rv3029_of_match),
},
- .probe_new = rv3029_i2c_probe,
+ .probe = rv3029_i2c_probe,
.id_table = rv3029_id,
};
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index 1ff4f2e6fa77..6b8eb2039a33 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -842,7 +842,7 @@ static int rv3032_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
return err;
}
-static const struct hwmon_channel_info *rv3032_hwmon_info[] = {
+static const struct hwmon_channel_info * const rv3032_hwmon_info[] = {
HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
NULL
@@ -998,7 +998,7 @@ static struct i2c_driver rv3032_driver = {
.acpi_match_table = rv3032_i2c_acpi_match,
.of_match_table = of_match_ptr(rv3032_of_match),
},
- .probe_new = rv3032_probe,
+ .probe = rv3032_probe,
};
module_i2c_driver(rv3032_driver);
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 25c3b9e4f515..98679cae13e8 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -739,7 +739,7 @@ static struct i2c_driver rv8803_driver = {
.name = "rtc-rv8803",
.of_match_table = of_match_ptr(rv8803_of_match),
},
- .probe_new = rv8803_probe,
+ .probe = rv8803_probe,
.id_table = rv8803_id,
};
module_i2c_driver(rv8803_driver);
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 37608883a796..8702db6096ba 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -462,7 +462,7 @@ static struct i2c_driver rx6110_i2c_driver = {
.name = RX6110_DRIVER_NAME,
.acpi_match_table = rx6110_i2c_acpi_match,
},
- .probe_new = rx6110_i2c_probe,
+ .probe = rx6110_i2c_probe,
.id_table = rx6110_i2c_id,
};
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index b9c8dad26208..f44e212c07de 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -424,7 +424,7 @@ static struct i2c_driver rx8010_driver = {
.name = "rtc-rx8010",
.of_match_table = of_match_ptr(rx8010_of_match),
},
- .probe_new = rx8010_probe,
+ .probe = rx8010_probe,
.id_table = rx8010_id,
};
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 331c20d4d843..aabe62c283a1 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -581,7 +581,7 @@ static struct i2c_driver rx8025_driver = {
.driver = {
.name = "rtc-rx8025",
},
- .probe_new = rx8025_probe,
+ .probe = rx8025_probe,
.id_table = rx8025_id,
};
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 14edb7534c97..82881fd2e14a 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -325,7 +325,7 @@ static struct i2c_driver rx8581_driver = {
.name = "rtc-rx8581",
.of_match_table = of_match_ptr(rx8581_of_match),
},
- .probe_new = rx8581_probe,
+ .probe = rx8581_probe,
.id_table = rx8581_id,
};
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index b18daaf72b17..90a3028ac574 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -499,7 +499,7 @@ static struct i2c_driver s35390a_driver = {
.name = "rtc-s35390a",
.of_match_table = of_match_ptr(s35390a_of_match),
},
- .probe_new = s35390a_probe,
+ .probe = s35390a_probe,
.id_table = s35390a_id,
};
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index e2f90d768ca8..7760394ccd2d 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -217,7 +217,7 @@ static struct i2c_driver sd3078_driver = {
.name = "sd3078",
.of_match_table = of_match_ptr(rtc_dt_match),
},
- .probe_new = sd3078_probe,
+ .probe = sd3078_probe,
.id_table = sd3078_id,
};
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 0f8e4231098e..d492a2d26600 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -228,17 +228,13 @@ static int st_rtc_probe(struct platform_device *pdev)
enable_irq_wake(rtc->irq);
disable_irq(rtc->irq);
- rtc->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(rtc->clk)) {
- dev_err(&pdev->dev, "Unable to request clock\n");
- return PTR_ERR(rtc->clk);
- }
-
- clk_prepare_enable(rtc->clk);
+ rtc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(rtc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rtc->clk),
+ "Unable to request clock\n");
rtc->clkrate = clk_get_rate(rtc->clk);
if (!rtc->clkrate) {
- clk_disable_unprepare(rtc->clk);
dev_err(&pdev->dev, "Unable to fetch clock rate\n");
return -EINVAL;
}
@@ -252,10 +248,8 @@ static int st_rtc_probe(struct platform_device *pdev)
do_div(rtc->rtc_dev->range_max, rtc->clkrate);
ret = devm_rtc_register_device(rtc->rtc_dev);
- if (ret) {
- clk_disable_unprepare(rtc->clk);
+ if (ret)
return ret;
- }
return 0;
}
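The rtc-st-lpc hunk above replaces the clk_get()/clk_prepare_enable() pair with devm_clk_get_enabled(), so the clock is acquired and enabled in one managed call and released automatically on unbind, and the error paths no longer need clk_disable_unprepare(). A minimal sketch of that pattern with placeholder names:

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	unsigned long rate;

	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Unable to request clock\n");

	rate = clk_get_rate(clk);
	if (!rate)
		return -EINVAL;	/* no manual clock cleanup needed here */

	return 0;
}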
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 229cb2847cc4..3d36e11cff80 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -114,7 +114,6 @@ struct stm32_rtc_data {
void (*clear_events)(struct stm32_rtc *rtc, unsigned int flags);
bool has_pclk;
bool need_dbp;
- bool has_wakeirq;
};
struct stm32_rtc {
@@ -127,7 +126,6 @@ struct stm32_rtc {
struct clk *rtc_ck;
const struct stm32_rtc_data *data;
int irq_alarm;
- int wakeirq_alarm;
};
static void stm32_rtc_wpr_unlock(struct stm32_rtc *rtc)
@@ -547,7 +545,6 @@ static void stm32_rtc_clear_events(struct stm32_rtc *rtc,
static const struct stm32_rtc_data stm32_rtc_data = {
.has_pclk = false,
.need_dbp = true,
- .has_wakeirq = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -569,7 +566,6 @@ static const struct stm32_rtc_data stm32_rtc_data = {
static const struct stm32_rtc_data stm32h7_rtc_data = {
.has_pclk = true,
.need_dbp = true,
- .has_wakeirq = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -600,7 +596,6 @@ static void stm32mp1_rtc_clear_events(struct stm32_rtc *rtc,
static const struct stm32_rtc_data stm32mp1_data = {
.has_pclk = true,
.need_dbp = false,
- .has_wakeirq = true,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -779,19 +774,12 @@ static int stm32_rtc_probe(struct platform_device *pdev)
}
ret = device_init_wakeup(&pdev->dev, true);
- if (rtc->data->has_wakeirq) {
- rtc->wakeirq_alarm = platform_get_irq(pdev, 1);
- if (rtc->wakeirq_alarm > 0) {
- ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
- rtc->wakeirq_alarm);
- } else {
- ret = rtc->wakeirq_alarm;
- if (rtc->wakeirq_alarm == -EPROBE_DEFER)
- goto err;
- }
- }
if (ret)
- dev_warn(&pdev->dev, "alarm can't wake up the system: %d", ret);
+ goto err;
+
+ ret = dev_pm_set_wake_irq(&pdev->dev, rtc->irq_alarm);
+ if (ret)
+ goto err;
platform_set_drvdata(pdev, rtc);
@@ -879,9 +867,6 @@ static int stm32_rtc_suspend(struct device *dev)
if (rtc->data->has_pclk)
clk_disable_unprepare(rtc->pclk);
- if (device_may_wakeup(dev))
- return enable_irq_wake(rtc->irq_alarm);
-
return 0;
}
@@ -903,9 +888,6 @@ static int stm32_rtc_resume(struct device *dev)
return ret;
}
- if (device_may_wakeup(dev))
- return disable_irq_wake(rtc->irq_alarm);
-
return ret;
}
#endif
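The stm32 RTC change above drops the per-suspend enable_irq_wake()/disable_irq_wake() calls and instead registers the alarm interrupt as a wake IRQ once at probe time, letting the PM core arm and disarm it around system sleep. A small sketch of that setup step, assuming a helper with invented names rather than the driver's actual probe flow:

#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int example_setup_wake(struct device *dev, int alarm_irq)
{
	int ret;

	ret = device_init_wakeup(dev, true);
	if (ret)
		return ret;

	/* PM core now toggles irq_wake for us across suspend/resume */
	return dev_pm_set_wake_irq(dev, alarm_irq);
}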
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index f587afa84357..807f953ae0ae 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -679,7 +679,7 @@ static struct i2c_driver x1205_driver = {
.name = "rtc-x1205",
.of_match_table = x1205_dt_ids,
},
- .probe_new = x1205_probe,
+ .probe = x1205_probe,
.remove = x1205_remove,
.id_table = x1205_id,
};
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f956a4ac9881..2e4e555b37c3 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,7 +24,7 @@
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 113c509bf6d0..8587e423169e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -21,13 +21,13 @@
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index bcb67fa747a7..c06fa2b27120 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -16,10 +16,10 @@
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
-#include <asm/io.h>
#include <asm/ccwdev.h>
#include "dasd_int.h"
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 200f88f0e451..09acf3853a77 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -20,8 +20,8 @@
#include <linux/pfn_t.h>
#include <linux/uio.h>
#include <linux/dax.h>
+#include <linux/io.h>
#include <asm/extmem.h>
-#include <asm/io.h>
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
@@ -54,7 +54,8 @@ static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
&kaddr, NULL);
if (rc < 0)
- return rc;
+ return dax_mem2blk_err(rc);
+
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
return 0;
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 80c4e5101c97..8a03af5ee5b3 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -96,7 +96,7 @@ config SCLP_OFB
config S390_UV_UAPI
def_tristate m
prompt "Ultravisor userspace API"
- depends on S390
+ depends on S390 && (KVM || PROTECTED_VIRTUALIZATION_GUEST)
help
Selecting exposes parts of the UV interface to userspace
by providing a misc character device at /dev/uv.
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 0b05cd76b7d0..a1fef666c9b0 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/ebcdic.h>
#include <linux/uaccess.h>
#include <asm/delay.h>
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 9cd1ea92d619..bc5193d81f9c 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -22,8 +22,8 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/ebcdic.h>
-#include <asm/io.h>
#include <asm/appldata.h>
#include <asm/monwriter.h>
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index 1d40457c7b10..144cd2e03590 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -32,6 +32,55 @@
#include <asm/uvdevice.h>
#include <asm/uv.h>
+#define BIT_UVIO_INTERNAL U32_MAX
+/* Mapping from IOCTL-nr to UVC-bit */
+static const u32 ioctl_nr_to_uvc_bit[] __initconst = {
+ [UVIO_IOCTL_UVDEV_INFO_NR] = BIT_UVIO_INTERNAL,
+ [UVIO_IOCTL_ATT_NR] = BIT_UVC_CMD_RETR_ATTEST,
+ [UVIO_IOCTL_ADD_SECRET_NR] = BIT_UVC_CMD_ADD_SECRET,
+ [UVIO_IOCTL_LIST_SECRETS_NR] = BIT_UVC_CMD_LIST_SECRETS,
+ [UVIO_IOCTL_LOCK_SECRETS_NR] = BIT_UVC_CMD_LOCK_SECRETS,
+};
+
+static_assert(ARRAY_SIZE(ioctl_nr_to_uvc_bit) == UVIO_IOCTL_NUM_IOCTLS);
+
+static struct uvio_uvdev_info uvdev_info = {
+ .supp_uvio_cmds = GENMASK_ULL(UVIO_IOCTL_NUM_IOCTLS - 1, 0),
+};
+
+static void __init set_supp_uv_cmds(unsigned long *supp_uv_cmds)
+{
+ int i;
+
+ for (i = 0; i < UVIO_IOCTL_NUM_IOCTLS; i++) {
+ if (ioctl_nr_to_uvc_bit[i] == BIT_UVIO_INTERNAL)
+ continue;
+ if (!test_bit_inv(ioctl_nr_to_uvc_bit[i], uv_info.inst_calls_list))
+ continue;
+ __set_bit(i, supp_uv_cmds);
+ }
+}
+
+/**
+ * uvio_uvdev_info() - get information about the uvdevice
+ *
+ * @uv_ioctl: ioctl control block
+ *
+ * Lists all IOCTLs that are supported by this uvdevice
+ */
+static int uvio_uvdev_info(struct uvio_ioctl_cb *uv_ioctl)
+{
+ void __user *user_buf_arg = (void __user *)uv_ioctl->argument_addr;
+
+ if (uv_ioctl->argument_len < sizeof(uvdev_info))
+ return -EINVAL;
+ if (copy_to_user(user_buf_arg, &uvdev_info, sizeof(uvdev_info)))
+ return -EFAULT;
+
+ uv_ioctl->uv_rc = UVC_RC_EXECUTED;
+ return 0;
+}
+
static int uvio_build_uvcb_attest(struct uv_cb_attest *uvcb_attest, u8 *arcb,
u8 *meas, u8 *add_data, struct uvio_attest *uvio_attest)
{
@@ -185,8 +234,161 @@ out:
return ret;
}
-static int uvio_copy_and_check_ioctl(struct uvio_ioctl_cb *ioctl, void __user *argp)
+/** uvio_add_secret() - perform an Add Secret UVC
+ *
+ * @uv_ioctl: ioctl control block
+ *
+ * uvio_add_secret() performs the Add Secret Ultravisor Call.
+ *
+ * The given userspace argument address and size are verified to be
+ * valid but every other check is made by the Ultravisor
+ * (UV). Therefore UV errors won't result in a negative return
+ * value. The request is then copied to kernelspace, the UV-call is
+ * performed and the results are copied back to userspace.
+ *
+ * The argument has to point to an Add Secret Request Control Block
+ * which is an encrypted and cryptographically verified request that
+ * inserts a protected guest's secrets into the Ultravisor for later
+ * use.
+ *
+ * If the Add Secret UV facility is not present, UV will return
+ * invalid command rc. This won't be fenced in the driver and does not
+ * result in a negative return value.
+ *
+ * Context: might sleep
+ *
+ * Return: 0 on success or a negative error code on error.
+ */
+static int uvio_add_secret(struct uvio_ioctl_cb *uv_ioctl)
{
+ void __user *user_buf_arg = (void __user *)uv_ioctl->argument_addr;
+ struct uv_cb_guest_addr uvcb = {
+ .header.len = sizeof(uvcb),
+ .header.cmd = UVC_CMD_ADD_SECRET,
+ };
+ void *asrcb = NULL;
+ int ret;
+
+ if (uv_ioctl->argument_len > UVIO_ADD_SECRET_MAX_LEN)
+ return -EINVAL;
+ if (uv_ioctl->argument_len == 0)
+ return -EINVAL;
+
+ asrcb = kvzalloc(uv_ioctl->argument_len, GFP_KERNEL);
+ if (!asrcb)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(asrcb, user_buf_arg, uv_ioctl->argument_len))
+ goto out;
+
+ ret = 0;
+ uvcb.addr = (u64)asrcb;
+ uv_call_sched(0, (u64)&uvcb);
+ uv_ioctl->uv_rc = uvcb.header.rc;
+ uv_ioctl->uv_rrc = uvcb.header.rrc;
+
+out:
+ kvfree(asrcb);
+ return ret;
+}
+
+/** uvio_list_secrets() - perform a List Secret UVC
+ * @uv_ioctl: ioctl control block
+ *
+ * uvio_list_secrets() performs the List Secret Ultravisor Call. It verifies
+ * that the given userspace argument address is valid and its size is sane.
+ * Every other check is made by the Ultravisor (UV) and won't result in a
+ * negative return value. It builds the request, performs the UV-call, and
+ * copies the result to userspace.
+ *
+ * The argument specifies the location for the result of the UV-Call.
+ *
+ * If the List Secrets UV facility is not present, UV will return invalid
+ * command rc. This won't be fenced in the driver and does not result in a
+ * negative return value.
+ *
+ * Context: might sleep
+ *
+ * Return: 0 on success or a negative error code on error.
+ */
+static int uvio_list_secrets(struct uvio_ioctl_cb *uv_ioctl)
+{
+ void __user *user_buf_arg = (void __user *)uv_ioctl->argument_addr;
+ struct uv_cb_guest_addr uvcb = {
+ .header.len = sizeof(uvcb),
+ .header.cmd = UVC_CMD_LIST_SECRETS,
+ };
+ void *secrets = NULL;
+ int ret = 0;
+
+ if (uv_ioctl->argument_len != UVIO_LIST_SECRETS_LEN)
+ return -EINVAL;
+
+ secrets = kvzalloc(UVIO_LIST_SECRETS_LEN, GFP_KERNEL);
+ if (!secrets)
+ return -ENOMEM;
+
+ uvcb.addr = (u64)secrets;
+ uv_call_sched(0, (u64)&uvcb);
+ uv_ioctl->uv_rc = uvcb.header.rc;
+ uv_ioctl->uv_rrc = uvcb.header.rrc;
+
+ if (copy_to_user(user_buf_arg, secrets, UVIO_LIST_SECRETS_LEN))
+ ret = -EFAULT;
+
+ kvfree(secrets);
+ return ret;
+}
+
+/** uvio_lock_secrets() - perform a Lock Secret Store UVC
+ * @uv_ioctl: ioctl control block
+ *
+ * uvio_lock_secrets() performs the Lock Secret Store Ultravisor Call. It
+ * performs the UV-call and copies the return codes to the ioctl control block.
+ * After this call has been dispatched successfully, every subsequent Add Secret
+ * UVC and Lock Secrets UVC will fail with return code 0x102.
+ *
+ * The argument address and size must be 0.
+ *
+ * If the Lock Secrets UV facility is not present, UV will return invalid
+ * command rc. This won't be fenced in the driver and does not result in a
+ * negative return value.
+ *
+ * Context: might sleep
+ *
+ * Return: 0 on success or a negative error code on error.
+ */
+static int uvio_lock_secrets(struct uvio_ioctl_cb *ioctl)
+{
+ struct uv_cb_nodata uvcb = {
+ .header.len = sizeof(uvcb),
+ .header.cmd = UVC_CMD_LOCK_SECRETS,
+ };
+
+ if (ioctl->argument_addr || ioctl->argument_len)
+ return -EINVAL;
+
+ uv_call(0, (u64)&uvcb);
+ ioctl->uv_rc = uvcb.header.rc;
+ ioctl->uv_rrc = uvcb.header.rrc;
+
+ return 0;
+}
+
+static int uvio_copy_and_check_ioctl(struct uvio_ioctl_cb *ioctl, void __user *argp,
+ unsigned long cmd)
+{
+ u8 nr = _IOC_NR(cmd);
+
+ if (_IOC_DIR(cmd) != (_IOC_READ | _IOC_WRITE))
+ return -ENOIOCTLCMD;
+ if (_IOC_TYPE(cmd) != UVIO_TYPE_UVC)
+ return -ENOIOCTLCMD;
+ if (nr >= UVIO_IOCTL_NUM_IOCTLS)
+ return -ENOIOCTLCMD;
+ if (_IOC_SIZE(cmd) != sizeof(*ioctl))
+ return -ENOIOCTLCMD;
if (copy_from_user(ioctl, argp, sizeof(*ioctl)))
return -EFAULT;
if (ioctl->flags != 0)
@@ -194,7 +396,7 @@ static int uvio_copy_and_check_ioctl(struct uvio_ioctl_cb *ioctl, void __user *a
if (memchr_inv(ioctl->reserved14, 0, sizeof(ioctl->reserved14)))
return -EINVAL;
- return 0;
+ return nr;
}
/*
@@ -205,14 +407,28 @@ static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
struct uvio_ioctl_cb uv_ioctl = { };
long ret;
+ int nr;
- switch (cmd) {
- case UVIO_IOCTL_ATT:
- ret = uvio_copy_and_check_ioctl(&uv_ioctl, argp);
- if (ret)
- return ret;
+ nr = uvio_copy_and_check_ioctl(&uv_ioctl, argp, cmd);
+ if (nr < 0)
+ return nr;
+
+ switch (nr) {
+ case UVIO_IOCTL_UVDEV_INFO_NR:
+ ret = uvio_uvdev_info(&uv_ioctl);
+ break;
+ case UVIO_IOCTL_ATT_NR:
ret = uvio_attestation(&uv_ioctl);
break;
+ case UVIO_IOCTL_ADD_SECRET_NR:
+ ret = uvio_add_secret(&uv_ioctl);
+ break;
+ case UVIO_IOCTL_LIST_SECRETS_NR:
+ ret = uvio_list_secrets(&uv_ioctl);
+ break;
+ case UVIO_IOCTL_LOCK_SECRETS_NR:
+ ret = uvio_lock_secrets(&uv_ioctl);
+ break;
default:
ret = -ENOIOCTLCMD;
break;
@@ -245,6 +461,7 @@ static void __exit uvio_dev_exit(void)
static int __init uvio_dev_init(void)
{
+ set_supp_uv_cmds((unsigned long *)&uvdev_info.supp_uv_cmds);
return misc_register(&uvio_dev_miscdev);
}
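The uvdevice hunks above add several new ioctls and a generic dispatcher keyed on the ioctl number. A hedged userspace sketch of querying which calls the device supports via the new uvdevice-info ioctl; it assumes the UAPI header asm/uvdevice.h exports the UVIO_IOCTL_UVDEV_INFO number and the uvio_ioctl_cb/uvio_uvdev_info layouts implied by the kernel code above, and is not code from this series.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/uvdevice.h>

int main(void)
{
	struct uvio_uvdev_info info;
	struct uvio_ioctl_cb cb;
	int fd, rc;

	fd = open("/dev/uv", O_RDWR);
	if (fd < 0) {
		perror("open /dev/uv");
		return 1;
	}

	memset(&cb, 0, sizeof(cb));
	memset(&info, 0, sizeof(info));
	cb.argument_addr = (uint64_t)(uintptr_t)&info;
	cb.argument_len = sizeof(info);

	rc = ioctl(fd, UVIO_IOCTL_UVDEV_INFO, &cb);
	if (rc)
		perror("UVIO_IOCTL_UVDEV_INFO");
	else
		printf("supported uvio cmds bitmap: 0x%llx\n",
		       (unsigned long long)info.supp_uvio_cmds);

	close(fd);
	return rc ? 1 : 0;
}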
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index f0538609dfe4..aa3292e57e38 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,7 +152,7 @@ static ssize_t ccwgroup_online_show(struct device *dev,
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
- * longer needed or accidentially created. Saves memory :)
+ * longer needed or accidentally created. Saves memory :)
*/
static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
{
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index c0d620ffea61..4ca5adce9107 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -943,7 +943,7 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev,
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schib.pmcw.dev, rc);
if (old_enabled) {
- /* Try to reenable the old subchannel. */
+ /* Try to re-enable the old subchannel. */
spin_lock_irq(old_sch->lock);
cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
spin_unlock_irq(old_sch->lock);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 2b2058427a2b..c396ac3e3a32 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -310,7 +310,7 @@ static void ccw_device_oper_notify(struct ccw_device *cdev)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
- /* Reenable channel measurements, if needed. */
+ /* Re-enable channel measurements, if needed. */
ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
/* Save indication for new paths. */
cdev->private->path_new_mask = sch->vpm;
@@ -947,7 +947,7 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
*/
sch->lpm = sch->schib.pmcw.pam & sch->opm;
/*
- * Use the initial configuration since we can't be shure that the old
+ * Use the initial configuration since we can't be sure that the old
* paths are valid.
*/
io_subchannel_init_config(sch);
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 1c31e81ca8de..aafd66305ead 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -672,7 +672,7 @@ out_init:
/*
* Fetch one ccw.
* To reduce memory copy, we'll pin the cda page in memory,
- * and to get rid of the cda 2G limitiaion of ccw1, we'll translate
+ * and to get rid of the cda 2G limitation of ccw1, we'll translate
* direct ccws to idal ccws.
*/
static int ccwchain_fetch_one(struct ccw1 *ccw,
@@ -787,7 +787,7 @@ void cp_free(struct channel_program *cp)
* program.
*
* These APIs will copy the ccws into kernel-space buffers, and update
- * the guest phsical addresses with their corresponding host physical
+ * the guest physical addresses with their corresponding host physical
* addresses. Then channel I/O device drivers could issue the
* translated channel program to real devices to perform an I/O
* operation.
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8d6b9a52bf3c..420120be300f 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -497,7 +497,7 @@ static void ap_tasklet_fn(unsigned long dummy)
enum ap_sm_wait wait = AP_SM_WAIT_NONE;
/* Reset the indicator if interrupts are used. Thus new interrupts can
- * be received. Doing it in the beginning of the tasklet is therefor
+ * be received. Doing it in the beginning of the tasklet is therefore
* important that no requests on any AP get lost.
*/
if (ap_irq_flag)
@@ -2289,7 +2289,7 @@ static int __init ap_module_init(void)
timer_setup(&ap_config_timer, ap_config_timeout, 0);
/*
- * Setup the high resultion poll timer.
+ * Setup the high resolution poll timer.
* If we are running under z/VM adjust polling to z/VM polling rate.
*/
if (MACHINE_IS_VM)
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 101fb324476f..0d7b7eb374ad 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -233,30 +233,6 @@ struct ap_queue {
typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
-/* failure injection cmd struct */
-struct ap_fi {
- union {
- u16 cmd; /* fi flags + action */
- struct {
- u8 flags; /* fi flags only */
- u8 action; /* fi action only */
- };
- };
-};
-
-/* all currently known fi actions */
-enum ap_fi_actions {
- AP_FI_ACTION_CCA_AGENT_FF = 0x01,
- AP_FI_ACTION_CCA_DOM_INVAL = 0x02,
- AP_FI_ACTION_NQAP_QID_INVAL = 0x03,
-};
-
-/* all currently known fi flags */
-enum ap_fi_flags {
- AP_FI_FLAG_NO_RETRY = 0x01,
- AP_FI_FLAG_TOGGLE_SPECIAL = 0x02,
-};
-
struct ap_message {
struct list_head list; /* Request queueing. */
unsigned long psmid; /* Message id. */
@@ -264,7 +240,6 @@ struct ap_message {
size_t len; /* actual msg len in msg buffer */
size_t bufsize; /* allocated msg buffer size */
u16 flags; /* Flags, see AP_MSG_FLAG_xxx */
- struct ap_fi fi; /* Failure Injection cmd */
int rc; /* Return code for this message */
void *private; /* ap driver private pointer. */
/* receive is called from tasklet context */
@@ -384,7 +359,7 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
* like "+1-16,-32,-0x40,+128" where only single bits or ranges of
* bits are cleared or set. Distinction is done based on the very
* first character which may be '+' or '-' for the relative string
- * and othewise assume to be an absolute value string. If parsing fails
+ * and otherwise assume to be an absolute value string. If parsing fails
* a negative errno value is returned. All arguments and bitmaps are
* big endian order.
*/
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ed8f813653fe..30df83735adf 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -274,13 +274,6 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
- AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
- __func__, ap_msg->fi.cmd);
- qid = 0xFF00;
- }
-#endif
status = __ap_send(qid, ap_msg->psmid,
ap_msg->msg, ap_msg->len,
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index a8f58e133e6e..b441745b0418 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -445,7 +445,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
q->saved_isc = isc;
break;
case AP_RESPONSE_OTHERWISE_CHANGED:
- /* We could not modify IRQ setings: clear new configuration */
+ /* We could not modify IRQ settings: clear new configuration */
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
kvm_s390_gisc_unregister(kvm, isc);
break;
@@ -524,7 +524,7 @@ static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
* Response.status may be set to following Response Code:
* - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
* - AP_RESPONSE_DECONFIGURED: if the queue is not configured
- * - AP_RESPONSE_NORMAL (0) : in case of successs
+ * - AP_RESPONSE_NORMAL (0) : in case of success
* Check vfio_ap_setirq() and vfio_ap_clrirq() for other possible RC.
* We take the matrix_dev lock to ensure serialization on queues and
* mediated device access.
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 444ef95d3f59..4b23c9f7f3e5 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -111,8 +111,6 @@ EXPORT_SYMBOL(zcrypt_msgtype);
* Multi device nodes extension functions.
*/
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
-
struct zcdn_device;
static struct class *zcrypt_class;
@@ -477,8 +475,6 @@ static void zcdn_destroy_all(void)
mutex_unlock(&ap_perms_mutex);
}
-#endif
-
/*
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
*
@@ -510,7 +506,6 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
{
struct ap_perms *perms = &ap_perms;
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
@@ -522,7 +517,6 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
if (zcdndev)
perms = &zcdndev->perms;
}
-#endif
filp->private_data = (void *)perms;
atomic_inc(&zcrypt_open_count);
@@ -536,7 +530,6 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
*/
static int zcrypt_release(struct inode *inode, struct file *filp)
{
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
@@ -549,7 +542,6 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
put_device(&zcdndev->device);
}
}
-#endif
atomic_dec(&zcrypt_open_count);
return 0;
@@ -661,11 +653,6 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
ap_init_message(&ap_msg);
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (tr && tr->fi.cmd)
- ap_msg.fi.cmd = tr->fi.cmd;
-#endif
-
if (mex->outputdatalength < mex->inputdatalength) {
func_code = 0;
rc = -EINVAL;
@@ -687,7 +674,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for usable accelarator or CCA card */
+ /* Check for usable accelerator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x18000000))
continue;
@@ -771,11 +758,6 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
ap_init_message(&ap_msg);
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (tr && tr->fi.cmd)
- ap_msg.fi.cmd = tr->fi.cmd;
-#endif
-
if (crt->outputdatalength < crt->inputdatalength) {
func_code = 0;
rc = -EINVAL;
@@ -797,7 +779,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for usable accelarator or CCA card */
+ /* Check for usable accelerator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x18000000))
continue;
@@ -883,16 +865,6 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
xcrb->status = 0;
ap_init_message(&ap_msg);
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (tr && tr->fi.cmd)
- ap_msg.fi.cmd = tr->fi.cmd;
- if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
- ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
- __func__, tr->fi.cmd);
- xcrb->agent_ID = 0x4646;
- }
-#endif
-
rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out;
@@ -982,14 +954,6 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
if (*domain == AUTOSEL_DOM)
*domain = AP_QID_QUEUE(qid);
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) {
- ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n",
- __func__, tr->fi.cmd);
- *domain = 99;
- }
-#endif
-
rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
@@ -1058,11 +1022,6 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
ap_init_message(&ap_msg);
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (tr && tr->fi.cmd)
- ap_msg.fi.cmd = tr->fi.cmd;
-#endif
-
target_num = (unsigned short)xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
@@ -1473,23 +1432,10 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
if (copy_from_user(&mex, umex, sizeof(mex)))
return -EFAULT;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (mex.inputdatalength & (1U << 31)) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- tr.fi.cmd = (u16)(mex.inputdatalength >> 16);
- }
- mex.inputdatalength &= 0x0000FFFF;
-#endif
-
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
if (rc == -EAGAIN)
tr.again_counter++;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
- break;
-#endif
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
@@ -1518,23 +1464,10 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
if (copy_from_user(&crt, ucrt, sizeof(crt)))
return -EFAULT;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (crt.inputdatalength & (1U << 31)) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- tr.fi.cmd = (u16)(crt.inputdatalength >> 16);
- }
- crt.inputdatalength &= 0x0000FFFF;
-#endif
-
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
if (rc == -EAGAIN)
tr.again_counter++;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
- break;
-#endif
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
@@ -1563,23 +1496,10 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if ((xcrb.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- tr.fi.cmd = (u16)(xcrb.status >> 16);
- }
- xcrb.status = 0;
-#endif
-
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
- break;
-#endif
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
@@ -1609,23 +1529,10 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (xcrb.req_len & (1ULL << 63)) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- tr.fi.cmd = (u16)(xcrb.req_len >> 48);
- }
- xcrb.req_len &= 0x0000FFFFFFFFFFFFULL;
-#endif
-
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
- break;
-#endif
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
@@ -1668,14 +1575,16 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
size_t total_size = MAX_ZDEV_ENTRIES_EXT
* sizeof(struct zcrypt_device_status_ext);
- device_status = kzalloc(total_size, GFP_KERNEL);
+ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
if (copy_to_user((char __user *)arg, device_status,
total_size))
rc = -EFAULT;
- kfree(device_status);
+ kvfree(device_status);
return rc;
}
case ZCRYPT_STATUS_MASK: {
@@ -2144,8 +2053,6 @@ void zcrypt_debug_exit(void)
debug_unregister(zcrypt_dbf_info);
}
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
-
static int __init zcdn_init(void)
{
int rc;
@@ -2203,8 +2110,6 @@ static void zcdn_exit(void)
class_destroy(zcrypt_class);
}
-#endif
-
/*
* zcrypt_api_init(): Module initialization.
*
@@ -2218,11 +2123,9 @@ int __init zcrypt_api_init(void)
if (rc)
goto out;
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
rc = zcdn_init();
if (rc)
goto out;
-#endif
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
@@ -2235,9 +2138,7 @@ int __init zcrypt_api_init(void)
return 0;
out_misc_register_failed:
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
zcdn_exit();
-#endif
zcrypt_debug_exit();
out:
return rc;
@@ -2250,9 +2151,7 @@ out:
*/
void __exit zcrypt_api_exit(void)
{
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
zcdn_exit();
-#endif
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index f299deb8b8c7..de659954c8f7 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -60,9 +60,6 @@ struct zcrypt_track {
int again_counter; /* retry attempts counter */
int last_qid; /* last qid used */
int last_rc; /* last return code */
-#ifdef CONFIG_ZCRYPT_DEBUG
- struct ap_fi fi; /* failure injection cmd */
-#endif
};
/* defines related to message tracking */
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 8c8808cc68a4..263fe182648b 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -689,7 +689,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
goto out;
}
- /* copy the tanslated protected key */
+ /* copy the translated protected key */
switch (prepparm->lv3.ckb.len) {
case 16 + 32:
/* AES 128 protected key */
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 78bf5631848e..5ddf02f965f9 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -115,7 +115,7 @@ struct eccprivkeytoken {
u64 mkvp; /* master key verification pattern */
u8 opk[48]; /* encrypted object protection key data */
u16 adatalen; /* associated data length in bytes */
- u16 fseclen; /* formated section length in bytes */
+ u16 fseclen; /* formatted section length in bytes */
u8 more_data[]; /* more data follows */
} __packed;
@@ -232,7 +232,7 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
* the number of apqns stored into the list is returned in *nr_apqns. One apqn
* entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and
* may be casted to struct pkey_apqn. The return value is either 0 for success
- * or a negative errno value. If no apqn meeting the criterias is found,
+ * or a negative errno value. If no apqn meeting the criteria is found,
* -ENODEV is returned.
*/
int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index f67d19d08571..958f5ee47f1b 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -1368,7 +1368,7 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
goto out;
}
- /* copy the tanslated protected key */
+ /* copy the translated protected key */
if (wki->pkeysize > *protkeylen) {
DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
__func__, wki->pkeysize, *protkeylen);
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
index 07445041869f..a3eddf51242d 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.h
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -131,14 +131,14 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
* - if minapi > 0 only apqns with API_ord_nr >= minapi
* - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches
* to the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
- * key for this domain. When a wkvp is given there will aways be a re-fetch
+ * key for this domain. When a wkvp is given there will always be a re-fetch
* of the domain info for the potential apqn - so this triggers an request
* reply to each apqn eligible.
* The array of apqn entries is allocated with kmalloc and returned in *apqns;
* the number of apqns stored into the list is returned in *nr_apqns. One apqn
* entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and
* may be casted to struct pkey_apqn. The return value is either 0 for success
- * or a negative errno value. If no apqn meeting the criterias is found,
+ * or a negative errno value. If no apqn meeting the criteria is found,
* -ENODEV is returned.
*/
int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 05ace18c12b0..51f8f7a463f7 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -246,11 +246,6 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
copy_from_user(inp, mex->inputdata, mod_len))
return -EFAULT;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
- ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
-#endif
-
return 0;
}
@@ -338,11 +333,6 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
copy_from_user(inp, crt->inputdata, mod_len))
return -EFAULT;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
- ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
-#endif
-
return 0;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 2f9bf23fbb44..67fd2ec9c5a1 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -425,11 +425,6 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
memcmp(function_code, "AU", 2) == 0)
ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
- ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
-#endif
-
/* check CPRB minor version, set info bits in ap_message flag field */
switch (*(unsigned short *)(&msg->cprbx.func_id[0])) {
case 0x5432: /* "T2" */
@@ -535,11 +530,6 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
if (msg->cprbx.flags & 0x20)
ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
-#ifdef CONFIG_ZCRYPT_DEBUG
- if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
- ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
-#endif
-
/* set info bits in ap_message flag field */
if (msg->cprbx.flags & 0x80)
ap_msg->flags |= AP_MSG_FLAG_ADMIN;
@@ -1143,6 +1133,9 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
ap_cancel_message(zq->queue, ap_msg);
}
+ if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
+ rc = -EIO; /* do not retry administrative requests */
+
out:
if (rc)
ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
@@ -1263,6 +1256,9 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
ap_cancel_message(zq->queue, ap_msg);
}
+ if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
+ rc = -EIO; /* do not retry administrative requests */
+
out:
if (rc)
ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index c44ba88f9f47..7a2f34a5e0e0 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -43,13 +43,13 @@
#include <linux/netdevice.h>
#include <net/dst.h>
-#include <linux/io.h> /* instead of <asm/io.h> ok ? */
-#include <asm/ccwdev.h>
-#include <asm/ccwgroup.h>
-#include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */
-#include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/moduleparam.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
#include <asm/idals.h>
#include "ctcm_main.h"
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 66076cada8ae..8852b03f943b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -47,7 +47,7 @@
#include <linux/ctype.h>
#include <net/dst.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/ebcdic.h>
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 9f90a860ca2c..a6b64228ead2 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -625,7 +625,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4);
+ return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index aafce8d00000..a536dd6f4f7c 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -226,8 +226,10 @@ static int dax_ccb_info(u64 ca, struct ccb_info_result *info);
static int dax_ccb_kill(u64 ca, u16 *kill_res);
static struct cdev c_dev;
-static struct class *cl;
static dev_t first;
+static const struct class cl = {
+ .name = DAX_NAME,
+};
static int max_ccb_version;
static int dax_debug;
@@ -323,14 +325,11 @@ static int __init dax_attach(void)
goto done;
}
- cl = class_create(DAX_NAME);
- if (IS_ERR(cl)) {
- dax_err("class_create failed");
- ret = PTR_ERR(cl);
+ ret = class_register(&cl);
+ if (ret)
goto class_error;
- }
- if (device_create(cl, NULL, first, NULL, dax_name) == NULL) {
+ if (device_create(&cl, NULL, first, NULL, dax_name) == NULL) {
dax_err("device_create failed");
ret = -ENXIO;
goto device_error;
@@ -347,9 +346,9 @@ static int __init dax_attach(void)
goto done;
cdev_error:
- device_destroy(cl, first);
+ device_destroy(&cl, first);
device_error:
- class_destroy(cl);
+ class_unregister(&cl);
class_error:
unregister_chrdev_region(first, 1);
done:
@@ -362,8 +361,8 @@ static void __exit dax_detach(void)
{
pr_info("Cleaning up DAX module\n");
cdev_del(&c_dev);
- device_destroy(cl, first);
- class_destroy(cl);
+ device_destroy(&cl, first);
+ class_unregister(&cl);
unregister_chrdev_region(first, 1);
}
module_exit(dax_detach);
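The oradax hunks above convert the driver from a class_create()-allocated class to a statically defined struct class registered with class_register(). A sketch of that pattern with invented names, assuming the constified class API used elsewhere in this diff:

#include <linux/device.h>
#include <linux/device/class.h>
#include <linux/err.h>
#include <linux/kdev_t.h>

static const struct class example_class = {
	.name = "example",
};

static int example_init(dev_t devt)
{
	struct device *dev;
	int ret;

	ret = class_register(&example_class);
	if (ret)
		return ret;

	dev = device_create(&example_class, NULL, devt, NULL, "example0");
	if (IS_ERR(dev)) {
		class_unregister(&example_class);
		return PTR_ERR(dev);
	}
	return 0;
}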
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
index 869a979eb5b2..582d22d54689 100644
--- a/drivers/scsi/isci/scu_task_context.h
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -845,7 +845,7 @@ struct scu_task_context {
/**
* This field is used by the SCU TL to determine when to take a snapshot when
- * tranmitting read data frames.
+ * transmitting read data frames.
* - 0x00 The entire IO
* - 0x01 32k
* - 0x02 64k
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 663755842e4a..aaea3e31944d 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -365,7 +365,7 @@ struct lpfc_name {
uint8_t IEEE[6]; /* FC IEEE address */
} s;
uint8_t wwn[8];
- uint64_t name;
+ uint64_t name __packed __aligned(4);
} u;
};
@@ -850,7 +850,7 @@ typedef struct _ADISC { /* Structure is in Big Endian format */
struct lpfc_name portName;
struct lpfc_name nodeName;
uint32_t DID;
-} __packed ADISC;
+} ADISC;
typedef struct _FARP { /* Structure is in Big Endian format */
uint32_t Mflags:8;
@@ -880,7 +880,7 @@ typedef struct _FAN { /* Structure is in Big Endian format */
uint32_t Fdid;
struct lpfc_name FportName;
struct lpfc_name FnodeName;
-} __packed FAN;
+} FAN;
typedef struct _SCR { /* Structure is in Big Endian format */
uint8_t resvd1;
@@ -924,7 +924,7 @@ typedef struct _RNID { /* Structure is in Big Endian format */
union {
RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
} un;
-} __packed RNID;
+} RNID;
struct RLS { /* Structure is in Big Endian format */
uint32_t rls;
@@ -1514,7 +1514,7 @@ struct lpfc_fdmi_hba_ident {
struct lpfc_fdmi_reg_port_list {
__be32 EntryCnt;
struct lpfc_fdmi_port_entry pe;
-} __packed;
+};
/*
* Register HBA(RHBA)
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 4458449c960b..35869b4f9329 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4555,7 +4555,7 @@ static void ncr_detach(struct ncb *np)
char inst_name[16];
/* Local copy so we don't access np after freeing it! */
- strlcpy(inst_name, ncr_name(np), sizeof(inst_name));
+ strscpy(inst_name, ncr_name(np), sizeof(inst_name));
printk("%s: releasing host resources\n", ncr_name(np));
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c4bf99a842f3..d0911bc28663 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -586,31 +586,22 @@ static bool scsi_cdl_check_cmd(struct scsi_device *sdev, u8 opcode, u16 sa,
if ((buf[1] & 0x03) != 0x03)
return false;
- /* See SPC-6, one command format of REPORT SUPPORTED OPERATION CODES */
+ /*
+ * See SPC-6, One_command parameter data format for
+ * REPORT SUPPORTED OPERATION CODES. We have the following cases
+ * depending on rwcdlp (buf[0] & 0x01) value:
+ * - rwcdlp == 0: then cdlp indicates support for the A mode page when
+ * it is equal to 1 and for the B mode page when it is
+ * equal to 2.
+ * - rwcdlp == 1: then cdlp indicates support for the T2A mode page
+ * when it is equal to 1 and for the T2B mode page when
+ * it is equal to 2.
+ * Overall, to detect support for command duration limits, we only need
+ * to check that cdlp is 1 or 2.
+ */
cdlp = (buf[1] & 0x18) >> 3;
- if (buf[0] & 0x01) {
- /* rwcdlp == 1 */
- switch (cdlp) {
- case 0x01:
- /* T2A page */
- return true;
- case 0x02:
- /* T2B page */
- return true;
- }
- } else {
- /* rwcdlp == 0 */
- switch (cdlp) {
- case 0x01:
- /* A page */
- return true;
- case 0x02:
- /* B page */
- return true;
- }
- }
- return false;
+ return cdlp == 0x01 || cdlp == 0x02;
}
/**
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index f960b5095d09..e392eaf5b2bf 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -982,12 +982,12 @@ struct report_phys_lun_16byte_wwid {
struct report_phys_lun_8byte_wwid_list {
struct report_lun_header header;
- struct report_phys_lun_8byte_wwid lun_entries[1];
+ struct report_phys_lun_8byte_wwid lun_entries[];
};
struct report_phys_lun_16byte_wwid_list {
struct report_lun_header header;
- struct report_phys_lun_16byte_wwid lun_entries[1];
+ struct report_phys_lun_16byte_wwid lun_entries[];
};
struct raid_map_disk_data {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 19af36e9a16d..6aaaa7ebca37 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1203,7 +1203,6 @@ static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **b
unsigned int i;
u8 rpl_response_format;
u32 num_physicals;
- size_t rpl_16byte_wwid_list_length;
void *rpl_list;
struct report_lun_header *rpl_header;
struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
@@ -1232,9 +1231,9 @@ static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **b
rpl_8byte_wwid_list = rpl_list;
num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
- rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));
- rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
+ rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
+ num_physicals), GFP_KERNEL);
if (!rpl_16byte_wwid_list)
return -ENOMEM;
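[Editor's note, not part of the patch above: the kernel's struct_size() helper computes the allocation size for a header plus a flexible-array member and saturates on overflow. A minimal user-space sketch of the equivalent arithmetic, with hypothetical struct names:]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { uint8_t wwid[16]; };

struct wwid_list {
	uint32_t length;
	struct entry entries[];          /* flexible array member, no [1] placeholder */
};

int main(void)
{
	unsigned int n = 4;
	/* struct_size(list, entries, n) evaluates to this sum, plus overflow checks */
	size_t sz = sizeof(struct wwid_list) + n * sizeof(struct entry);
	struct wwid_list *list = malloc(sz);

	if (!list)
		return 1;
	printf("%zu bytes for the header plus %u entries\n", sz, n);
	free(list);
	return 0;
}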
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 4d7895bdeaf2..df48fbea4b68 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -369,7 +369,6 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
{
struct pmic_glink_altmode_port *alt_port;
struct pmic_glink_altmode *altmode;
- struct typec_altmode_desc mux_desc = {};
const struct of_device_id *match;
struct fwnode_handle *fwnode;
struct device *dev = &adev->dev;
@@ -427,9 +426,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->dp_alt.mode = USB_TYPEC_DP_MODE;
alt_port->dp_alt.active = 1;
- mux_desc.svid = USB_TYPEC_DP_SID;
- mux_desc.mode = USB_TYPEC_DP_MODE;
- alt_port->typec_mux = fwnode_typec_mux_get(fwnode, &mux_desc);
+ alt_port->typec_mux = fwnode_typec_mux_get(fwnode);
if (IS_ERR(alt_port->typec_mux))
return dev_err_probe(dev, PTR_ERR(alt_port->typec_mux),
"failed to acquire mode-switch for port: %d\n",
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index fa71c9a36df7..4d8f3b7024ae 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -37,6 +37,7 @@ config SOUNDWIRE_INTEL
select SOUNDWIRE_GENERIC_ALLOCATION
select AUXILIARY_BUS
depends on ACPI && SND_SOC
+ depends on SND_SOC_SOF_HDA_MLINK || !SND_SOC_SOF_HDA_MLINK
help
SoundWire Intel Master driver.
If you have an Intel platform which has a SoundWire Master then
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index 925566ff4272..c3d3ab3262d3 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -24,7 +24,8 @@ soundwire-cadence-y := cadence_master.o
obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
#Intel driver
-soundwire-intel-y := intel.o intel_auxdevice.o intel_init.o dmi-quirks.o \
+soundwire-intel-y := intel.o intel_ace2x.o intel_ace2x_debugfs.o \
+ intel_auxdevice.o intel_init.o dmi-quirks.o \
intel_bus_common.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 9fb7f91ca182..08aeb7ed00e1 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -972,15 +972,18 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
return 0;
}
-static int amd_sdw_manager_remove(struct platform_device *pdev)
+static void amd_sdw_manager_remove(struct platform_device *pdev)
{
struct amd_sdw_manager *amd_manager = dev_get_drvdata(&pdev->dev);
+ int ret;
pm_runtime_disable(&pdev->dev);
cancel_work_sync(&amd_manager->probe_work);
amd_disable_sdw_interrupts(amd_manager);
sdw_bus_master_delete(&amd_manager->bus);
- return amd_disable_sdw_manager(amd_manager);
+ ret = amd_disable_sdw_manager(amd_manager);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to disable device (%pe)\n", ERR_PTR(ret));
}
static int amd_sdw_clock_stop(struct amd_sdw_manager *amd_manager)
@@ -1194,7 +1197,7 @@ static const struct dev_pm_ops amd_pm = {
static struct platform_driver amd_sdw_driver = {
.probe = &amd_sdw_manager_probe,
- .remove = &amd_sdw_manager_remove,
+ .remove_new = &amd_sdw_manager_remove,
.driver = {
.name = "amd_sdw_manager",
.pm = &amd_pm,
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 1ea6a64f8c4a..dba920ec88f6 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -69,8 +69,17 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
return -EINVAL;
}
- mutex_init(&bus->msg_lock);
- mutex_init(&bus->bus_lock);
+ /*
+ * Give each bus_lock and msg_lock a unique key so that lockdep won't
+ * trigger a deadlock warning when the locks of several buses are
+ * grabbed during configuration of a multi-bus stream.
+ */
+ lockdep_register_key(&bus->msg_lock_key);
+ __mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);
+
+ lockdep_register_key(&bus->bus_lock_key);
+ __mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);
+
INIT_LIST_HEAD(&bus->slaves);
INIT_LIST_HEAD(&bus->m_rt_list);
@@ -181,6 +190,8 @@ void sdw_bus_master_delete(struct sdw_bus *bus)
sdw_master_device_del(bus);
sdw_bus_debugfs_exit(bus);
+ lockdep_unregister_key(&bus->bus_lock_key);
+ lockdep_unregister_key(&bus->msg_lock_key);
ida_free(&sdw_bus_ida, bus->id);
}
EXPORT_SYMBOL(sdw_bus_master_delete);
@@ -769,6 +780,9 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
/* After xfer of msg, restore dev_num */
slave->dev_num = slave->dev_num_sticky;
+ if (bus->ops && bus->ops->new_peripheral_assigned)
+ bus->ops->new_peripheral_assigned(bus, dev_num);
+
return 0;
}
@@ -1588,7 +1602,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
unsigned long port;
bool slave_notify;
u8 sdca_cascade = 0;
- u8 buf, buf2[2], _buf, _buf2[2];
+ u8 buf, buf2[2];
bool parity_check;
bool parity_quirk;
@@ -1745,9 +1759,9 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
"SDW_SCP_INT1 recheck read failed:%d\n", ret);
goto io_err;
}
- _buf = ret;
+ buf = ret;
- ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, _buf2);
+ ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
if (ret < 0) {
dev_err(&slave->dev,
"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
@@ -1765,12 +1779,8 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
}
/*
- * Make sure no interrupts are pending, but filter to limit loop
- * to interrupts identified in the first status read
+ * Make sure no interrupts are pending
*/
- buf &= _buf;
- buf2[0] &= _buf2[0];
- buf2[1] &= _buf2[1];
stat = buf || buf2[0] || buf2[1] || sdca_cascade;
/*
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 39502bc75712..0efc1c3bee5f 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -283,6 +283,29 @@ static int cdns_config_update(struct sdw_cdns *cdns)
return ret;
}
+/**
+ * sdw_cdns_config_update() - Update configurations
+ * @cdns: Cadence instance
+ */
+void sdw_cdns_config_update(struct sdw_cdns *cdns)
+{
+ /* commit changes */
+ cdns_writel(cdns, CDNS_MCP_CONFIG_UPDATE, CDNS_MCP_CONFIG_UPDATE_BIT);
+}
+EXPORT_SYMBOL(sdw_cdns_config_update);
+
+/**
+ * sdw_cdns_config_update_set_wait() - wait until configuration update bit is self-cleared
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_config_update_set_wait(struct sdw_cdns *cdns)
+{
+ /* the hardware recommendation is to wait at least 300us */
+ return cdns_set_wait(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT, 0);
+}
+EXPORT_SYMBOL(sdw_cdns_config_update_set_wait);
+
/*
* debugfs
*/
@@ -433,9 +456,9 @@ static int cdns_parity_error_injection(void *data, u64 value)
CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR);
/* commit changes */
- cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
- CDNS_MCP_CONFIG_UPDATE_BIT,
- CDNS_MCP_CONFIG_UPDATE_BIT);
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, CDNS_MCP_CONFIG_UPDATE_BIT);
+ if (ret < 0)
+ goto unlock;
/* do a broadcast dummy read to avoid bus clashes */
ret = sdw_bread_no_pm_unlocked(&cdns->bus, 0xf, SDW_SCP_DEVID_0);
@@ -447,16 +470,17 @@ static int cdns_parity_error_injection(void *data, u64 value)
0);
/* commit changes */
- cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
- CDNS_MCP_CONFIG_UPDATE_BIT,
- CDNS_MCP_CONFIG_UPDATE_BIT);
-
- /* Continue bus operation with parity error injection disabled */
- mutex_unlock(&bus->bus_lock);
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, CDNS_MCP_CONFIG_UPDATE_BIT);
+ if (ret < 0)
+ goto unlock;
/* Userspace changed the hardware state behind the kernel's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+unlock:
+ /* Continue bus operation with parity error injection disabled */
+ mutex_unlock(&bus->bus_lock);
+
/*
* allow Master device to enter pm_runtime suspend. This may
* also result in Slave devices suspending.
@@ -1116,13 +1140,7 @@ int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
CDNS_MCP_CONTROL_HW_RST);
/* commit changes */
- cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
- CDNS_MCP_CONFIG_UPDATE_BIT,
- CDNS_MCP_CONFIG_UPDATE_BIT);
-
- /* don't wait here */
- return 0;
-
+ return cdns_config_update(cdns);
}
EXPORT_SYMBOL(sdw_cdns_exit_reset);
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 27c56274217f..bc84435e420f 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -14,6 +14,8 @@
*/
#define CDNS_MCP_IP_MAX_CMD_LEN 32
+#define SDW_CADENCE_MCP_IP_OFFSET 0x4000
+
/**
* struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
*
@@ -197,4 +199,7 @@ int cdns_set_sdw_stream(struct snd_soc_dai *dai,
void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string,
bool initial_delay, int reset_iterations);
+void sdw_cdns_config_update(struct sdw_cdns *cdns);
+int sdw_cdns_config_update_set_wait(struct sdw_cdns *cdns);
+
#endif /* __SDW_CADENCE_H */
diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
index dea782e0edc4..d1553cb77187 100644
--- a/drivers/soundwire/debugfs.c
+++ b/drivers/soundwire/debugfs.c
@@ -56,8 +56,9 @@ static int sdw_slave_reg_show(struct seq_file *s_file, void *data)
if (!buf)
return -ENOMEM;
- ret = pm_runtime_resume_and_get(&slave->dev);
+ ret = pm_runtime_get_sync(&slave->dev);
if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_noidle(&slave->dev);
kfree(buf);
return ret;
}
@@ -85,10 +86,17 @@ static int sdw_slave_reg_show(struct seq_file *s_file, void *data)
/* SCP registers */
ret += scnprintf(buf + ret, RD_BUF - ret, "\nSCP\n");
- for (i = SDW_SCP_INT1; i <= SDW_SCP_BANKDELAY; i++)
+ for (i = SDW_SCP_INT1; i <= SDW_SCP_BUS_CLOCK_BASE; i++)
ret += sdw_sprintf(slave, buf, ret, i);
for (i = SDW_SCP_DEVID_0; i <= SDW_SCP_DEVID_5; i++)
ret += sdw_sprintf(slave, buf, ret, i);
+ for (i = SDW_SCP_FRAMECTRL_B0; i <= SDW_SCP_BUSCLOCK_SCALE_B0; i++)
+ ret += sdw_sprintf(slave, buf, ret, i);
+ for (i = SDW_SCP_FRAMECTRL_B1; i <= SDW_SCP_BUSCLOCK_SCALE_B1; i++)
+ ret += sdw_sprintf(slave, buf, ret, i);
+ for (i = SDW_SCP_PHY_OUT_CTRL_0; i <= SDW_SCP_PHY_OUT_CTRL_7; i++)
+ ret += sdw_sprintf(slave, buf, ret, i);
+
/*
* SCP Bank 0/1 registers are read-only and cannot be
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
index 325c475b6a66..31162f2b5638 100644
--- a/drivers/soundwire/generic_bandwidth_allocation.c
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -139,20 +139,16 @@ static void _sdw_compute_port_params(struct sdw_bus *bus,
{
struct sdw_master_runtime *m_rt;
int hstop = bus->params.col - 1;
- int block_offset, port_bo, i;
+ int port_bo, i;
/* Run loop for all groups to compute transport parameters */
for (i = 0; i < count; i++) {
port_bo = 1;
- block_offset = 1;
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
- sdw_compute_master_ports(m_rt, &params[i],
- port_bo, hstop);
+ sdw_compute_master_ports(m_rt, &params[i], port_bo, hstop);
- block_offset += m_rt->ch_count *
- m_rt->stream->params.bps;
- port_bo = block_offset;
+ port_bo += m_rt->ch_count * m_rt->stream->params.bps;
}
hstop = hstop - params[i].hwidth;
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 238acf5c97a9..26d8485427dd 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -260,7 +260,7 @@ static void intel_shim_init(struct sdw_intel *sdw)
{
void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
- u16 ioctl = 0, act = 0;
+ u16 ioctl = 0, act;
/* Initialize Shim */
ioctl |= SDW_SHIM_IOCTL_BKE;
@@ -281,6 +281,7 @@ static void intel_shim_init(struct sdw_intel *sdw)
intel_shim_glue_to_master_ip(sdw);
+ act = intel_readw(shim, SDW_SHIM_CTMCTL(link_id));
u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
act |= SDW_SHIM_CTMCTL_DACTQE;
act |= SDW_SHIM_CTMCTL_DODS;
@@ -643,7 +644,7 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
}
static int intel_params_stream(struct sdw_intel *sdw,
- int stream,
+ struct snd_pcm_substream *substream,
struct snd_soc_dai *dai,
struct snd_pcm_hw_params *hw_params,
int link_id, int alh_stream_id)
@@ -651,7 +652,7 @@ static int intel_params_stream(struct sdw_intel *sdw,
struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_intel_stream_params_data params_data;
- params_data.stream = stream; /* direction */
+ params_data.substream = substream;
params_data.dai = dai;
params_data.hw_params = hw_params;
params_data.link_id = link_id;
@@ -663,25 +664,6 @@ static int intel_params_stream(struct sdw_intel *sdw,
return -EIO;
}
-static int intel_free_stream(struct sdw_intel *sdw,
- int stream,
- struct snd_soc_dai *dai,
- int link_id)
-{
- struct sdw_intel_link_res *res = sdw->link_res;
- struct sdw_intel_stream_free_data free_data;
-
- free_data.stream = stream; /* direction */
- free_data.dai = dai;
- free_data.link_id = link_id;
-
- if (res->ops && res->ops->free_stream && res->dev)
- return res->ops->free_stream(res->dev,
- &free_data);
-
- return 0;
-}
-
/*
* DAI routines
*/
@@ -727,7 +709,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
dai_runtime->pdi = pdi;
/* Inform DSP about PDI stream number */
- ret = intel_params_stream(sdw, substream->stream, dai, params,
+ ret = intel_params_stream(sdw, substream, dai, params,
sdw->instance,
pdi->intel_alh_id);
if (ret)
@@ -804,7 +786,7 @@ static int intel_prepare(struct snd_pcm_substream *substream,
sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
/* Inform DSP about PDI stream number */
- ret = intel_params_stream(sdw, substream->stream, dai,
+ ret = intel_params_stream(sdw, substream, dai,
hw_params,
sdw->instance,
dai_runtime->pdi->intel_alh_id);
@@ -817,7 +799,6 @@ static int
intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
- struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dai_runtime *dai_runtime;
int ret;
@@ -838,12 +819,6 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
return ret;
}
- ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
- if (ret < 0) {
- dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
- return ret;
- }
-
dai_runtime->pdi = NULL;
return 0;
@@ -871,19 +846,9 @@ static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
- struct sdw_intel *sdw = cdns_to_intel(cdns);
- struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_cdns_dai_runtime *dai_runtime;
int ret = 0;
- /*
- * The .trigger callback is used to send required IPC to audio
- * firmware. The .free_stream callback will still be called
- * by intel_free_stream() in the TRIGGER_SUSPEND case.
- */
- if (res->ops && res->ops->trigger)
- res->ops->trigger(dai, cmd, substream->stream);
-
dai_runtime = cdns->dai_runtime_array[dai->id];
if (!dai_runtime) {
dev_err(dai->dev, "failed to get dai runtime in %s\n",
@@ -903,7 +868,6 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
dai_runtime->suspended = true;
- ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
@@ -949,9 +913,7 @@ static int intel_component_dais_suspend(struct snd_soc_component *component)
*/
for_each_component_dais(component, dai) {
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
- struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dai_runtime *dai_runtime;
- int ret;
dai_runtime = cdns->dai_runtime_array[dai->id];
@@ -961,13 +923,8 @@ static int intel_component_dais_suspend(struct snd_soc_component *component)
if (dai_runtime->suspended)
continue;
- if (dai_runtime->paused) {
+ if (dai_runtime->paused)
dai_runtime->suspended = true;
-
- ret = intel_free_stream(sdw, dai_runtime->direction, dai, sdw->instance);
- if (ret < 0)
- return ret;
- }
}
return 0;
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index 09d479f2c77b..511932c55216 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -4,13 +4,17 @@
#ifndef __SDW_INTEL_LOCAL_H
#define __SDW_INTEL_LOCAL_H
+struct hdac_bus;
+
/**
* struct sdw_intel_link_res - Soundwire Intel link resource structure,
* typically populated by the controller driver.
* @hw_ops: platform-specific ops
* @mmio_base: mmio base of SoundWire registers
* @registers: Link IO registers base
+ * @ip_offset: offset for MCP_IP registers
* @shim: Audio shim pointer
+ * @shim_vs: Audio vendor-specific shim pointer
* @alh: ALH (Audio Link Hub) pointer
* @irq: Interrupt line
* @ops: Shim callback ops
@@ -21,13 +25,16 @@
* @link_mask: global mask needed for power-up/down sequences
* @cdns: Cadence master descriptor
* @list: used to walk-through all masters exposed by the same controller
+ * @hbus: hdac_bus pointer, needed for power management
*/
struct sdw_intel_link_res {
const struct sdw_intel_hw_ops *hw_ops;
void __iomem *mmio_base; /* not strictly needed, useful for debug */
void __iomem *registers;
+ u32 ip_offset;
void __iomem *shim;
+ void __iomem *shim_vs;
void __iomem *alh;
int irq;
const struct sdw_intel_ops *ops;
@@ -38,6 +45,7 @@ struct sdw_intel_link_res {
u32 link_mask;
struct sdw_cdns *cdns;
struct list_head list;
+ struct hdac_bus *hbus;
};
struct sdw_intel {
@@ -87,6 +95,14 @@ static inline void intel_writew(void __iomem *base, int offset, u16 value)
(sdw)->link_res->hw_ops->cb)
#define SDW_INTEL_OPS(sdw, cb) ((sdw)->link_res->hw_ops->cb)
+#ifdef CONFIG_DEBUG_FS
+void intel_ace2x_debugfs_init(struct sdw_intel *sdw);
+void intel_ace2x_debugfs_exit(struct sdw_intel *sdw);
+#else
+static inline void intel_ace2x_debugfs_init(struct sdw_intel *sdw) {}
+static inline void intel_ace2x_debugfs_exit(struct sdw_intel *sdw) {}
+#endif
+
static inline void sdw_intel_debugfs_init(struct sdw_intel *sdw)
{
if (SDW_INTEL_CHECK_OPS(sdw, debugfs_init))
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
new file mode 100644
index 000000000000..1be0bea5f40f
--- /dev/null
+++ b/drivers/soundwire/intel_ace2x.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+// Copyright(c) 2023 Intel Corporation. All rights reserved.
+
+/*
+ * Soundwire Intel ops for LunarLake
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include <sound/hda-mlink.h>
+#include "cadence_master.h"
+#include "bus.h"
+#include "intel.h"
+
+/*
+ * shim vendor-specific (vs) ops
+ */
+
+static void intel_shim_vs_init(struct sdw_intel *sdw)
+{
+ void __iomem *shim_vs = sdw->link_res->shim_vs;
+ u16 act = 0;
+
+ u16p_replace_bits(&act, 0x1, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS);
+ act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE;
+ act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DODS;
+ intel_writew(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL, act);
+ usleep_range(10, 15);
+}
+
+static int intel_shim_check_wake(struct sdw_intel *sdw)
+{
+ void __iomem *shim_vs;
+ u16 wake_sts;
+
+ shim_vs = sdw->link_res->shim_vs;
+ wake_sts = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_WAKESTS);
+
+ return wake_sts & SDW_SHIM2_INTEL_VS_WAKEEN_PWS;
+}
+
+static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
+{
+ void __iomem *shim_vs = sdw->link_res->shim_vs;
+ u16 wake_en;
+ u16 wake_sts;
+
+ wake_en = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_WAKEEN);
+
+ if (wake_enable) {
+ /* Enable the wakeup */
+ wake_en |= SDW_SHIM2_INTEL_VS_WAKEEN_PWE;
+ intel_writew(shim_vs, SDW_SHIM2_INTEL_VS_WAKEEN, wake_en);
+ } else {
+ /* Disable the wake up interrupt */
+ wake_en &= ~SDW_SHIM2_INTEL_VS_WAKEEN_PWE;
+ intel_writew(shim_vs, SDW_SHIM2_INTEL_VS_WAKEEN, wake_en);
+
+ /* Clear wake status (W1C) */
+ wake_sts = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_WAKESTS);
+ wake_sts |= SDW_SHIM2_INTEL_VS_WAKEEN_PWS;
+ intel_writew(shim_vs, SDW_SHIM2_INTEL_VS_WAKESTS, wake_sts);
+ }
+}
+
+static int intel_link_power_up(struct sdw_intel *sdw)
+{
+ struct sdw_bus *bus = &sdw->cdns.bus;
+ struct sdw_master_prop *prop = &bus->prop;
+ u32 *shim_mask = sdw->link_res->shim_mask;
+ unsigned int link_id = sdw->instance;
+ u32 syncprd;
+ int ret;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ if (!*shim_mask) {
+ /* we first need to program the SyncPRD/CPU registers */
+ dev_dbg(sdw->cdns.dev, "first link up, programming SYNCPRD\n");
+
+ if (prop->mclk_freq % 6000000)
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
+ else
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
+
+ ret = hdac_bus_eml_sdw_set_syncprd_unlocked(sdw->link_res->hbus, syncprd);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_set_syncprd failed: %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
+ ret = hdac_bus_eml_sdw_power_up_unlocked(sdw->link_res->hbus, link_id);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_power_up failed: %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ if (!*shim_mask) {
+ /* SYNCPU will change once link is active */
+ ret = hdac_bus_eml_sdw_wait_syncpu_unlocked(sdw->link_res->hbus);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_wait_syncpu failed: %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
+ *shim_mask |= BIT(link_id);
+
+ sdw->cdns.link_up = true;
+
+ intel_shim_vs_init(sdw);
+
+out:
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
+}
+
+static int intel_link_power_down(struct sdw_intel *sdw)
+{
+ u32 *shim_mask = sdw->link_res->shim_mask;
+ unsigned int link_id = sdw->instance;
+ int ret;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ sdw->cdns.link_up = false;
+
+ *shim_mask &= ~BIT(link_id);
+
+ ret = hdac_bus_eml_sdw_power_down_unlocked(sdw->link_res->hbus, link_id);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_power_down failed: %d\n",
+ __func__, ret);
+
+ /*
+ * we leave the sdw->cdns.link_up flag as false since we've disabled
+ * the link at this point and cannot handle interrupts any longer.
+ */
+ }
+
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
+}
+
+static void intel_sync_arm(struct sdw_intel *sdw)
+{
+ unsigned int link_id = sdw->instance;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ hdac_bus_eml_sdw_sync_arm_unlocked(sdw->link_res->hbus, link_id);
+
+ mutex_unlock(sdw->link_res->shim_lock);
+}
+
+static int intel_sync_go_unlocked(struct sdw_intel *sdw)
+{
+ int ret;
+
+ ret = hdac_bus_eml_sdw_sync_go_unlocked(sdw->link_res->hbus);
+ if (ret < 0)
+ dev_err(sdw->cdns.dev, "%s: SyncGO clear failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int intel_sync_go(struct sdw_intel *sdw)
+{
+ int ret;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ ret = intel_sync_go_unlocked(sdw);
+
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
+}
+
+static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
+{
+ return hdac_bus_eml_sdw_check_cmdsync_unlocked(sdw->link_res->hbus);
+}
+
+/*
+ * DAI operations
+ */
+static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
+};
+
+static const struct snd_soc_component_driver dai_component = {
+ .name = "soundwire",
+};
+
+/*
+ * PDI routines
+ */
+static void intel_pdi_init(struct sdw_intel *sdw,
+ struct sdw_cdns_stream_config *config)
+{
+ void __iomem *shim = sdw->link_res->shim;
+ int pcm_cap;
+
+ /* PCM Stream Capability */
+ pcm_cap = intel_readw(shim, SDW_SHIM2_PCMSCAP);
+
+ config->pcm_bd = FIELD_GET(SDW_SHIM2_PCMSCAP_BSS, pcm_cap);
+ config->pcm_in = FIELD_GET(SDW_SHIM2_PCMSCAP_ISS, pcm_cap);
+ config->pcm_out = FIELD_GET(SDW_SHIM2_PCMSCAP_ISS, pcm_cap);
+
+ dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
+ config->pcm_bd, config->pcm_in, config->pcm_out);
+}
+
+static int
+intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
+{
+ void __iomem *shim = sdw->link_res->shim;
+
+ /* zero based values for channel count in register */
+ return intel_readw(shim, SDW_SHIM2_PCMSYCHC(pdi_num)) + 1;
+}
+
+static void intel_pdi_get_ch_update(struct sdw_intel *sdw,
+ struct sdw_cdns_pdi *pdi,
+ unsigned int num_pdi,
+ unsigned int *num_ch)
+{
+ int ch_count = 0;
+ int i;
+
+ for (i = 0; i < num_pdi; i++) {
+ pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
+ ch_count += pdi->ch_count;
+ pdi++;
+ }
+
+ *num_ch = ch_count;
+}
+
+static void intel_pdi_stream_ch_update(struct sdw_intel *sdw,
+ struct sdw_cdns_streams *stream)
+{
+ intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
+ &stream->num_ch_bd);
+
+ intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
+ &stream->num_ch_in);
+
+ intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
+ &stream->num_ch_out);
+}
+
+static int intel_create_dai(struct sdw_cdns *cdns,
+ struct snd_soc_dai_driver *dais,
+ enum intel_pdi_type type,
+ u32 num, u32 off, u32 max_ch)
+{
+ int i;
+
+ if (!num)
+ return 0;
+
+ for (i = off; i < (off + num); i++) {
+ dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
+ "SDW%d Pin%d",
+ cdns->instance, i);
+ if (!dais[i].name)
+ return -ENOMEM;
+
+ if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
+ dais[i].playback.channels_min = 1;
+ dais[i].playback.channels_max = max_ch;
+ }
+
+ if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
+ dais[i].capture.channels_min = 1;
+ dais[i].capture.channels_max = max_ch;
+ }
+
+ dais[i].ops = &intel_pcm_dai_ops;
+ }
+
+ return 0;
+}
+
+static int intel_register_dai(struct sdw_intel *sdw)
+{
+ struct sdw_cdns_dai_runtime **dai_runtime_array;
+ struct sdw_cdns_stream_config config;
+ struct sdw_cdns *cdns = &sdw->cdns;
+ struct sdw_cdns_streams *stream;
+ struct snd_soc_dai_driver *dais;
+ int num_dai;
+ int ret;
+ int off = 0;
+
+ /* Read the PDI config and initialize cadence PDI */
+ intel_pdi_init(sdw, &config);
+ ret = sdw_cdns_pdi_init(cdns, config);
+ if (ret)
+ return ret;
+
+ intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
+
+ /* DAIs are created based on total number of PDIs supported */
+ num_dai = cdns->pcm.num_pdi;
+
+ dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
+ sizeof(struct sdw_cdns_dai_runtime *),
+ GFP_KERNEL);
+ if (!dai_runtime_array)
+ return -ENOMEM;
+ cdns->dai_runtime_array = dai_runtime_array;
+
+ dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
+ if (!dais)
+ return -ENOMEM;
+
+ /* Create PCM DAIs */
+ stream = &cdns->pcm;
+
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
+ off, stream->num_ch_in);
+ if (ret)
+ return ret;
+
+ off += cdns->pcm.num_in;
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
+ off, stream->num_ch_out);
+ if (ret)
+ return ret;
+
+ off += cdns->pcm.num_out;
+ ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
+ off, stream->num_ch_bd);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_component(cdns->dev, &dai_component,
+ dais, num_dai);
+}
+
+static void intel_program_sdi(struct sdw_intel *sdw, int dev_num)
+{
+ int ret;
+
+ ret = hdac_bus_eml_sdw_set_lsdiid(sdw->link_res->hbus, sdw->instance, dev_num);
+ if (ret < 0)
+ dev_err(sdw->cdns.dev, "%s: could not set lsdiid for link %d %d\n",
+ __func__, sdw->instance, dev_num);
+}
+
+const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops = {
+ .debugfs_init = intel_ace2x_debugfs_init,
+ .debugfs_exit = intel_ace2x_debugfs_exit,
+
+ .register_dai = intel_register_dai,
+
+ .check_clock_stop = intel_check_clock_stop,
+ .start_bus = intel_start_bus,
+ .start_bus_after_reset = intel_start_bus_after_reset,
+ .start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
+ .stop_bus = intel_stop_bus,
+
+ .link_power_up = intel_link_power_up,
+ .link_power_down = intel_link_power_down,
+
+ .shim_check_wake = intel_shim_check_wake,
+ .shim_wake = intel_shim_wake,
+
+ .pre_bank_switch = intel_pre_bank_switch,
+ .post_bank_switch = intel_post_bank_switch,
+
+ .sync_arm = intel_sync_arm,
+ .sync_go_unlocked = intel_sync_go_unlocked,
+ .sync_go = intel_sync_go,
+ .sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
+
+ .program_sdi = intel_program_sdi,
+};
+EXPORT_SYMBOL_NS(sdw_intel_lnl_hw_ops, SOUNDWIRE_INTEL);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_HDA_MLINK);
diff --git a/drivers/soundwire/intel_ace2x_debugfs.c b/drivers/soundwire/intel_ace2x_debugfs.c
new file mode 100644
index 000000000000..3d24661ffd37
--- /dev/null
+++ b/drivers/soundwire/intel_ace2x_debugfs.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2023 Intel Corporation. All rights reserved.
+
+#include <linux/acpi.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include <linux/soundwire/sdw_registers.h>
+#include "bus.h"
+#include "cadence_master.h"
+#include "intel.h"
+
+/*
+ * debugfs
+ */
+#ifdef CONFIG_DEBUG_FS
+
+#define RD_BUF (2 * PAGE_SIZE)
+
+static ssize_t intel_sprintf(void __iomem *mem, bool l,
+ char *buf, size_t pos, unsigned int reg)
+{
+ int value;
+
+ if (l)
+ value = intel_readl(mem, reg);
+ else
+ value = intel_readw(mem, reg);
+
+ return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
+}
+
+static int intel_reg_show(struct seq_file *s_file, void *data)
+{
+ struct sdw_intel *sdw = s_file->private;
+ void __iomem *s = sdw->link_res->shim;
+ void __iomem *vs_s = sdw->link_res->shim_vs;
+ ssize_t ret;
+ u32 pcm_cap;
+ int pcm_bd;
+ char *buf;
+ int j;
+
+ buf = kzalloc(RD_BUF, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = scnprintf(buf, RD_BUF, "Register Value\n");
+ ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
+
+ ret += intel_sprintf(s, true, buf, ret, SDW_SHIM2_LECAP);
+ ret += intel_sprintf(s, false, buf, ret, SDW_SHIM2_PCMSCAP);
+
+ pcm_cap = intel_readw(s, SDW_SHIM2_PCMSCAP);
+ pcm_bd = FIELD_GET(SDW_SHIM2_PCMSCAP_BSS, pcm_cap);
+
+ for (j = 0; j < pcm_bd; j++) {
+ ret += intel_sprintf(s, false, buf, ret,
+ SDW_SHIM2_PCMSYCHM(j));
+ ret += intel_sprintf(s, false, buf, ret,
+ SDW_SHIM2_PCMSYCHC(j));
+ }
+
+ ret += scnprintf(buf + ret, RD_BUF - ret, "\nVS CLK controls\n");
+ ret += intel_sprintf(vs_s, true, buf, ret, SDW_SHIM2_INTEL_VS_LVSCTL);
+
+ ret += scnprintf(buf + ret, RD_BUF - ret, "\nVS Wake registers\n");
+ ret += intel_sprintf(vs_s, false, buf, ret, SDW_SHIM2_INTEL_VS_WAKEEN);
+ ret += intel_sprintf(vs_s, false, buf, ret, SDW_SHIM2_INTEL_VS_WAKESTS);
+
+ ret += scnprintf(buf + ret, RD_BUF - ret, "\nVS IOCTL, ACTMCTL\n");
+ ret += intel_sprintf(vs_s, false, buf, ret, SDW_SHIM2_INTEL_VS_IOCTL);
+ ret += intel_sprintf(vs_s, false, buf, ret, SDW_SHIM2_INTEL_VS_ACTMCTL);
+
+ seq_printf(s_file, "%s", buf);
+ kfree(buf);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(intel_reg);
+
+static int intel_set_m_datamode(void *data, u64 value)
+{
+ struct sdw_intel *sdw = data;
+ struct sdw_bus *bus = &sdw->cdns.bus;
+
+ if (value > SDW_PORT_DATA_MODE_STATIC_1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ bus->params.m_data_mode = value;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
+ intel_set_m_datamode, "%llu\n");
+
+static int intel_set_s_datamode(void *data, u64 value)
+{
+ struct sdw_intel *sdw = data;
+ struct sdw_bus *bus = &sdw->cdns.bus;
+
+ if (value > SDW_PORT_DATA_MODE_STATIC_1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ bus->params.s_data_mode = value;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
+ intel_set_s_datamode, "%llu\n");
+
+void intel_ace2x_debugfs_init(struct sdw_intel *sdw)
+{
+ struct dentry *root = sdw->cdns.bus.debugfs;
+
+ if (!root)
+ return;
+
+ sdw->debugfs = debugfs_create_dir("intel-sdw", root);
+
+ debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
+ &intel_reg_fops);
+
+ debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
+ &intel_set_m_datamode_fops);
+
+ debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
+ &intel_set_s_datamode_fops);
+
+ sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
+}
+
+void intel_ace2x_debugfs_exit(struct sdw_intel *sdw)
+{
+ debugfs_remove_recursive(sdw->debugfs);
+}
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index b21e86084f7b..0daa6ca9a224 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -60,6 +60,21 @@ static int generic_post_bank_switch(struct sdw_bus *bus)
return sdw->link_res->hw_ops->post_bank_switch(sdw);
}
+static void generic_new_peripheral_assigned(struct sdw_bus *bus, int dev_num)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+
+ /* paranoia check, this should never happen */
+ if (dev_num < INTEL_DEV_NUM_IDA_MIN || dev_num > SDW_MAX_DEVICES) {
+ dev_err(bus->dev, "%s: invalid dev_num %d\n", __func__, dev_num);
+ return;
+ }
+
+ if (sdw->link_res->hw_ops->program_sdi)
+ sdw->link_res->hw_ops->program_sdi(sdw, dev_num);
+}
+
static int sdw_master_read_intel_prop(struct sdw_bus *bus)
{
struct sdw_master_prop *prop = &bus->prop;
@@ -117,6 +132,7 @@ static struct sdw_master_ops sdw_intel_ops = {
.pre_bank_switch = generic_pre_bank_switch,
.post_bank_switch = generic_post_bank_switch,
.read_ping_status = cdns_read_ping_status,
+ .new_peripheral_assigned = generic_new_peripheral_assigned,
};
/*
@@ -144,6 +160,7 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
sdw->link_res = &ldev->link_res;
cdns->dev = dev;
cdns->registers = sdw->link_res->registers;
+ cdns->ip_offset = sdw->link_res->ip_offset;
cdns->instance = sdw->instance;
cdns->msg_count = 0;
diff --git a/drivers/soundwire/intel_bus_common.c b/drivers/soundwire/intel_bus_common.c
index f180e3bea989..e5ac3cc7cb79 100644
--- a/drivers/soundwire/intel_bus_common.c
+++ b/drivers/soundwire/intel_bus_common.c
@@ -16,12 +16,6 @@ int intel_start_bus(struct sdw_intel *sdw)
struct sdw_bus *bus = &cdns->bus;
int ret;
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
- return ret;
- }
-
/*
* follow recommended programming flows to avoid timeouts when
* gsync is enabled
@@ -32,30 +26,41 @@ int intel_start_bus(struct sdw_intel *sdw)
ret = sdw_cdns_init(cdns);
if (ret < 0) {
dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
- goto err_interrupt;
+ return ret;
}
- ret = sdw_cdns_exit_reset(cdns);
- if (ret < 0) {
- dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
- goto err_interrupt;
- }
+ sdw_cdns_config_update(cdns);
if (bus->multi_link) {
ret = sdw_intel_sync_go(sdw);
if (ret < 0) {
dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
- goto err_interrupt;
+ return ret;
}
}
+
+ ret = sdw_cdns_config_update_set_wait(cdns);
+ if (ret < 0) {
+ dev_err(dev, "%s: CONFIG_UPDATE BIT still set\n", __func__);
+ return ret;
+ }
+
+ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
+ return ret;
+ }
+
sdw_cdns_check_self_clearing_bits(cdns, __func__,
true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
-
-err_interrupt:
- sdw_cdns_enable_interrupt(cdns, false);
- return ret;
}
int intel_start_bus_after_reset(struct sdw_intel *sdw)
@@ -86,12 +91,6 @@ int intel_start_bus_after_reset(struct sdw_intel *sdw)
status = SDW_UNATTACH_REQUEST_MASTER_RESET;
sdw_clear_slave_status(bus, status);
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
- return ret;
- }
-
/*
* follow recommended programming flows to avoid
* timeouts when gsync is enabled
@@ -115,31 +114,44 @@ int intel_start_bus_after_reset(struct sdw_intel *sdw)
ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
if (ret < 0) {
dev_err(dev, "unable to restart clock during resume\n");
- goto err_interrupt;
+ if (!clock_stop0)
+ sdw_cdns_enable_interrupt(cdns, false);
+ return ret;
}
if (!clock_stop0) {
- ret = sdw_cdns_exit_reset(cdns);
- if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence during resume\n");
- goto err_interrupt;
- }
+ sdw_cdns_config_update(cdns);
if (bus->multi_link) {
ret = sdw_intel_sync_go(sdw);
if (ret < 0) {
dev_err(sdw->cdns.dev, "sync go failed during resume\n");
- goto err_interrupt;
+ return ret;
}
}
+
+ ret = sdw_cdns_config_update_set_wait(cdns);
+ if (ret < 0) {
+ dev_err(dev, "%s: CONFIG_UPDATE BIT still set\n", __func__);
+ return ret;
+ }
+
+ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+ dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ return ret;
+ }
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+
}
sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
-
-err_interrupt:
- sdw_cdns_enable_interrupt(cdns, false);
- return ret;
}
void intel_check_clock_stop(struct sdw_intel *sdw)
@@ -158,21 +170,19 @@ int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
struct sdw_cdns *cdns = &sdw->cdns;
int ret;
- ret = sdw_cdns_enable_interrupt(cdns, true);
+ ret = sdw_cdns_clock_restart(cdns, false);
if (ret < 0) {
- dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
+ dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
return ret;
}
- ret = sdw_cdns_clock_restart(cdns, false);
+ ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
- dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
- sdw_cdns_enable_interrupt(cdns, false);
+ dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
return ret;
}
- sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
- true, INTEL_MASTER_RESET_ITERATIONS);
+ sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
}
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index cbe56b993c6c..534c8795e7e8 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -63,19 +63,30 @@ static struct sdw_intel_link_dev *intel_link_dev_register(struct sdw_intel_res *
link = &ldev->link_res;
link->hw_ops = res->hw_ops;
link->mmio_base = res->mmio_base;
- link->registers = res->mmio_base + SDW_LINK_BASE
- + (SDW_LINK_SIZE * link_id);
- link->shim = res->mmio_base + res->shim_base;
- link->alh = res->mmio_base + res->alh_base;
+ if (!res->ext) {
+ link->registers = res->mmio_base + SDW_LINK_BASE
+ + (SDW_LINK_SIZE * link_id);
+ link->ip_offset = 0;
+ link->shim = res->mmio_base + res->shim_base;
+ link->alh = res->mmio_base + res->alh_base;
+ link->shim_lock = &ctx->shim_lock;
+ } else {
+ link->registers = res->mmio_base + SDW_IP_BASE(link_id);
+ link->ip_offset = SDW_CADENCE_MCP_IP_OFFSET;
+ link->shim = res->mmio_base + SDW_SHIM2_GENERIC_BASE(link_id);
+ link->shim_vs = res->mmio_base + SDW_SHIM2_VS_BASE(link_id);
+ link->shim_lock = res->eml_lock;
+ }
link->ops = res->ops;
link->dev = res->dev;
link->clock_stop_quirks = res->clock_stop_quirks;
- link->shim_lock = &ctx->shim_lock;
link->shim_mask = &ctx->shim_mask;
link->link_mask = ctx->link_mask;
+ link->hbus = res->hbus;
+
/* now follow the two-step init/add sequence */
ret = auxiliary_device_init(auxdev);
if (ret < 0) {
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 280455f047a3..7970fdb27ba0 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -31,6 +31,7 @@
#define SWRM_VERSION_1_3_0 0x01030000
#define SWRM_VERSION_1_5_1 0x01050001
#define SWRM_VERSION_1_7_0 0x01070000
+#define SWRM_VERSION_2_0_0 0x02000000
#define SWRM_COMP_HW_VERSION 0x00
#define SWRM_COMP_CFG_ADDR 0x04
#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK BIT(1)
@@ -41,7 +42,8 @@
#define SWRM_COMP_PARAMS_DOUT_PORTS_MASK GENMASK(4, 0)
#define SWRM_COMP_PARAMS_DIN_PORTS_MASK GENMASK(9, 5)
#define SWRM_COMP_MASTER_ID 0x104
-#define SWRM_INTERRUPT_STATUS 0x200
+#define SWRM_V1_3_INTERRUPT_STATUS 0x200
+#define SWRM_V2_0_INTERRUPT_STATUS 0x5000
#define SWRM_INTERRUPT_STATUS_RMSK GENMASK(16, 0)
#define SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ BIT(0)
#define SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED BIT(1)
@@ -54,24 +56,32 @@
#define SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION BIT(8)
#define SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH BIT(9)
#define SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED BIT(10)
-#define SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2 BIT(13)
-#define SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2 BIT(14)
-#define SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP BIT(16)
+#define SWRM_INTERRUPT_STATUS_AUTO_ENUM_FAILED BIT(11)
+#define SWRM_INTERRUPT_STATUS_AUTO_ENUM_TABLE_IS_FULL BIT(12)
+#define SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2 BIT(13)
+#define SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2 BIT(14)
+#define SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP BIT(16)
#define SWRM_INTERRUPT_MAX 17
-#define SWRM_INTERRUPT_MASK_ADDR 0x204
-#define SWRM_INTERRUPT_CLEAR 0x208
-#define SWRM_INTERRUPT_CPU_EN 0x210
-#define SWRM_CMD_FIFO_WR_CMD 0x300
-#define SWRM_CMD_FIFO_RD_CMD 0x304
+#define SWRM_V1_3_INTERRUPT_MASK_ADDR 0x204
+#define SWRM_V1_3_INTERRUPT_CLEAR 0x208
+#define SWRM_V2_0_INTERRUPT_CLEAR 0x5008
+#define SWRM_V1_3_INTERRUPT_CPU_EN 0x210
+#define SWRM_V2_0_INTERRUPT_CPU_EN 0x5004
+#define SWRM_V1_3_CMD_FIFO_WR_CMD 0x300
+#define SWRM_V2_0_CMD_FIFO_WR_CMD 0x5020
+#define SWRM_V1_3_CMD_FIFO_RD_CMD 0x304
+#define SWRM_V2_0_CMD_FIFO_RD_CMD 0x5024
#define SWRM_CMD_FIFO_CMD 0x308
#define SWRM_CMD_FIFO_FLUSH 0x1
-#define SWRM_CMD_FIFO_STATUS 0x30C
+#define SWRM_V1_3_CMD_FIFO_STATUS 0x30C
+#define SWRM_V2_0_CMD_FIFO_STATUS 0x5050
#define SWRM_RD_CMD_FIFO_CNT_MASK GENMASK(20, 16)
#define SWRM_WR_CMD_FIFO_CNT_MASK GENMASK(12, 8)
#define SWRM_CMD_FIFO_CFG_ADDR 0x314
#define SWRM_CONTINUE_EXEC_ON_CMD_IGNORE BIT(31)
#define SWRM_RD_WR_CMD_RETRIES 0x7
-#define SWRM_CMD_FIFO_RD_FIFO_ADDR 0x318
+#define SWRM_V1_3_CMD_FIFO_RD_FIFO_ADDR 0x318
+#define SWRM_V2_0_CMD_FIFO_RD_FIFO_ADDR 0x5040
#define SWRM_RD_FIFO_CMD_ID_MASK GENMASK(11, 8)
#define SWRM_ENUMERATOR_CFG_ADDR 0x500
#define SWRM_ENUMERATOR_SLAVE_DEV_ID_1(m) (0x530 + 0x8 * (m))
@@ -95,8 +105,14 @@
#define SWRM_DP_BLOCK_CTRL2_BANK(n, m) (0x1130 + 0x100 * (n - 1) + 0x40 * m)
#define SWRM_DP_PORT_HCTRL_BANK(n, m) (0x1134 + 0x100 * (n - 1) + 0x40 * m)
#define SWRM_DP_BLOCK_CTRL3_BANK(n, m) (0x1138 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_SAMPLECTRL2_BANK(n, m) (0x113C + 0x100 * (n - 1) + 0x40 * m)
#define SWRM_DIN_DPn_PCM_PORT_CTRL(n) (0x1054 + 0x100 * (n - 1))
-#define SWR_MSTR_MAX_REG_ADDR (0x1740)
+#define SWR_V1_3_MSTR_MAX_REG_ADDR 0x1740
+#define SWR_V2_0_MSTR_MAX_REG_ADDR 0x50ac
+
+#define SWRM_V2_0_CLK_CTRL 0x5060
+#define SWRM_V2_0_CLK_CTRL_CLK_START BIT(0)
+#define SWRM_V2_0_LINK_STATUS 0x5064
#define SWRM_DP_PORT_CTRL_EN_CHAN_SHFT 0x18
#define SWRM_DP_PORT_CTRL_OFFSET2_SHFT 0x10
@@ -109,20 +125,20 @@
#define SWRM_REG_VAL_PACK(data, dev, id, reg) \
((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))
-#define MAX_FREQ_NUM 1
-#define TIMEOUT_MS 100
-#define QCOM_SWRM_MAX_RD_LEN 0x1
-#define QCOM_SDW_MAX_PORTS 14
-#define DEFAULT_CLK_FREQ 9600000
-#define SWRM_MAX_DAIS 0xF
-#define SWR_INVALID_PARAM 0xFF
-#define SWR_HSTOP_MAX_VAL 0xF
-#define SWR_HSTART_MIN_VAL 0x0
-#define SWR_BROADCAST_CMD_ID 0x0F
-#define SWR_MAX_CMD_ID 14
-#define MAX_FIFO_RD_RETRY 3
-#define SWR_OVERFLOW_RETRY_COUNT 30
-#define SWRM_LINK_STATUS_RETRY_CNT 100
+#define MAX_FREQ_NUM 1
+#define TIMEOUT_MS 100
+#define QCOM_SWRM_MAX_RD_LEN 0x1
+#define QCOM_SDW_MAX_PORTS 14
+#define DEFAULT_CLK_FREQ 9600000
+#define SWRM_MAX_DAIS 0xF
+#define SWR_INVALID_PARAM 0xFF
+#define SWR_HSTOP_MAX_VAL 0xF
+#define SWR_HSTART_MIN_VAL 0x0
+#define SWR_BROADCAST_CMD_ID 0x0F
+#define SWR_MAX_CMD_ID 14
+#define MAX_FIFO_RD_RETRY 3
+#define SWR_OVERFLOW_RETRY_COUNT 30
+#define SWRM_LINK_STATUS_RETRY_CNT 100
enum {
MASTER_ID_WSA = 1,
@@ -131,7 +147,7 @@ enum {
};
struct qcom_swrm_port_config {
- u8 si;
+ u16 si;
u8 off1;
u8 off2;
u8 bp_mode;
@@ -142,10 +158,28 @@ struct qcom_swrm_port_config {
u8 lane_control;
};
+/*
+ * Internal IDs for different register layouts. Only a few registers differ
+ * between variants, so the list of IDs below does not cover all registers.
+ */
+enum {
+ SWRM_REG_FRAME_GEN_ENABLED,
+ SWRM_REG_INTERRUPT_STATUS,
+ SWRM_REG_INTERRUPT_MASK_ADDR,
+ SWRM_REG_INTERRUPT_CLEAR,
+ SWRM_REG_INTERRUPT_CPU_EN,
+ SWRM_REG_CMD_FIFO_WR_CMD,
+ SWRM_REG_CMD_FIFO_RD_CMD,
+ SWRM_REG_CMD_FIFO_STATUS,
+ SWRM_REG_CMD_FIFO_RD_FIFO_ADDR,
+};
+
struct qcom_swrm_ctrl {
struct sdw_bus bus;
struct device *dev;
struct regmap *regmap;
+ u32 max_reg;
+ const unsigned int *reg_layout;
void __iomem *mmio;
struct reset_control *audio_cgcr;
#ifdef CONFIG_DEBUG_FS
@@ -153,12 +187,9 @@ struct qcom_swrm_ctrl {
#endif
struct completion broadcast;
struct completion enumeration;
- struct work_struct slave_work;
/* Port alloc/free lock */
struct mutex port_lock;
struct clk *hclk;
- u8 wr_cmd_id;
- u8 rd_cmd_id;
int irq;
unsigned int version;
int wake_irq;
@@ -171,7 +202,8 @@ struct qcom_swrm_ctrl {
u32 intr_mask;
u8 rcmd_id;
u8 wcmd_id;
- struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
+ /* Port numbers are 1 - 14 */
+ struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS + 1];
struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
@@ -186,22 +218,62 @@ struct qcom_swrm_data {
u32 default_cols;
u32 default_rows;
bool sw_clk_gate_required;
+ u32 max_reg;
+ const unsigned int *reg_layout;
+};
+
+static const unsigned int swrm_v1_3_reg_layout[] = {
+ [SWRM_REG_FRAME_GEN_ENABLED] = SWRM_COMP_STATUS,
+ [SWRM_REG_INTERRUPT_STATUS] = SWRM_V1_3_INTERRUPT_STATUS,
+ [SWRM_REG_INTERRUPT_MASK_ADDR] = SWRM_V1_3_INTERRUPT_MASK_ADDR,
+ [SWRM_REG_INTERRUPT_CLEAR] = SWRM_V1_3_INTERRUPT_CLEAR,
+ [SWRM_REG_INTERRUPT_CPU_EN] = SWRM_V1_3_INTERRUPT_CPU_EN,
+ [SWRM_REG_CMD_FIFO_WR_CMD] = SWRM_V1_3_CMD_FIFO_WR_CMD,
+ [SWRM_REG_CMD_FIFO_RD_CMD] = SWRM_V1_3_CMD_FIFO_RD_CMD,
+ [SWRM_REG_CMD_FIFO_STATUS] = SWRM_V1_3_CMD_FIFO_STATUS,
+ [SWRM_REG_CMD_FIFO_RD_FIFO_ADDR] = SWRM_V1_3_CMD_FIFO_RD_FIFO_ADDR,
};
static const struct qcom_swrm_data swrm_v1_3_data = {
.default_rows = 48,
.default_cols = 16,
+ .max_reg = SWR_V1_3_MSTR_MAX_REG_ADDR,
+ .reg_layout = swrm_v1_3_reg_layout,
};
static const struct qcom_swrm_data swrm_v1_5_data = {
.default_rows = 50,
.default_cols = 16,
+ .max_reg = SWR_V1_3_MSTR_MAX_REG_ADDR,
+ .reg_layout = swrm_v1_3_reg_layout,
};
static const struct qcom_swrm_data swrm_v1_6_data = {
.default_rows = 50,
.default_cols = 16,
.sw_clk_gate_required = true,
+ .max_reg = SWR_V1_3_MSTR_MAX_REG_ADDR,
+ .reg_layout = swrm_v1_3_reg_layout,
+};
+
+static const unsigned int swrm_v2_0_reg_layout[] = {
+ [SWRM_REG_FRAME_GEN_ENABLED] = SWRM_V2_0_LINK_STATUS,
+ [SWRM_REG_INTERRUPT_STATUS] = SWRM_V2_0_INTERRUPT_STATUS,
+ [SWRM_REG_INTERRUPT_MASK_ADDR] = 0, /* Not present */
+ [SWRM_REG_INTERRUPT_CLEAR] = SWRM_V2_0_INTERRUPT_CLEAR,
+ [SWRM_REG_INTERRUPT_CPU_EN] = SWRM_V2_0_INTERRUPT_CPU_EN,
+ [SWRM_REG_CMD_FIFO_WR_CMD] = SWRM_V2_0_CMD_FIFO_WR_CMD,
+ [SWRM_REG_CMD_FIFO_RD_CMD] = SWRM_V2_0_CMD_FIFO_RD_CMD,
+ [SWRM_REG_CMD_FIFO_STATUS] = SWRM_V2_0_CMD_FIFO_STATUS,
+ [SWRM_REG_CMD_FIFO_RD_FIFO_ADDR] = SWRM_V2_0_CMD_FIFO_RD_FIFO_ADDR,
+};
+
+static const struct qcom_swrm_data swrm_v2_0_data = {
+ .default_rows = 50,
+ .default_cols = 16,
+ .sw_clk_gate_required = true,
+ .max_reg = SWR_V2_0_MSTR_MAX_REG_ADDR,
+ .reg_layout = swrm_v2_0_reg_layout,
};
#define to_qcom_sdw(b) container_of(b, struct qcom_swrm_ctrl, bus)
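[Editor's note, not part of the patch above: the reg_layout table lets shared code look up version-specific register offsets by a logical ID instead of branching on the IP version. A minimal user-space sketch of the same indirection, using offsets taken from the defines above:]

#include <stdio.h>

/* Hypothetical, simplified version of the per-variant register layout table. */
enum {
	REG_INTERRUPT_STATUS,
	REG_CMD_FIFO_STATUS,
	REG_MAX,
};

static const unsigned int v1_3_layout[REG_MAX] = {
	[REG_INTERRUPT_STATUS] = 0x200,
	[REG_CMD_FIFO_STATUS]  = 0x30C,
};

static const unsigned int v2_0_layout[REG_MAX] = {
	[REG_INTERRUPT_STATUS] = 0x5000,
	[REG_CMD_FIFO_STATUS]  = 0x5050,
};

struct ctrl {
	const unsigned int *reg_layout;
};

int main(void)
{
	struct ctrl old_soc = { .reg_layout = v1_3_layout };
	struct ctrl new_soc = { .reg_layout = v2_0_layout };

	/* Shared code reads ctrl->reg_layout[id] instead of a hard-coded offset. */
	printf("INTERRUPT_STATUS: v1.3 @ 0x%x, v2.0 @ 0x%x\n",
	       old_soc.reg_layout[REG_INTERRUPT_STATUS],
	       new_soc.reg_layout[REG_INTERRUPT_STATUS]);
	return 0;
}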
@@ -278,14 +350,15 @@ static u32 swrm_get_packed_reg_val(u8 *cmd_id, u8 cmd_data,
return val;
}
-static int swrm_wait_for_rd_fifo_avail(struct qcom_swrm_ctrl *swrm)
+static int swrm_wait_for_rd_fifo_avail(struct qcom_swrm_ctrl *ctrl)
{
u32 fifo_outstanding_data, value;
int fifo_retry_count = SWR_OVERFLOW_RETRY_COUNT;
do {
/* Check for fifo underflow during read */
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
fifo_outstanding_data = FIELD_GET(SWRM_RD_CMD_FIFO_CNT_MASK, value);
/* Check if read data is available in read fifo */
@@ -296,39 +369,66 @@ static int swrm_wait_for_rd_fifo_avail(struct qcom_swrm_ctrl *swrm)
} while (fifo_retry_count--);
if (fifo_outstanding_data == 0) {
- dev_err_ratelimited(swrm->dev, "%s err read underflow\n", __func__);
+ dev_err_ratelimited(ctrl->dev, "%s err read underflow\n", __func__);
return -EIO;
}
return 0;
}
-static int swrm_wait_for_wr_fifo_avail(struct qcom_swrm_ctrl *swrm)
+static int swrm_wait_for_wr_fifo_avail(struct qcom_swrm_ctrl *ctrl)
{
u32 fifo_outstanding_cmds, value;
int fifo_retry_count = SWR_OVERFLOW_RETRY_COUNT;
do {
/* Check for fifo overflow during write */
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
fifo_outstanding_cmds = FIELD_GET(SWRM_WR_CMD_FIFO_CNT_MASK, value);
/* Check for space in write fifo before writing */
- if (fifo_outstanding_cmds < swrm->wr_fifo_depth)
+ if (fifo_outstanding_cmds < ctrl->wr_fifo_depth)
return 0;
usleep_range(500, 510);
} while (fifo_retry_count--);
- if (fifo_outstanding_cmds == swrm->wr_fifo_depth) {
- dev_err_ratelimited(swrm->dev, "%s err write overflow\n", __func__);
+ if (fifo_outstanding_cmds == ctrl->wr_fifo_depth) {
+ dev_err_ratelimited(ctrl->dev, "%s err write overflow\n", __func__);
return -EIO;
}
return 0;
}
-static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
+static bool swrm_wait_for_wr_fifo_done(struct qcom_swrm_ctrl *ctrl)
+{
+ u32 fifo_outstanding_cmds, value;
+ int fifo_retry_count = SWR_OVERFLOW_RETRY_COUNT;
+
+ /* Check for fifo overflow during write */
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS], &value);
+ fifo_outstanding_cmds = FIELD_GET(SWRM_WR_CMD_FIFO_CNT_MASK, value);
+
+ if (fifo_outstanding_cmds) {
+ while (fifo_retry_count) {
+ usleep_range(500, 510);
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS], &value);
+ fifo_outstanding_cmds = FIELD_GET(SWRM_WR_CMD_FIFO_CNT_MASK, value);
+ fifo_retry_count--;
+ if (fifo_outstanding_cmds == 0)
+ return true;
+ }
+ } else {
+ return true;
+ }
+
+ return false;
+}
+
+static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *ctrl, u8 cmd_data,
u8 dev_addr, u16 reg_addr)
{
@@ -341,28 +441,29 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
val = swrm_get_packed_reg_val(&cmd_id, cmd_data,
dev_addr, reg_addr);
} else {
- val = swrm_get_packed_reg_val(&swrm->wcmd_id, cmd_data,
+ val = swrm_get_packed_reg_val(&ctrl->wcmd_id, cmd_data,
dev_addr, reg_addr);
}
- if (swrm_wait_for_wr_fifo_avail(swrm))
+ if (swrm_wait_for_wr_fifo_avail(ctrl))
return SDW_CMD_FAIL_OTHER;
if (cmd_id == SWR_BROADCAST_CMD_ID)
- reinit_completion(&swrm->broadcast);
+ reinit_completion(&ctrl->broadcast);
/* It's assumed that the write is okay as we do not get any status back */
- swrm->reg_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_WR_CMD], val);
- if (swrm->version <= SWRM_VERSION_1_3_0)
+ if (ctrl->version <= SWRM_VERSION_1_3_0)
usleep_range(150, 155);
if (cmd_id == SWR_BROADCAST_CMD_ID) {
+ swrm_wait_for_wr_fifo_done(ctrl);
/*
* sleep for 10ms for MSM soundwire variant to allow broadcast
* command to complete.
*/
- ret = wait_for_completion_timeout(&swrm->broadcast,
+ ret = wait_for_completion_timeout(&ctrl->broadcast,
msecs_to_jiffies(TIMEOUT_MS));
if (!ret)
ret = SDW_CMD_IGNORED;
@@ -375,41 +476,44 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
return ret;
}
-static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *swrm,
+static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *ctrl,
u8 dev_addr, u16 reg_addr,
u32 len, u8 *rval)
{
u32 cmd_data, cmd_id, val, retry_attempt = 0;
- val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr);
+ val = swrm_get_packed_reg_val(&ctrl->rcmd_id, len, dev_addr, reg_addr);
/*
* Check for outstanding cmd wrt. write fifo depth to avoid
* overflow as read will also increase write fifo cnt.
*/
- swrm_wait_for_wr_fifo_avail(swrm);
+ swrm_wait_for_wr_fifo_avail(ctrl);
/* wait for FIFO RD to complete to avoid overflow */
usleep_range(100, 105);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_RD_CMD], val);
/* wait for FIFO RD CMD complete to avoid overflow */
usleep_range(250, 255);
- if (swrm_wait_for_rd_fifo_avail(swrm))
+ if (swrm_wait_for_rd_fifo_avail(ctrl))
return SDW_CMD_FAIL_OTHER;
do {
- swrm->reg_read(swrm, SWRM_CMD_FIFO_RD_FIFO_ADDR, &cmd_data);
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_RD_FIFO_ADDR],
+ &cmd_data);
rval[0] = cmd_data & 0xFF;
cmd_id = FIELD_GET(SWRM_RD_FIFO_CMD_ID_MASK, cmd_data);
- if (cmd_id != swrm->rcmd_id) {
+ if (cmd_id != ctrl->rcmd_id) {
if (retry_attempt < (MAX_FIFO_RD_RETRY - 1)) {
/* wait 500 us before retry on fifo read failure */
usleep_range(500, 505);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD,
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD,
SWRM_CMD_FIFO_FLUSH);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_RD_CMD],
+ val);
}
retry_attempt++;
} else {
@@ -418,9 +522,9 @@ static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *swrm,
} while (retry_attempt < MAX_FIFO_RD_RETRY);
- dev_err(swrm->dev, "failed to read fifo: reg: 0x%x, rcmd_id: 0x%x,\
+ dev_err(ctrl->dev, "failed to read fifo: reg: 0x%x, rcmd_id: 0x%x,\
dev_num: 0x%x, cmd_data: 0x%x\n",
- reg_addr, swrm->rcmd_id, dev_addr, cmd_data);
+ reg_addr, ctrl->rcmd_id, dev_addr, cmd_data);
return SDW_CMD_IGNORED;
}
@@ -511,10 +615,14 @@ static int qcom_swrm_enumerate(struct sdw_bus *bus)
sdw_extract_slave_id(bus, addr, &id);
found = false;
+ ctrl->clock_stop_not_supported = false;
/* Now compare with entries */
list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
if (sdw_compare_devid(slave, id) == 0) {
qcom_swrm_set_slave_dev_num(bus, slave, i);
+ if (slave->prop.clk_stop_mode1)
+ ctrl->clock_stop_not_supported = true;
+
found = true;
break;
}
@@ -532,39 +640,41 @@ static int qcom_swrm_enumerate(struct sdw_bus *bus)
static irqreturn_t qcom_swrm_wake_irq_handler(int irq, void *dev_id)
{
- struct qcom_swrm_ctrl *swrm = dev_id;
+ struct qcom_swrm_ctrl *ctrl = dev_id;
int ret;
- ret = pm_runtime_resume_and_get(swrm->dev);
+ ret = pm_runtime_get_sync(ctrl->dev);
if (ret < 0 && ret != -EACCES) {
- dev_err_ratelimited(swrm->dev,
- "pm_runtime_resume_and_get failed in %s, ret %d\n",
+ dev_err_ratelimited(ctrl->dev,
+ "pm_runtime_get_sync failed in %s, ret %d\n",
__func__, ret);
+ pm_runtime_put_noidle(ctrl->dev);
return ret;
}
- if (swrm->wake_irq > 0) {
- if (!irqd_irq_disabled(irq_get_irq_data(swrm->wake_irq)))
- disable_irq_nosync(swrm->wake_irq);
+ if (ctrl->wake_irq > 0) {
+ if (!irqd_irq_disabled(irq_get_irq_data(ctrl->wake_irq)))
+ disable_irq_nosync(ctrl->wake_irq);
}
- pm_runtime_mark_last_busy(swrm->dev);
- pm_runtime_put_autosuspend(swrm->dev);
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
return IRQ_HANDLED;
}
static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
{
- struct qcom_swrm_ctrl *swrm = dev_id;
+ struct qcom_swrm_ctrl *ctrl = dev_id;
u32 value, intr_sts, intr_sts_masked, slave_status;
u32 i;
int devnum;
int ret = IRQ_HANDLED;
- clk_prepare_enable(swrm->hclk);
+ clk_prepare_enable(ctrl->hclk);
- swrm->reg_read(swrm, SWRM_INTERRUPT_STATUS, &intr_sts);
- intr_sts_masked = intr_sts & swrm->intr_mask;
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_STATUS],
+ &intr_sts);
+ intr_sts_masked = intr_sts & ctrl->intr_mask;
do {
for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
@@ -574,80 +684,92 @@ static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
switch (value) {
case SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ:
- devnum = qcom_swrm_get_alert_slave_dev_num(swrm);
+ devnum = qcom_swrm_get_alert_slave_dev_num(ctrl);
if (devnum < 0) {
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"no slave alert found.spurious interrupt\n");
} else {
- sdw_handle_slave_status(&swrm->bus, swrm->status);
+ sdw_handle_slave_status(&ctrl->bus, ctrl->status);
}
break;
case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
- dev_dbg_ratelimited(swrm->dev, "SWR new slave attached\n");
- swrm->reg_read(swrm, SWRM_MCP_SLV_STATUS, &slave_status);
- if (swrm->slave_status == slave_status) {
- dev_dbg(swrm->dev, "Slave status not changed %x\n",
+ dev_dbg_ratelimited(ctrl->dev, "SWR new slave attached\n");
+ ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &slave_status);
+ if (ctrl->slave_status == slave_status) {
+ dev_dbg(ctrl->dev, "Slave status not changed %x\n",
slave_status);
} else {
- qcom_swrm_get_device_status(swrm);
- qcom_swrm_enumerate(&swrm->bus);
- sdw_handle_slave_status(&swrm->bus, swrm->status);
+ qcom_swrm_get_device_status(ctrl);
+ qcom_swrm_enumerate(&ctrl->bus);
+ sdw_handle_slave_status(&ctrl->bus, ctrl->status);
}
break;
case SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET:
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR bus clsh detected\n",
__func__);
- swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
- swrm->reg_write(swrm, SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+ ctrl->intr_mask &= ~SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
break;
case SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW:
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
- dev_err_ratelimited(swrm->dev,
+ ctrl->reg_read(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR read FIFO overflow fifo status 0x%x\n",
__func__, value);
break;
case SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW:
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
- dev_err_ratelimited(swrm->dev,
+ ctrl->reg_read(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR read FIFO underflow fifo status 0x%x\n",
__func__, value);
break;
case SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW:
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
- dev_err(swrm->dev,
+ ctrl->reg_read(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
+ dev_err(ctrl->dev,
"%s: SWR write FIFO overflow fifo status %x\n",
__func__, value);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD, 0x1);
break;
case SWRM_INTERRUPT_STATUS_CMD_ERROR:
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
- dev_err_ratelimited(swrm->dev,
+ ctrl->reg_read(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR CMD error, fifo status 0x%x, flushing fifo\n",
__func__, value);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD, 0x1);
break;
case SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION:
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR Port collision detected\n",
__func__);
- swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
- swrm->reg_write(swrm,
- SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+ ctrl->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
break;
case SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH:
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR read enable valid mismatch\n",
__func__);
- swrm->intr_mask &=
+ ctrl->intr_mask &=
~SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH;
- swrm->reg_write(swrm,
- SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
break;
case SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED:
- complete(&swrm->broadcast);
+ complete(&ctrl->broadcast);
break;
case SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2:
break;
@@ -656,22 +778,44 @@ static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
case SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP:
break;
default:
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR unknown interrupt value: %d\n",
__func__, value);
ret = IRQ_NONE;
break;
}
}
- swrm->reg_write(swrm, SWRM_INTERRUPT_CLEAR, intr_sts);
- swrm->reg_read(swrm, SWRM_INTERRUPT_STATUS, &intr_sts);
- intr_sts_masked = intr_sts & swrm->intr_mask;
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CLEAR],
+ intr_sts);
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_STATUS],
+ &intr_sts);
+ intr_sts_masked = intr_sts & ctrl->intr_mask;
} while (intr_sts_masked);
- clk_disable_unprepare(swrm->hclk);
+ clk_disable_unprepare(ctrl->hclk);
return ret;
}
+static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *ctrl)
+{
+ int retry = SWRM_LINK_STATUS_RETRY_CNT;
+ int comp_sts;
+
+ do {
+ ctrl->reg_read(ctrl, SWRM_COMP_STATUS, &comp_sts);
+
+ if (comp_sts & SWRM_FRM_GEN_ENABLED)
+ return true;
+
+ usleep_range(500, 510);
+ } while (retry--);
+
+ dev_err(ctrl->dev, "%s: link status not %s\n", __func__,
+ comp_sts & SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
+
+ return false;
+}
+
static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
{
u32 val;
@@ -689,18 +833,23 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
ctrl->intr_mask = SWRM_INTERRUPT_STATUS_RMSK;
/* Mask soundwire interrupts */
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR,
- SWRM_INTERRUPT_STATUS_RMSK);
+ if (ctrl->version < SWRM_VERSION_2_0_0)
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_MASK_ADDR],
+ SWRM_INTERRUPT_STATUS_RMSK);
/* Configure No pings */
ctrl->reg_read(ctrl, SWRM_MCP_CFG_ADDR, &val);
u32p_replace_bits(&val, SWRM_DEF_CMD_NO_PINGS, SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK);
ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);
- if (ctrl->version >= SWRM_VERSION_1_7_0) {
+ if (ctrl->version == SWRM_VERSION_1_7_0) {
ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL,
SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU);
+ } else if (ctrl->version >= SWRM_VERSION_2_0_0) {
+ ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
+ ctrl->reg_write(ctrl, SWRM_V2_0_CLK_CTRL,
+ SWRM_V2_0_CLK_CTRL_CLK_START);
} else {
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START);
}
@@ -715,16 +864,28 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
SWRM_RD_WR_CMD_RETRIES);
}
+ /* COMP Enable */
+ ctrl->reg_write(ctrl, SWRM_COMP_CFG_ADDR, SWRM_COMP_CFG_ENABLE_MSK);
+
/* Set IRQ to PULSE */
ctrl->reg_write(ctrl, SWRM_COMP_CFG_ADDR,
- SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK |
- SWRM_COMP_CFG_ENABLE_MSK);
+ SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK);
+
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CLEAR],
+ 0xFFFFFFFF);
/* enable CPU IRQs */
if (ctrl->mmio) {
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_CPU_EN,
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
SWRM_INTERRUPT_STATUS_RMSK);
}
+
+ /* Set IRQ to PULSE */
+ ctrl->reg_write(ctrl, SWRM_COMP_CFG_ADDR,
+ SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK |
+ SWRM_COMP_CFG_ENABLE_MSK);
+
+ swrm_wait_for_frame_gen_enabled(ctrl);
ctrl->slave_status = 0;
ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
ctrl->rd_fifo_depth = FIELD_GET(SWRM_COMP_PARAMS_RD_FIFO_DEPTH, val);
@@ -806,12 +967,20 @@ static int qcom_swrm_transport_params(struct sdw_bus *bus,
value = pcfg->off1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
value |= pcfg->off2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
- value |= pcfg->si;
+ value |= pcfg->si & 0xff;
ret = ctrl->reg_write(ctrl, reg, value);
if (ret)
goto err;
+ if (pcfg->si > 0xff) {
+ value = (pcfg->si >> 8) & 0xff;
+ reg = SWRM_DP_SAMPLECTRL2_BANK(params->port_num, bank);
+ ret = ctrl->reg_write(ctrl, reg, value);
+ if (ret)
+ goto err;
+ }
+
if (pcfg->lane_control != SWR_INVALID_PARAM) {
reg = SWRM_DP_PORT_CTRL_2_BANK(params->port_num, bank);
value = pcfg->lane_control;
@@ -1090,11 +1259,12 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai;
int ret, i;
- ret = pm_runtime_resume_and_get(ctrl->dev);
+ ret = pm_runtime_get_sync(ctrl->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(ctrl->dev,
- "pm_runtime_resume_and_get failed in %s, ret %d\n",
+ "pm_runtime_get_sync failed in %s, ret %d\n",
__func__, ret);
+ pm_runtime_put_noidle(ctrl->dev);
return ret;
}
@@ -1132,6 +1302,7 @@ static void qcom_swrm_shutdown(struct snd_pcm_substream *substream,
{
struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+ swrm_wait_for_wr_fifo_done(ctrl);
sdw_release_stream(ctrl->sruntime[dai->id]);
ctrl->sruntime[dai->id] = NULL;
pm_runtime_mark_last_busy(ctrl->dev);
@@ -1194,7 +1365,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
struct device_node *np = ctrl->dev->of_node;
u8 off1[QCOM_SDW_MAX_PORTS];
u8 off2[QCOM_SDW_MAX_PORTS];
- u8 si[QCOM_SDW_MAX_PORTS];
+ u16 si[QCOM_SDW_MAX_PORTS];
u8 bp_mode[QCOM_SDW_MAX_PORTS] = { 0, };
u8 hstart[QCOM_SDW_MAX_PORTS];
u8 hstop[QCOM_SDW_MAX_PORTS];
@@ -1202,6 +1373,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
u8 blk_group_count[QCOM_SDW_MAX_PORTS];
u8 lane_control[QCOM_SDW_MAX_PORTS];
int i, ret, nports, val;
+ bool si_16 = false;
ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
@@ -1245,9 +1417,14 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
return ret;
ret = of_property_read_u8_array(np, "qcom,ports-sinterval-low",
- si, nports);
- if (ret)
- return ret;
+ (u8 *)si, nports);
+ if (ret) {
+ ret = of_property_read_u16_array(np, "qcom,ports-sinterval",
+ si, nports);
+ if (ret)
+ return ret;
+ si_16 = true;
+ }
ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
bp_mode, nports);
@@ -1275,7 +1452,10 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
for (i = 0; i < nports; i++) {
/* Valid port number range is from 1-14 */
- ctrl->pconfig[i + 1].si = si[i];
+ if (si_16)
+ ctrl->pconfig[i + 1].si = si[i];
+ else
+ ctrl->pconfig[i + 1].si = ((u8 *)si)[i];
ctrl->pconfig[i + 1].off1 = off1[i];
ctrl->pconfig[i + 1].off2 = off2[i];
ctrl->pconfig[i + 1].bp_mode = bp_mode[i];
@@ -1292,23 +1472,24 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
#ifdef CONFIG_DEBUG_FS
static int swrm_reg_show(struct seq_file *s_file, void *data)
{
- struct qcom_swrm_ctrl *swrm = s_file->private;
+ struct qcom_swrm_ctrl *ctrl = s_file->private;
int reg, reg_val, ret;
- ret = pm_runtime_resume_and_get(swrm->dev);
+ ret = pm_runtime_get_sync(ctrl->dev);
if (ret < 0 && ret != -EACCES) {
- dev_err_ratelimited(swrm->dev,
- "pm_runtime_resume_and_get failed in %s, ret %d\n",
+ dev_err_ratelimited(ctrl->dev,
+ "pm_runtime_get_sync failed in %s, ret %d\n",
__func__, ret);
+ pm_runtime_put_noidle(ctrl->dev);
return ret;
}
- for (reg = 0; reg <= SWR_MSTR_MAX_REG_ADDR; reg += 4) {
- swrm->reg_read(swrm, reg, &reg_val);
+ for (reg = 0; reg <= ctrl->max_reg; reg += 4) {
+ ctrl->reg_read(ctrl, reg, &reg_val);
seq_printf(s_file, "0x%.3x: 0x%.2x\n", reg, reg_val);
}
- pm_runtime_mark_last_busy(swrm->dev);
- pm_runtime_put_autosuspend(swrm->dev);
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
return 0;
@@ -1331,6 +1512,8 @@ static int qcom_swrm_probe(struct platform_device *pdev)
return -ENOMEM;
data = of_device_get_match_data(dev);
+ ctrl->max_reg = data->max_reg;
+ ctrl->reg_layout = data->reg_layout;
ctrl->rows_index = sdw_find_row_index(data->default_rows);
ctrl->cols_index = sdw_find_col_index(data->default_cols);
#if IS_REACHABLE(CONFIG_SLIMBUS)
@@ -1454,15 +1637,6 @@ static int qcom_swrm_probe(struct platform_device *pdev)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- /* Clk stop is not supported on WSA Soundwire masters */
- if (ctrl->version <= SWRM_VERSION_1_3_0) {
- ctrl->clock_stop_not_supported = true;
- } else {
- ctrl->reg_read(ctrl, SWRM_COMP_MASTER_ID, &val);
- if (val == MASTER_ID_WSA)
- ctrl->clock_stop_not_supported = true;
- }
-
#ifdef CONFIG_DEBUG_FS
ctrl->debugfs = debugfs_create_dir("qualcomm-sdw", ctrl->bus.debugfs);
debugfs_create_file("qualcomm-registers", 0400, ctrl->debugfs, ctrl,
@@ -1489,26 +1663,6 @@ static int qcom_swrm_remove(struct platform_device *pdev)
return 0;
}
-static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *swrm)
-{
- int retry = SWRM_LINK_STATUS_RETRY_CNT;
- int comp_sts;
-
- do {
- swrm->reg_read(swrm, SWRM_COMP_STATUS, &comp_sts);
-
- if (comp_sts & SWRM_FRM_GEN_ENABLED)
- return true;
-
- usleep_range(500, 510);
- } while (retry--);
-
- dev_err(swrm->dev, "%s: link status not %s\n", __func__,
- comp_sts & SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
-
- return false;
-}
-
static int __maybe_unused swrm_runtime_resume(struct device *dev)
{
struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dev);
@@ -1540,19 +1694,27 @@ static int __maybe_unused swrm_runtime_resume(struct device *dev)
} else {
reset_control_reset(ctrl->audio_cgcr);
- if (ctrl->version >= SWRM_VERSION_1_7_0) {
+ if (ctrl->version == SWRM_VERSION_1_7_0) {
ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL,
SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU);
+ } else if (ctrl->version >= SWRM_VERSION_2_0_0) {
+ ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
+ ctrl->reg_write(ctrl, SWRM_V2_0_CLK_CTRL,
+ SWRM_V2_0_CLK_CTRL_CLK_START);
} else {
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START);
}
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_CLEAR,
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CLEAR],
SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET);
ctrl->intr_mask |= SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR, ctrl->intr_mask);
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_CPU_EN, ctrl->intr_mask);
+ if (ctrl->version < SWRM_VERSION_2_0_0)
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_MASK_ADDR],
+ ctrl->intr_mask);
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
usleep_range(100, 105);
if (!swrm_wait_for_frame_gen_enabled(ctrl))
@@ -1571,11 +1733,16 @@ static int __maybe_unused swrm_runtime_suspend(struct device *dev)
struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dev);
int ret;
+ swrm_wait_for_wr_fifo_done(ctrl);
if (!ctrl->clock_stop_not_supported) {
/* Mask bus clash interrupt */
ctrl->intr_mask &= ~SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR, ctrl->intr_mask);
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_CPU_EN, ctrl->intr_mask);
+ if (ctrl->version < SWRM_VERSION_2_0_0)
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_MASK_ADDR],
+ ctrl->intr_mask);
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
/* Prepare slaves for clock stop */
ret = sdw_bus_prep_clk_stop(&ctrl->bus);
if (ret < 0 && ret != -ENODATA) {
@@ -1611,6 +1778,7 @@ static const struct of_device_id qcom_swrm_of_match[] = {
{ .compatible = "qcom,soundwire-v1.5.1", .data = &swrm_v1_5_data },
{ .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_6_data },
{ .compatible = "qcom,soundwire-v1.7.0", .data = &swrm_v1_5_data },
+ { .compatible = "qcom,soundwire-v2.0.0", .data = &swrm_v2_0_data },
{/* sentinel */},
};
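
The qcom.c changes above stop hard-coding register offsets and instead index a per-version reg_layout table taken from the match data (swrm_v1_5_data, swrm_v2_0_data), alongside version-gated writes in qcom_swrm_init() and the runtime PM paths. A minimal sketch of that pattern follows; the enum values, offsets and struct names here are illustrative stand-ins, not the driver's real definitions.

    /*
     * Sketch only: per-version register-offset table indexed by a logical
     * register id, so shared code paths never hard-code an offset.
     */
    enum swrm_reg {
            SWRM_REG_INTERRUPT_STATUS,
            SWRM_REG_INTERRUPT_CLEAR,
            SWRM_REG_INTERRUPT_CPU_EN,
            SWRM_REG_MAX,
    };

    struct swrm_ctrl {
            const unsigned int *reg_layout; /* chosen from of_device_get_match_data() */
            int (*reg_read)(struct swrm_ctrl *ctrl, unsigned int reg, unsigned int *val);
    };

    /* illustrative offsets for one controller revision */
    static const unsigned int swrm_v1_layout[SWRM_REG_MAX] = {
            [SWRM_REG_INTERRUPT_STATUS] = 0x200,
            [SWRM_REG_INTERRUPT_CLEAR]  = 0x208,
            [SWRM_REG_INTERRUPT_CPU_EN] = 0x210,
    };

    static int swrm_read_irq_status(struct swrm_ctrl *ctrl, unsigned int *status)
    {
            /* callers index the table instead of using a fixed address */
            return ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_STATUS], status);
    }

With this shape, a new controller revision such as v2.0.0 is supported by supplying another offset table (plus any version-gated writes, as in qcom_swrm_init() above) without touching the common interrupt and FIFO handling.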
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 379228f22186..d77a8a0d42c8 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1150,7 +1150,8 @@ static struct sdw_master_runtime
*sdw_master_rt_alloc(struct sdw_bus *bus,
struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt;
+ struct sdw_master_runtime *m_rt, *walk_m_rt;
+ struct list_head *insert_after;
m_rt = kzalloc(sizeof(*m_rt), GFP_KERNEL);
if (!m_rt)
@@ -1159,7 +1160,20 @@ static struct sdw_master_runtime
/* Initialization of Master runtime handle */
INIT_LIST_HEAD(&m_rt->port_list);
INIT_LIST_HEAD(&m_rt->slave_rt_list);
- list_add_tail(&m_rt->stream_node, &stream->master_list);
+
+ /*
+ * Add in order of bus id so that when taking the bus_lock
+ * of multiple buses they will always be taken in the same
+ * order to prevent a mutex deadlock.
+ */
+ insert_after = &stream->master_list;
+ list_for_each_entry_reverse(walk_m_rt, &stream->master_list, stream_node) {
+ if (walk_m_rt->bus->id < bus->id) {
+ insert_after = &walk_m_rt->stream_node;
+ break;
+ }
+ }
+ list_add(&m_rt->stream_node, insert_after);
list_add_tail(&m_rt->bus_node, &bus->m_rt_list);
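
The comment above carries the reasoning: later code walks stream->master_list and takes each bus's bus_lock, so inserting master runtimes in ascending bus id keeps every path acquiring those mutexes in one global order. A reduced sketch of why a fixed acquisition order rules out the ABBA deadlock; the helper below is hypothetical and assumes two distinct buses, it is not part of the driver.

    #include <linux/mutex.h>
    #include <linux/soundwire/sdw.h>

    /*
     * Hypothetical helper: always lock the lower-id bus first. If every
     * context orders its locks this way, no task can hold the higher-id
     * bus_lock while waiting on the lower-id one, which is exactly the
     * cycle an ABBA deadlock needs.
     */
    static void sdw_lock_two_buses(struct sdw_bus *a, struct sdw_bus *b)
    {
            struct sdw_bus *first = (a->id < b->id) ? a : b;
            struct sdw_bus *second = (a->id < b->id) ? b : a;

            mutex_lock(&first->bus_lock);
            mutex_lock(&second->bus_lock);
    }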
@@ -1338,7 +1352,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
bool update_params)
{
struct sdw_master_runtime *m_rt;
- struct sdw_bus *bus = NULL;
+ struct sdw_bus *bus;
struct sdw_master_prop *prop;
struct sdw_bus_params params;
int ret;
@@ -1355,25 +1369,23 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
return -EINVAL;
}
- if (!update_params)
- goto program_params;
-
- /* Increment cumulative bus bandwidth */
- /* TODO: Update this during Device-Device support */
- bus->params.bandwidth += m_rt->stream->params.rate *
- m_rt->ch_count * m_rt->stream->params.bps;
+ if (update_params) {
+ /* Increment cumulative bus bandwidth */
+ /* TODO: Update this during Device-Device support */
+ bus->params.bandwidth += m_rt->stream->params.rate *
+ m_rt->ch_count * m_rt->stream->params.bps;
- /* Compute params */
- if (bus->compute_params) {
- ret = bus->compute_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Compute params failed: %d\n",
- ret);
- goto restore_params;
+ /* Compute params */
+ if (bus->compute_params) {
+ ret = bus->compute_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Compute params failed: %d\n",
+ ret);
+ goto restore_params;
+ }
}
}
-program_params:
/* Program params */
ret = sdw_program_params(bus, true);
if (ret < 0) {
@@ -1382,11 +1394,6 @@ program_params:
}
}
- if (!bus) {
- pr_err("Configuration error in %s\n", __func__);
- return -EINVAL;
- }
-
ret = do_bank_switch(stream);
if (ret < 0) {
pr_err("%s: do_bank_switch failed: %d\n", __func__, ret);
@@ -1467,7 +1474,7 @@ EXPORT_SYMBOL(sdw_prepare_stream);
static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
{
struct sdw_master_runtime *m_rt;
- struct sdw_bus *bus = NULL;
+ struct sdw_bus *bus;
int ret;
/* Enable Master(s) and Slave(s) port(s) associated with stream */
@@ -1490,11 +1497,6 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
}
}
- if (!bus) {
- pr_err("Configuration error in %s\n", __func__);
- return -EINVAL;
- }
-
ret = do_bank_switch(stream);
if (ret < 0) {
pr_err("%s: do_bank_switch failed: %d\n", __func__, ret);
@@ -1864,7 +1866,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
struct sdw_stream_runtime *stream)
{
struct sdw_master_runtime *m_rt;
- bool alloc_master_rt = true;
+ bool alloc_master_rt = false;
int ret;
mutex_lock(&bus->bus_lock);
@@ -1886,30 +1888,25 @@ int sdw_stream_add_master(struct sdw_bus *bus,
* it first), if so skip allocation and go to configuration
*/
m_rt = sdw_master_rt_find(bus, stream);
- if (m_rt) {
- alloc_master_rt = false;
- goto skip_alloc_master_rt;
- }
-
- m_rt = sdw_master_rt_alloc(bus, stream);
if (!m_rt) {
- dev_err(bus->dev, "%s: Master runtime alloc failed for stream:%s\n",
- __func__, stream->name);
- ret = -ENOMEM;
- goto unlock;
- }
-skip_alloc_master_rt:
-
- if (sdw_master_port_allocated(m_rt))
- goto skip_alloc_master_port;
+ m_rt = sdw_master_rt_alloc(bus, stream);
+ if (!m_rt) {
+ dev_err(bus->dev, "%s: Master runtime alloc failed for stream:%s\n",
+ __func__, stream->name);
+ ret = -ENOMEM;
+ goto unlock;
+ }
- ret = sdw_master_port_alloc(m_rt, num_ports);
- if (ret)
- goto alloc_error;
+ alloc_master_rt = true;
+ }
- stream->m_rt_count++;
+ if (!sdw_master_port_allocated(m_rt)) {
+ ret = sdw_master_port_alloc(m_rt, num_ports);
+ if (ret)
+ goto alloc_error;
-skip_alloc_master_port:
+ stream->m_rt_count++;
+ }
ret = sdw_master_rt_config(m_rt, stream_config);
if (ret < 0)
@@ -1990,8 +1987,8 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
{
struct sdw_slave_runtime *s_rt;
struct sdw_master_runtime *m_rt;
- bool alloc_master_rt = true;
- bool alloc_slave_rt = true;
+ bool alloc_master_rt = false;
+ bool alloc_slave_rt = false;
int ret;
@@ -2002,47 +1999,41 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
* and go to configuration
*/
m_rt = sdw_master_rt_find(slave->bus, stream);
- if (m_rt) {
- alloc_master_rt = false;
- goto skip_alloc_master_rt;
- }
-
- /*
- * If this API is invoked by Slave first then m_rt is not valid.
- * So, allocate m_rt and add Slave to it.
- */
- m_rt = sdw_master_rt_alloc(slave->bus, stream);
if (!m_rt) {
- dev_err(&slave->dev, "%s: Master runtime alloc failed for stream:%s\n",
- __func__, stream->name);
- ret = -ENOMEM;
- goto unlock;
- }
+ /*
+ * If this API is invoked by Slave first then m_rt is not valid.
+ * So, allocate m_rt and add Slave to it.
+ */
+ m_rt = sdw_master_rt_alloc(slave->bus, stream);
+ if (!m_rt) {
+ dev_err(&slave->dev, "%s: Master runtime alloc failed for stream:%s\n",
+ __func__, stream->name);
+ ret = -ENOMEM;
+ goto unlock;
+ }
-skip_alloc_master_rt:
- s_rt = sdw_slave_rt_find(slave, stream);
- if (s_rt) {
- alloc_slave_rt = false;
- goto skip_alloc_slave_rt;
+ alloc_master_rt = true;
}
- s_rt = sdw_slave_rt_alloc(slave, m_rt);
+ s_rt = sdw_slave_rt_find(slave, stream);
if (!s_rt) {
- dev_err(&slave->dev, "Slave runtime alloc failed for stream:%s\n", stream->name);
- alloc_slave_rt = false;
- ret = -ENOMEM;
- goto alloc_error;
- }
+ s_rt = sdw_slave_rt_alloc(slave, m_rt);
+ if (!s_rt) {
+ dev_err(&slave->dev, "Slave runtime alloc failed for stream:%s\n",
+ stream->name);
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
-skip_alloc_slave_rt:
- if (sdw_slave_port_allocated(s_rt))
- goto skip_port_alloc;
+ alloc_slave_rt = true;
+ }
- ret = sdw_slave_port_alloc(slave, s_rt, num_ports);
- if (ret)
- goto alloc_error;
+ if (!sdw_slave_port_allocated(s_rt)) {
+ ret = sdw_slave_port_alloc(slave, s_rt, num_ports);
+ if (ret)
+ goto alloc_error;
+ }
-skip_port_alloc:
ret = sdw_master_rt_config(m_rt, stream_config);
if (ret)
goto unlock;
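
The sdw_stream_add_master() and sdw_stream_add_slave() hunks above drop the skip_* labels in favour of a find-or-allocate structure, setting alloc_master_rt/alloc_slave_rt only for runtimes created in this call so the error path frees just what it owns. A self-contained sketch of that shape, with stand-in helpers in place of the real static stream.c functions:

    #include <errno.h>
    #include <stdbool.h>

    /* Stand-ins for sdw_master_rt_find()/_alloc()/_config(); not the real API. */
    struct runtime { bool in_use; };
    static struct runtime existing = { .in_use = false };

    static struct runtime *rt_find(void)  { return existing.in_use ? &existing : NULL; }
    static struct runtime *rt_alloc(void) { existing.in_use = true; return &existing; }
    static void rt_free(struct runtime *rt) { rt->in_use = false; }
    static int rt_config(struct runtime *rt) { (void)rt; return 0; }

    /*
     * Find-or-allocate: remember whether this call created the runtime so
     * the error path only tears down what it allocated, mirroring the
     * alloc_master_rt / alloc_slave_rt flags above.
     */
    static int add_runtime(void)
    {
            bool allocated_here = false;
            struct runtime *rt;
            int ret;

            rt = rt_find();
            if (!rt) {
                    rt = rt_alloc();
                    if (!rt)
                            return -ENOMEM;
                    allocated_here = true;
            }

            ret = rt_config(rt);
            if (ret && allocated_here)
                    rt_free(rt);

            return ret;
    }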
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index abbd1fb5fbc0..8962b2557615 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -826,7 +826,7 @@ config SPI_RSPI
SPI driver for Renesas RSPI and QSPI blocks.
config SPI_RZV2M_CSI
- tristate "Renesas RZV2M CSI controller"
+ tristate "Renesas RZ/V2M CSI controller"
depends on ARCH_RENESAS || COMPILE_TEST
help
SPI driver for Renesas RZ/V2M Clocked Serial Interface (CSI)
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 6b46a3b67c41..d91dfbe47aa5 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1543,13 +1543,9 @@ int bcm_qspi_probe(struct platform_device *pdev,
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mspi");
- if (res) {
- qspi->base[MSPI] = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->base[MSPI]))
- return PTR_ERR(qspi->base[MSPI]);
- } else {
- return 0;
- }
+ qspi->base[MSPI] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->base[MSPI]))
+ return PTR_ERR(qspi->base[MSPI]);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
if (res) {
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index ee2528dad02d..9e218e143263 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -2,7 +2,7 @@
* Broadcom BCM63XX High Speed SPI Controller driver
*
* Copyright 2000-2010 Broadcom Corporation
- * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ * Copyright 2012-2013 Jonas Gorski <jonas.gorski@gmail.com>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c
index 8cbd01619789..ca1b4741e9f4 100644
--- a/drivers/spi/spi-bcmbca-hsspi.c
+++ b/drivers/spi/spi-bcmbca-hsspi.c
@@ -3,7 +3,7 @@
* Broadcom BCMBCA High Speed SPI Controller driver
*
* Copyright 2000-2010 Broadcom Corporation
- * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ * Copyright 2012-2013 Jonas Gorski <jonas.gorski@gmail.com>
* Copyright 2019-2022 Broadcom Ltd
*/
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 26ce959d98df..1df9d4844a68 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -1097,6 +1097,12 @@ static int spi_geni_probe(struct platform_device *pdev)
if (mas->cur_xfer_mode == GENI_SE_FIFO)
spi->set_cs = spi_geni_set_cs;
+ /*
+ * TX is required per GSI spec, see setup_gsi_xfer().
+ */
+ if (mas->cur_xfer_mode == GENI_GPI_DMA)
+ spi->flags = SPI_CONTROLLER_MUST_TX;
+
ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
if (ret)
goto spi_geni_release_dma;
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 7a21f2423204..98db47cb4fa4 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -906,9 +906,6 @@ static int axis_fifo_probe(struct platform_device *pdev)
if (rc < 0)
goto err_initial;
- dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i\n",
- &r_mem->start, &fifo->base_addr, fifo->irq);
-
return 0;
err_initial:
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 7e3d1a6f30ba..6c1f91c859ca 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -138,7 +138,7 @@ static struct i2c_driver adt7316_driver = {
.of_match_table = adt7316_of_match,
.pm = ADT7316_PM_OPS,
},
- .probe_new = adt7316_i2c_probe,
+ .probe = adt7316_i2c_probe,
.id_table = adt7316_i2c_id,
};
module_i2c_driver(adt7316_driver);
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index b3152f7153fb..46db6d91542a 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -781,7 +781,7 @@ static struct i2c_driver ad5933_driver = {
.name = "ad5933",
.of_match_table = ad5933_of_match,
},
- .probe_new = ad5933_probe,
+ .probe = ad5933_probe,
.id_table = ad5933_id,
};
module_i2c_driver(ad5933_driver);
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
index 532e12ed72e6..38b370124109 100644
--- a/drivers/staging/media/atomisp/Makefile
+++ b/drivers/staging/media/atomisp/Makefile
@@ -16,6 +16,7 @@ atomisp-objs += \
pci/atomisp_cmd.o \
pci/atomisp_compat_css20.o \
pci/atomisp_csi2.o \
+ pci/atomisp_csi2_bridge.o \
pci/atomisp_drvfs.o \
pci/atomisp_fops.o \
pci/atomisp_ioctl.o \
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
index 43b842043f29..ecf8ba67b7af 100644
--- a/drivers/staging/media/atomisp/TODO
+++ b/drivers/staging/media/atomisp/TODO
@@ -1,213 +1,113 @@
-For both Cherrytrail (CHT) and Baytrail (BHT) the driver
-requires the "candrpv_0415_20150521_0458" firmware version.
-It should be noticed that the firmware file is different,
-depending on the ISP model, so they're stored with different
-names:
+Required firmware
+=================
-- for BHT: /lib/firmware/shisp_2400b0_v21.bin
+The atomisp driver requires the following firmware:
- Warning: The driver was not tested yet for BHT.
+- for BYT: /lib/firmware/shisp_2400b0_v21.bin
-- for CHT: /lib/firmware/shisp_2401a0_v21.bin
-
- https://github.com/intel-aero/meta-intel-aero-base/blob/master/recipes-kernel/linux/linux-yocto/shisp_2401a0_v21.bin
-
-NOTE:
-=====
-
-This driver currently doesn't work with most V4L2 applications,
-as there are still some issues with regards to implementing
-certain APIs at the standard way.
-
-Also, currently only USERPTR streaming mode is working.
-
-In order to test, it is needed to know what's the sensor's
-resolution. This can be checked with:
-
-$ v4l2-ctl --get-fmt-video
- Format Video Capture:
- Width/Height : 1600/1200
- ...
+ With a version of "irci_stable_candrpv_0415_20150423_1753" to check
+ the version run: "strings shisp_2400b0_v21.bin | head -n1", sha256sum:
-It is known to work with:
+ 3847b95fb9f1f8352c595ba7394d55b33176751372baae17f89aa483ec02a21b shisp_2400b0_v21.bin
-- v4l2grab at contrib/test directory at https://git.linuxtv.org/v4l-utils.git/
+ The shisp_2400b0_v21.bin file with this version can be found on
+ the Android factory images of various X86 Android tablets such as
+ e.g. the Chuwi Hi8 Pro.
- The resolution should not be bigger than the max resolution
- supported by the sensor, or it will fail. So, if the sensor
- reports:
-
- The driver can be tested with:
-
- v4l2grab -f YUYV -x 1600 -y 1200 -d /dev/video2 -u
+- for CHT: /lib/firmware/shisp_2401a0_v21.bin
-- NVT at https://github.com/intel/nvt
+ With a version of "irci_stable_candrpv_0415_20150521_0458", sha256sum:
- $ ./v4l2n -o testimage_@.raw \
- --device /dev/video2 \
- --input 0 \
- --exposure=30000,30000,30000,30000 \
- --parm type=1,capturemode=CI_MODE_PREVIEW \
- --fmt type=1,width=1600,height=1200,pixelformat=YUYV \
- --reqbufs count=2,memory=USERPTR \
- --parameters=wb_config.r=32768,wb_config.gr=21043,wb_config.gb=21043,wb_config.b=30863 \
- --capture=20
+ e89359f4e4934c410c83d525e283f34c5fcce9cb5caa75ad8a32d66d3842d95c shisp_2401a0_v21.bin
- As the output is in raw format, images need to be converted with:
+ This can be found here:
+ https://github.com/intel-aero/meta-intel-aero-base/blob/master/recipes-kernel/linux/linux-yocto/shisp_2401a0_v21.bin
- $ for i in $(seq 0 19); do
- name="testimage_$(printf "%03i" $i)"
- ./raw2pnm -x$WIDTH -y$HEIGHT -f$FORMAT $name.raw $name.pnm
- rm $name.raw
- done
TODO
====
-1. Fix support for MMAP streaming mode. This is required for most
- V4L2 applications;
-
-2. Implement and/or fix V4L2 ioctls in order to allow a normal app to
- use it;
-
-3. Ensure that the driver will pass v4l2-compliance tests;
-
-4. Get manufacturer's authorization to redistribute the binaries for
- the firmware files;
-
-5. remove VIDEO_ATOMISP_ISP2401, making the driver to auto-detect the
- register address differences between ISP2400 and ISP2401;
-
-6. Cleanup the driver code, removing the abstraction layers inside it;
-
-7. The atomisp doesn't rely at the usual i2c stuff to discover the
- sensors. Instead, it calls a function from atomisp_gmin_platform.c.
- There are some hacks added there for it to wait for sensors to be
- probed (with a timeout of 2 seconds or so). This should be converted
- to the usual way, using V4L2 async subdev framework to wait for
- cameras to be probed;
-
-8. Switch to standard V4L2 sub-device API for sensor and lens. In
- particular, the user space API needs to support V4L2 controls as
- defined in the V4L2 spec and references to atomisp must be removed from
- these drivers.
-
-9. Use LED flash API for flash LED drivers such as LM3554 (which already
- has a LED class driver).
-
-10. Migrate the sensor drivers out of staging or re-using existing
- drivers;
-
-11. Switch the driver to use pm_runtime stuff. Right now, it probes the
- existing PMIC code and sensors call it directly.
-
-12. There's a problem on sensor drivers: when trying to set a video
- format, the atomisp main driver calls the sensor drivers with the
- sensor turned off. This causes them to fail.
-
- This was fixed at atomisp-ov2880, which has a hack inside it
- to turn it on when VIDIOC_S_FMT is called, but this has to be
- cheked on other drivers as well.
-
- The right fix seems to power on the sensor when a video device is
- opened (or at the first VIDIOC_ ioctl - except for VIDIOC_QUERYCAP),
- powering it down at close() syscall.
-
- Such kind of control would need to be done inside the atomisp driver,
- not at the sensors code.
-
-13. There are several issues related to memory management, that can
- cause crashes and/or memory leaks. The atomisp splits the memory
- management on three separate regions:
-
- - dynamic pool;
- - reserved pool;
- - generic pool
-
- The code implementing it is at:
-
- drivers/staging/media/atomisp/pci/hmm/
+1. Items which MUST be fixed before the driver can be moved out of staging:
- It also has a separate code for managing DMA buffers at:
+* The atomisp ov2680 and ov5693 sensor drivers bind to the same hw-ids as
+ the standard ov2680 and ov5693 drivers under drivers/media/i2c, which
+ conflicts. Drop the atomisp private ov2680 and ov5693 drivers:
+ * Port various ov2680 improvements from atomisp_ov2680.c to regular ov2680.c
+ and switch to regular ov2680 driver
+ * Make atomisp work with the regular ov5693 driver and drop atomisp_ov5693
- drivers/staging/media/atomisp/pci/mmu/
+* Fix atomisp causing the whole machine to hang in its probe() error-exit
+ path taken in the firmware missing case
- The code there is really dirty, ugly and probably wrong. I fixed
- one bug there already, but the best would be to just trash it and use
- something else. Maybe the code from the newer intel driver could
- serve as a model:
+* Remove/disable private IOCTLs
- drivers/staging/media/ipu3/ipu3-mmu.c
+* Remove/disable custom v4l2-ctrls
- But converting it to use something like that is painful and may
- cause some breakages.
+* Remove custom sysfs files created by atomisp_drvfs.c
-14. The file structure needs to get tidied up to resemble a normal Linux
- driver.
+* Remove abuse of priv field in various v4l2 userspace API structs
-15. Lots of the midlayer glue. Unused code and abstraction needs removing.
+* Without a 3A library the capture behaviour is not very good. To take a good
+ picture, the exposure/gain needs to be tuned using v4l2-ctl on the sensor
+ subdev. To fix this, support for the atomisp needs to be added to libcamera.
-16. The AtomISP driver includes some special IOCTLS (ATOMISP_IOC_XXXX_XXXX)
- and controls that require some cleanup. Some of those code may have
- been removed during the cleanups. They could be needed in order to
- properly support 3A algorithms.
+ This MUST be done before moving the driver out of staging so that we can
+ still make changes to e.g. the mediactl topology if necessary for
+ libcamera integration. Since this would be a userspace API break, this
+ means that at least proof-of-concept libcamera integration needs to be
+ ready before moving the driver out of staging.
- Such IOCTL interface needs more documentation. The better would
- be to use something close to the interface used by the IPU3 IMGU driver.
-17. The ISP code has some dependencies of the exact FW version.
- The version defined in pci/sh_css_firmware.c:
+2. Items which SHOULD also be fixed eventually:
- BYT (isp2400): "irci_stable_candrpv_0415_20150521_0458"
+* Remove VIDEO_ATOMISP_ISP2401, making the driver auto-detect the
+ register address differences between ISP2400 and ISP2401
- CHT (isp2401): "irci_ecr - master_20150911_0724"
+* The driver is intended to drive the PCI exposed versions of the device.
+ It will not detect those devices enumerated via ACPI as a field of the
+ i915 GPU driver (only a problem on BYT).
- Those versions don't seem to be available anymore. On the tests we've
- done so far, this version also seems to work for CHT:
+ There are some patches adding i915 GPU support floating at the Yocto's
+ Aero repository (so far, untested upstream).
- "irci_stable_candrpv_0415_20150521_0458"
+* Ensure that the driver will pass v4l2-compliance tests
- Which can be obtainable from Yocto Atom ISP respository.
+* Fix not all v4l2 apps working, e.g. cheese does not work
- but this was not thoroughly tested.
+* Get manufacturer's authorization to redistribute the binaries for
+ the firmware files
- At some point we may need to round up a few driver versions and see if
- there are any specific things that can be done to fold in support for
- multiple firmware versions.
+* The atomisp code still has a lot of cruft which needs cleaning up
-18. Switch from videobuf1 to videobuf2. Videobuf1 is being removed!
+Testing
+=======
-19. Correct Coding Style. Please refrain sending coding style patches
- for this driver until the other work is done, as there will be a lot
- of code churn until this driver becomes functional again.
+Since libcamera support is not available yet, the easiest way to test for
+now is using v4l2-ctl to select the input and gstreamer for streaming.
-20. Remove the logic which sets up pipelines inside it, moving it to
- libcamera and implement MC support.
+To select the input run:
+v4l2-ctl -i <input>
-Limitations
-===========
+Where <input> is 0 (front cam) or 1 (back cam).
-1. To test the patches, you also need the ISP firmware
+The simplest gstreamer pipeline for testing running the sensor
+at its max resolution is:
- for BYT: /lib/firmware/shisp_2400b0_v21.bin
- for CHT: /lib/firmware/shisp_2401a0_v21.bin
+gst-launch-1.0 v4l2src ! videoconvert ! xvimagesink sync=false
- The firmware files will usually be found in /etc/firmware on an Android
- device but can also be extracted from the upgrade kit if you've managed
- to lose them somehow.
+To select e.g. 640x480 as the resolution use:
-2. Without a 3A library the capture behaviour is not very good. To take a good
- picture, you need tune ISP parameters by IOCTL functions or use a 3A library
- such as libxcam.
+gst-launch-1.0 v4l2src ! video/x-raw,format=YV12,width=640,height=480 ! \
+ videoconvert ! xvimagesink sync=false
-3. The driver is intended to drive the PCI exposed versions of the device.
- It will not detect those devices enumerated via ACPI as a field of the
- i915 GPU driver.
+And to show fps use:
- There are some patches adding i915 GPU support floating at the Yocto's
- Aero repository (so far, untested upstream).
+gst-launch-1.0 v4l2src ! video/x-raw,format=YV12,width=640,height=480 ! \
+ videoconvert ! fpsdisplaysink video-sink=xvimagesink sync=false
-4. The driver supports only v2 of the IPU/Camera. It will not work with the
- versions of the hardware in other SoCs.
+Often the image will be over / under exposed. This can be fixed by using
+v4l2-ctl on the sensor subdev to tweak the exposure ctrl; or by using a GUI
+app for v4l2-controls which also supports subdev such as the Fedora patched
+gtk-v4l tool.
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index 273155308fe3..9a11793f34f7 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -16,27 +16,259 @@
*
*/
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kmod.h>
-#include <linux/device.h>
#include <linux/delay.h>
-#include <linux/slab.h>
+#include <linux/errno.h>
#include <linux/gpio/consumer.h>
-#include <linux/gpio/machine.h>
#include <linux/i2c.h>
-#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
-#include <linux/io.h>
-#include "../include/linux/atomisp_gmin_platform.h"
-#include "gc0310.h"
+#define GC0310_NATIVE_WIDTH 656
+#define GC0310_NATIVE_HEIGHT 496
+
+#define GC0310_FPS 30
+#define GC0310_SKIP_FRAMES 3
+
+#define GC0310_FOCAL_LENGTH_NUM 278 /* 2.78mm */
+
+#define GC0310_ID 0xa310
+
+#define GC0310_RESET_RELATED 0xFE
+#define GC0310_REGISTER_PAGE_0 0x0
+#define GC0310_REGISTER_PAGE_3 0x3
+
+/*
+ * GC0310 System control registers
+ */
+#define GC0310_SW_STREAM 0x10
+
+#define GC0310_SC_CMMN_CHIP_ID_H 0xf0
+#define GC0310_SC_CMMN_CHIP_ID_L 0xf1
+
+#define GC0310_AEC_PK_EXPO_H 0x03
+#define GC0310_AEC_PK_EXPO_L 0x04
+#define GC0310_AGC_ADJ 0x48
+#define GC0310_DGC_ADJ 0x71
+#define GC0310_GROUP_ACCESS 0x3208
+
+#define GC0310_H_CROP_START_H 0x09
+#define GC0310_H_CROP_START_L 0x0A
+#define GC0310_V_CROP_START_H 0x0B
+#define GC0310_V_CROP_START_L 0x0C
+#define GC0310_H_OUTSIZE_H 0x0F
+#define GC0310_H_OUTSIZE_L 0x10
+#define GC0310_V_OUTSIZE_H 0x0D
+#define GC0310_V_OUTSIZE_L 0x0E
+#define GC0310_H_BLANKING_H 0x05
+#define GC0310_H_BLANKING_L 0x06
+#define GC0310_V_BLANKING_H 0x07
+#define GC0310_V_BLANKING_L 0x08
+#define GC0310_SH_DELAY 0x11
+
+#define GC0310_START_STREAMING 0x94 /* 8-bit enable */
+#define GC0310_STOP_STREAMING 0x0 /* 8-bit disable */
+
+#define to_gc0310_sensor(x) container_of(x, struct gc0310_device, sd)
+
+struct gc0310_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ /* Protect against concurrent changes to controls */
+ struct mutex input_lock;
+ bool is_streaming;
+
+ struct fwnode_handle *ep_fwnode;
+ struct gpio_desc *reset;
+ struct gpio_desc *powerdown;
+
+ struct gc0310_mode {
+ struct v4l2_mbus_framefmt fmt;
+ } mode;
+
+ struct gc0310_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ } ctrls;
+};
+
+struct gc0310_reg {
+ u8 reg;
+ u8 val;
+};
+
+static const struct gc0310_reg gc0310_reset_register[] = {
+ /* System registers */
+ { 0xfe, 0xf0 },
+ { 0xfe, 0xf0 },
+ { 0xfe, 0x00 },
+
+ { 0xfc, 0x0e }, /* 4e */
+ { 0xfc, 0x0e }, /* 16//4e // [0]apwd [6]regf_clk_gate */
+ { 0xf2, 0x80 }, /* sync output */
+ { 0xf3, 0x00 }, /* 1f//01 data output */
+ { 0xf7, 0x33 }, /* f9 */
+ { 0xf8, 0x05 }, /* 00 */
+ { 0xf9, 0x0e }, /* 0x8e //0f */
+ { 0xfa, 0x11 },
+
+ /* MIPI */
+ { 0xfe, 0x03 },
+ { 0x01, 0x03 }, /* mipi 1lane */
+ { 0x02, 0x22 }, /* 0x33 */
+ { 0x03, 0x94 },
+ { 0x04, 0x01 }, /* fifo_prog */
+ { 0x05, 0x00 }, /* fifo_prog */
+ { 0x06, 0x80 }, /* b0 //YUV ISP data */
+ { 0x11, 0x2a }, /* 1e //LDI set YUV422 */
+ { 0x12, 0x90 }, /* 00 //04 //00 //04//00 //LWC[7:0] */
+ { 0x13, 0x02 }, /* 05 //05 //LWC[15:8] */
+ { 0x15, 0x12 }, /* 0x10 //DPHYY_MODE read_ready */
+ { 0x17, 0x01 },
+ { 0x40, 0x08 },
+ { 0x41, 0x00 },
+ { 0x42, 0x00 },
+ { 0x43, 0x00 },
+ { 0x21, 0x02 }, /* 0x01 */
+ { 0x22, 0x02 }, /* 0x01 */
+ { 0x23, 0x01 }, /* 0x05 //Nor:0x05 DOU:0x06 */
+ { 0x29, 0x00 },
+ { 0x2A, 0x25 }, /* 0x05 //data zero 0x7a de */
+ { 0x2B, 0x02 },
+
+ { 0xfe, 0x00 },
+
+ /* CISCTL */
+ { 0x00, 0x2f }, /* 2f//0f//02//01 */
+ { 0x01, 0x0f }, /* 06 */
+ { 0x02, 0x04 },
+ { 0x4f, 0x00 }, /* AEC 0FF */
+ { 0x03, 0x01 }, /* 0x03 //04 */
+ { 0x04, 0xc0 }, /* 0xe8 //58 */
+ { 0x05, 0x00 },
+ { 0x06, 0xb2 }, /* 0x0a //HB */
+ { 0x07, 0x00 },
+ { 0x08, 0x0c }, /* 0x89 //VB */
+ { 0x09, 0x00 }, /* row start */
+ { 0x0a, 0x00 },
+ { 0x0b, 0x00 }, /* col start */
+ { 0x0c, 0x00 },
+ { 0x0d, 0x01 }, /* height */
+ { 0x0e, 0xf2 }, /* 0xf7 //height */
+ { 0x0f, 0x02 }, /* width */
+ { 0x10, 0x94 }, /* 0xa0 //height */
+ { 0x17, 0x14 },
+ { 0x18, 0x1a }, /* 0a//[4]double reset */
+ { 0x19, 0x14 }, /* AD pipeline */
+ { 0x1b, 0x48 },
+ { 0x1e, 0x6b }, /* 3b//col bias */
+ { 0x1f, 0x28 }, /* 20//00//08//txlow */
+ { 0x20, 0x89 }, /* 88//0c//[3:2]DA15 */
+ { 0x21, 0x49 }, /* 48//[3] txhigh */
+ { 0x22, 0xb0 },
+ { 0x23, 0x04 }, /* [1:0]vcm_r */
+ { 0x24, 0x16 }, /* 15 */
+ { 0x34, 0x20 }, /* [6:4] rsg high//range */
+
+ /* BLK */
+ { 0x26, 0x23 }, /* [1]dark_current_en [0]offset_en */
+ { 0x28, 0xff }, /* BLK_limie_value */
+ { 0x29, 0x00 }, /* global offset */
+ { 0x33, 0x18 }, /* offset_ratio */
+ { 0x37, 0x20 }, /* dark_current_ratio */
+ { 0x2a, 0x00 },
+ { 0x2b, 0x00 },
+ { 0x2c, 0x00 },
+ { 0x2d, 0x00 },
+ { 0x2e, 0x00 },
+ { 0x2f, 0x00 },
+ { 0x30, 0x00 },
+ { 0x31, 0x00 },
+ { 0x47, 0x80 }, /* a7 */
+ { 0x4e, 0x66 }, /* select_row */
+ { 0xa8, 0x02 }, /* win_width_dark, same with crop_win_width */
+ { 0xa9, 0x80 },
+
+ /* ISP */
+ { 0x40, 0x06 }, /* 0xff //ff //48 */
+ { 0x41, 0x00 }, /* 0x21 //00//[0]curve_en */
+ { 0x42, 0x04 }, /* 0xcf //0a//[1]awn_en */
+ { 0x44, 0x18 }, /* 0x18 //02 */
+ { 0x46, 0x02 }, /* 0x03 //sync */
+ { 0x49, 0x03 },
+ { 0x4c, 0x20 }, /* 00[5]pretect exp */
+ { 0x50, 0x01 }, /* crop enable */
+ { 0x51, 0x00 },
+ { 0x52, 0x00 },
+ { 0x53, 0x00 },
+ { 0x54, 0x01 },
+ { 0x55, 0x01 }, /* crop window height */
+ { 0x56, 0xf0 },
+ { 0x57, 0x02 }, /* crop window width */
+ { 0x58, 0x90 },
+
+ /* Gain */
+ { 0x70, 0x70 }, /* 70 //80//global gain */
+ { 0x71, 0x20 }, /* pregain gain */
+ { 0x72, 0x40 }, /* post gain */
+ { 0x5a, 0x84 }, /* 84//analog gain 0 */
+ { 0x5b, 0xc9 }, /* c9 */
+ { 0x5c, 0xed }, /* ed//not use pga gain highest level */
+ { 0x77, 0x40 }, /* R gain 0x74 //awb gain */
+ { 0x78, 0x40 }, /* G gain */
+ { 0x79, 0x40 }, /* B gain 0x5f */
+
+ { 0x48, 0x00 },
+ { 0xfe, 0x01 },
+ { 0x0a, 0x45 }, /* [7]col gain mode */
+
+ { 0x3e, 0x40 },
+ { 0x3f, 0x5c },
+ { 0x40, 0x7b },
+ { 0x41, 0xbd },
+ { 0x42, 0xf6 },
+ { 0x43, 0x63 },
+ { 0x03, 0x60 },
+ { 0x44, 0x03 },
+
+ /* Dark / Sun mode related */
+ { 0xfe, 0x01 },
+ { 0x45, 0xa4 }, /* 0xf7 */
+ { 0x46, 0xf0 }, /* 0xff //f0//sun value th */
+ { 0x48, 0x03 }, /* sun mode */
+ { 0x4f, 0x60 }, /* sun_clamp */
+ { 0xfe, 0x00 },
+};
+
+static const struct gc0310_reg gc0310_VGA_30fps[] = {
+ { 0xfe, 0x00 },
+ { 0x0d, 0x01 }, /* height */
+ { 0x0e, 0xf2 }, /* 0xf7 //height */
+ { 0x0f, 0x02 }, /* width */
+ { 0x10, 0x94 }, /* 0xa0 //height */
+
+ { 0x50, 0x01 }, /* crop enable */
+ { 0x51, 0x00 },
+ { 0x52, 0x00 },
+ { 0x53, 0x00 },
+ { 0x54, 0x01 },
+ { 0x55, 0x01 }, /* crop window height */
+ { 0x56, 0xf0 },
+ { 0x57, 0x02 }, /* crop window width */
+ { 0x58, 0x90 },
+
+ { 0xfe, 0x03 },
+ { 0x12, 0x90 }, /* 00 //04 //00 //04//00 //LWC[7:0] */
+ { 0x13, 0x02 }, /* 05 //05 //LWC[15:8] */
+
+ { 0xfe, 0x00 },
+};
/*
* gc0310_write_reg_array - Initializes a list of GC0310 registers
@@ -179,7 +411,10 @@ static int gc0310_detect(struct i2c_client *client)
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return -ENODEV;
- ret = i2c_smbus_read_word_swapped(client, GC0310_SC_CMMN_CHIP_ID_H);
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret >= 0)
+ ret = i2c_smbus_read_word_swapped(client, GC0310_SC_CMMN_CHIP_ID_H);
+ pm_runtime_put(&client->dev);
if (ret < 0) {
dev_err(&client->dev, "read sensor_id failed: %d\n", ret);
return -ENODEV;
@@ -268,19 +503,6 @@ error_unlock:
return ret;
}
-static int gc0310_s_config(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- ret = pm_runtime_get_sync(&client->dev);
- if (ret >= 0)
- ret = gc0310_detect(client);
-
- pm_runtime_put(&client->dev);
- return ret;
-}
-
static int gc0310_g_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_interval *interval)
{
@@ -373,12 +595,12 @@ static void gc0310_remove(struct i2c_client *client)
dev_dbg(&client->dev, "gc0310_remove...\n");
- atomisp_unregister_subdev(sd);
- v4l2_device_unregister_subdev(sd);
+ v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrls.handler);
+ mutex_destroy(&dev->input_lock);
+ fwnode_handle_put(dev->ep_fwnode);
pm_runtime_disable(&client->dev);
- kfree(dev);
}
static int gc0310_probe(struct i2c_client *client)
@@ -390,19 +612,27 @@ static int gc0310_probe(struct i2c_client *client)
if (!dev)
return -ENOMEM;
- ret = v4l2_get_acpi_sensor_info(&client->dev, NULL);
- if (ret)
- return ret;
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver.
+ * Bridge drivers doing this may also add GPIO mappings, wait for this.
+ */
+ dev->ep_fwnode = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ if (!dev->ep_fwnode)
+ return dev_err_probe(&client->dev, -EPROBE_DEFER, "waiting for fwnode graph endpoint\n");
dev->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(dev->reset))
+ if (IS_ERR(dev->reset)) {
+ fwnode_handle_put(dev->ep_fwnode);
return dev_err_probe(&client->dev, PTR_ERR(dev->reset),
"getting reset GPIO\n");
+ }
dev->powerdown = devm_gpiod_get(&client->dev, "powerdown", GPIOD_OUT_HIGH);
- if (IS_ERR(dev->powerdown))
+ if (IS_ERR(dev->powerdown)) {
+ fwnode_handle_put(dev->ep_fwnode);
return dev_err_probe(&client->dev, PTR_ERR(dev->powerdown),
"getting powerdown GPIO\n");
+ }
mutex_init(&dev->input_lock);
v4l2_i2c_subdev_init(&dev->sd, client, &gc0310_ops);
@@ -413,7 +643,7 @@ static int gc0310_probe(struct i2c_client *client)
pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
- ret = gc0310_s_config(&dev->sd);
+ ret = gc0310_detect(client);
if (ret) {
gc0310_remove(client);
return ret;
@@ -422,6 +652,7 @@ static int gc0310_probe(struct i2c_client *client)
dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
dev->pad.flags = MEDIA_PAD_FL_SOURCE;
dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ dev->sd.fwnode = dev->ep_fwnode;
ret = gc0310_init_controls(dev);
if (ret) {
@@ -435,8 +666,7 @@ static int gc0310_probe(struct i2c_client *client)
return ret;
}
- ret = atomisp_register_sensor_no_gmin(&dev->sd, 1, ATOMISP_INPUT_FORMAT_RAW_8,
- atomisp_bayer_order_grbg);
+ ret = v4l2_async_register_subdev_sensor(&dev->sd);
if (ret) {
gc0310_remove(client);
return ret;
@@ -471,7 +701,6 @@ static int gc0310_resume(struct device *dev)
static DEFINE_RUNTIME_DEV_PM_OPS(gc0310_pm_ops, gc0310_suspend, gc0310_resume, NULL);
static const struct acpi_device_id gc0310_acpi_match[] = {
- {"XXGC0310"},
{"INT0310"},
{},
};
@@ -483,7 +712,7 @@ static struct i2c_driver gc0310_driver = {
.pm = pm_sleep_ptr(&gc0310_pm_ops),
.acpi_match_table = gc0310_acpi_match,
},
- .probe_new = gc0310_probe,
+ .probe = gc0310_probe,
.remove = gc0310_remove,
};
module_i2c_driver(gc0310_driver);
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index cb4c79b483ca..9fa390fbc5f3 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -864,7 +864,7 @@ static struct i2c_driver gc2235_driver = {
.name = "gc2235",
.acpi_match_table = gc2235_acpi_match,
},
- .probe_new = gc2235_probe,
+ .probe = gc2235_probe,
.remove = gc2235_remove,
};
module_i2c_driver(gc2235_driver);
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index c4ce4cd445d7..cf5d9317b11a 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -945,7 +945,7 @@ static struct i2c_driver lm3554_driver = {
.pm = &lm3554_pm_ops,
.acpi_match_table = lm3554_acpi_match,
},
- .probe_new = lm3554_probe,
+ .probe = lm3554_probe,
.remove = lm3554_remove,
};
module_i2c_driver(lm3554_driver);
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 0e5a981dd331..1c6643c442ef 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -1600,7 +1600,7 @@ static struct i2c_driver mt9m114_driver = {
.name = "mt9m114",
.acpi_match_table = mt9m114_acpi_match,
},
- .probe_new = mt9m114_probe,
+ .probe = mt9m114_probe,
.remove = mt9m114_remove,
};
module_i2c_driver(mt9m114_driver);
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index c079368019e8..4cc2839937af 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -28,15 +28,13 @@
#include <media/ov_16bit_addr_reg_helpers.h>
#include <media/v4l2-device.h>
-#include "../include/linux/atomisp_gmin_platform.h"
-
#include "ov2680.h"
-static enum atomisp_bayer_order ov2680_bayer_order_mapping[] = {
- atomisp_bayer_order_bggr,
- atomisp_bayer_order_grbg,
- atomisp_bayer_order_gbrg,
- atomisp_bayer_order_rggb,
+static const struct v4l2_rect ov2680_default_crop = {
+ .left = OV2680_ACTIVE_START_LEFT,
+ .top = OV2680_ACTIVE_START_TOP,
+ .width = OV2680_ACTIVE_WIDTH,
+ .height = OV2680_ACTIVE_HEIGHT,
};
static int ov2680_write_reg_array(struct i2c_client *client,
@@ -54,7 +52,7 @@ static int ov2680_write_reg_array(struct i2c_client *client,
return 0;
}
-static void ov2680_set_bayer_order(struct ov2680_device *sensor, struct v4l2_mbus_framefmt *fmt)
+static void ov2680_set_bayer_order(struct ov2680_dev *sensor, struct v4l2_mbus_framefmt *fmt)
{
static const int ov2680_hv_flip_bayer_order[] = {
MEDIA_BUS_FMT_SBGGR10_1X10,
@@ -62,7 +60,6 @@ static void ov2680_set_bayer_order(struct ov2680_device *sensor, struct v4l2_mbu
MEDIA_BUS_FMT_SGBRG10_1X10,
MEDIA_BUS_FMT_SRGGB10_1X10,
};
- struct camera_mipi_info *ov2680_info;
int hv_flip = 0;
if (sensor->ctrls.vflip->val)
@@ -72,14 +69,9 @@ static void ov2680_set_bayer_order(struct ov2680_device *sensor, struct v4l2_mbu
hv_flip += 2;
fmt->code = ov2680_hv_flip_bayer_order[hv_flip];
-
- /* TODO atomisp specific custom API, should be removed */
- ov2680_info = v4l2_get_subdev_hostdata(&sensor->sd);
- if (ov2680_info)
- ov2680_info->raw_bayer_order = ov2680_bayer_order_mapping[hv_flip];
}
-static int ov2680_set_vflip(struct ov2680_device *sensor, s32 val)
+static int ov2680_set_vflip(struct ov2680_dev *sensor, s32 val)
{
int ret;
@@ -94,7 +86,7 @@ static int ov2680_set_vflip(struct ov2680_device *sensor, s32 val)
return 0;
}
-static int ov2680_set_hflip(struct ov2680_device *sensor, s32 val)
+static int ov2680_set_hflip(struct ov2680_dev *sensor, s32 val)
{
int ret;
@@ -109,17 +101,17 @@ static int ov2680_set_hflip(struct ov2680_device *sensor, s32 val)
return 0;
}
-static int ov2680_exposure_set(struct ov2680_device *sensor, u32 exp)
+static int ov2680_exposure_set(struct ov2680_dev *sensor, u32 exp)
{
return ov_write_reg24(sensor->client, OV2680_REG_EXPOSURE_PK_HIGH, exp << 4);
}
-static int ov2680_gain_set(struct ov2680_device *sensor, u32 gain)
+static int ov2680_gain_set(struct ov2680_dev *sensor, u32 gain)
{
return ov_write_reg16(sensor->client, OV2680_REG_GAIN_PK, gain);
}
-static int ov2680_test_pattern_set(struct ov2680_device *sensor, int value)
+static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
{
int ret;
@@ -140,7 +132,7 @@ static int ov2680_test_pattern_set(struct ov2680_device *sensor, int value)
static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
- struct ov2680_device *sensor = to_ov2680_sensor(sd);
+ struct ov2680_dev *sensor = to_ov2680_sensor(sd);
int ret;
/* Only apply changes to the controls if the device is powered up */
@@ -183,14 +175,17 @@ static int ov2680_init_registers(struct v4l2_subdev *sd)
int ret;
ret = ov_write_reg8(client, OV2680_SW_RESET, 0x01);
+
+ /* Wait for sensor reset */
+ usleep_range(1000, 2000);
+
ret |= ov2680_write_reg_array(client, ov2680_global_setting);
return ret;
}
static struct v4l2_mbus_framefmt *
-__ov2680_get_pad_format(struct ov2680_device *sensor,
- struct v4l2_subdev_state *state,
+__ov2680_get_pad_format(struct ov2680_dev *sensor, struct v4l2_subdev_state *state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
@@ -199,7 +194,17 @@ __ov2680_get_pad_format(struct ov2680_device *sensor,
return &sensor->mode.fmt;
}
-static void ov2680_fill_format(struct ov2680_device *sensor,
+static struct v4l2_rect *
+__ov2680_get_pad_crop(struct ov2680_dev *sensor, struct v4l2_subdev_state *state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&sensor->sd, state, pad);
+
+ return &sensor->mode.crop;
+}
+
+static void ov2680_fill_format(struct ov2680_dev *sensor,
struct v4l2_mbus_framefmt *fmt,
unsigned int width, unsigned int height)
{
@@ -210,13 +215,15 @@ static void ov2680_fill_format(struct ov2680_device *sensor,
ov2680_set_bayer_order(sensor, fmt);
}
-static void ov2680_calc_mode(struct ov2680_device *sensor, int width, int height)
+static void ov2680_calc_mode(struct ov2680_dev *sensor)
{
+ int width = sensor->mode.fmt.width;
+ int height = sensor->mode.fmt.height;
int orig_width = width;
int orig_height = height;
- if (width <= (OV2680_NATIVE_WIDTH / 2) &&
- height <= (OV2680_NATIVE_HEIGHT / 2)) {
+ if (width <= (sensor->mode.crop.width / 2) &&
+ height <= (sensor->mode.crop.height / 2)) {
sensor->mode.binning = true;
width *= 2;
height *= 2;
@@ -224,8 +231,10 @@ static void ov2680_calc_mode(struct ov2680_device *sensor, int width, int height
sensor->mode.binning = false;
}
- sensor->mode.h_start = ((OV2680_NATIVE_WIDTH - width) / 2) & ~1;
- sensor->mode.v_start = ((OV2680_NATIVE_HEIGHT - height) / 2) & ~1;
+ sensor->mode.h_start =
+ (sensor->mode.crop.left + (sensor->mode.crop.width - width) / 2) & ~1;
+ sensor->mode.v_start =
+ (sensor->mode.crop.top + (sensor->mode.crop.height - height) / 2) & ~1;
sensor->mode.h_end = min(sensor->mode.h_start + width + OV2680_END_MARGIN - 1,
OV2680_NATIVE_WIDTH - 1);
sensor->mode.v_end = min(sensor->mode.v_start + height + OV2680_END_MARGIN - 1,
@@ -236,31 +245,25 @@ static void ov2680_calc_mode(struct ov2680_device *sensor, int width, int height
sensor->mode.vts = OV2680_LINES_PER_FRAME;
}
-static int ov2680_set_mode(struct ov2680_device *sensor)
+static int ov2680_set_mode(struct ov2680_dev *sensor)
{
struct i2c_client *client = sensor->client;
- u8 pll_div, unknown, inc, fmt1, fmt2;
+ u8 sensor_ctrl_0a, inc, fmt1, fmt2;
int ret;
if (sensor->mode.binning) {
- pll_div = 1;
- unknown = 0x23;
+ sensor_ctrl_0a = 0x23;
inc = 0x31;
fmt1 = 0xc2;
fmt2 = 0x01;
} else {
- pll_div = 0;
- unknown = 0x21;
+ sensor_ctrl_0a = 0x21;
inc = 0x11;
fmt1 = 0xc0;
fmt2 = 0x00;
}
- ret = ov_write_reg8(client, 0x3086, pll_div);
- if (ret)
- return ret;
-
- ret = ov_write_reg8(client, 0x370a, unknown);
+ ret = ov_write_reg8(client, OV2680_REG_SENSOR_CTRL_0A, sensor_ctrl_0a);
if (ret)
return ret;
@@ -337,12 +340,18 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
- struct ov2680_device *sensor = to_ov2680_sensor(sd);
+ struct ov2680_dev *sensor = to_ov2680_sensor(sd);
struct v4l2_mbus_framefmt *fmt;
+ const struct v4l2_rect *crop;
unsigned int width, height;
- width = min_t(unsigned int, ALIGN(format->format.width, 2), OV2680_NATIVE_WIDTH);
- height = min_t(unsigned int, ALIGN(format->format.height, 2), OV2680_NATIVE_HEIGHT);
+ crop = __ov2680_get_pad_crop(sensor, sd_state, format->pad, format->which);
+
+ /* Limit set_fmt max size to crop width / height */
+ width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
+ OV2680_MIN_CROP_WIDTH, crop->width);
+ height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
+ OV2680_MIN_CROP_HEIGHT, crop->height);
fmt = __ov2680_get_pad_format(sensor, sd_state, format->pad, format->which);
ov2680_fill_format(sensor, fmt, width, height);
@@ -352,9 +361,9 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
- mutex_lock(&sensor->input_lock);
- ov2680_calc_mode(sensor, fmt->width, fmt->height);
- mutex_unlock(&sensor->input_lock);
+ mutex_lock(&sensor->lock);
+ ov2680_calc_mode(sensor);
+ mutex_unlock(&sensor->lock);
return 0;
}
@@ -362,7 +371,7 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
- struct ov2680_device *sensor = to_ov2680_sensor(sd);
+ struct ov2680_dev *sensor = to_ov2680_sensor(sd);
struct v4l2_mbus_framefmt *fmt;
fmt = __ov2680_get_pad_format(sensor, sd_state, format->pad, format->which);
@@ -370,6 +379,105 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
return 0;
}
+static int ov2680_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov2680_dev *sensor = to_ov2680_sensor(sd);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ mutex_lock(&sensor->lock);
+ sel->r = *__ov2680_get_pad_crop(sensor, state, sel->pad, sel->which);
+ mutex_unlock(&sensor->lock);
+ break;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV2680_NATIVE_WIDTH;
+ sel->r.height = OV2680_NATIVE_HEIGHT;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = OV2680_ACTIVE_START_TOP;
+ sel->r.left = OV2680_ACTIVE_START_LEFT;
+ sel->r.width = OV2680_ACTIVE_WIDTH;
+ sel->r.height = OV2680_ACTIVE_HEIGHT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ov2680_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov2680_dev *sensor = to_ov2680_sensor(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *__crop;
+ struct v4l2_rect rect;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /*
+ * Clamp the boundaries of the crop rectangle to the size of the sensor
+ * pixel array. Align to multiples of 2 to ensure the Bayer pattern isn't
+ * disrupted.
+ */
+ rect.left = clamp(ALIGN(sel->r.left, 2), OV2680_NATIVE_START_LEFT,
+ OV2680_NATIVE_WIDTH);
+ rect.top = clamp(ALIGN(sel->r.top, 2), OV2680_NATIVE_START_TOP,
+ OV2680_NATIVE_HEIGHT);
+ rect.width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
+ OV2680_MIN_CROP_WIDTH, OV2680_NATIVE_WIDTH);
+ rect.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+ OV2680_MIN_CROP_HEIGHT, OV2680_NATIVE_HEIGHT);
+
+ /* Make sure the crop rectangle isn't outside the bounds of the array */
+ rect.width = min_t(unsigned int, rect.width,
+ OV2680_NATIVE_WIDTH - rect.left);
+ rect.height = min_t(unsigned int, rect.height,
+ OV2680_NATIVE_HEIGHT - rect.top);
+
+ __crop = __ov2680_get_pad_crop(sensor, state, sel->pad, sel->which);
+
+ if (rect.width != __crop->width || rect.height != __crop->height) {
+ /*
+ * Reset the output image size if the crop rectangle size has
+ * been modified.
+ */
+ format = __ov2680_get_pad_format(sensor, state, sel->pad, sel->which);
+ format->width = rect.width;
+ format->height = rect.height;
+ }
+
+ *__crop = rect;
+ sel->r = rect;
+
+ return 0;
+}
+
+static int ov2680_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = {
+ .width = 800,
+ .height = 600,
+ },
+ };
+
+ sd_state->pads[0].try_crop = ov2680_default_crop;
+
+ return ov2680_set_fmt(sd, sd_state, &fmt);
+}
+
static int ov2680_detect(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
@@ -405,11 +513,11 @@ static int ov2680_detect(struct i2c_client *client)
static int ov2680_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct ov2680_device *sensor = to_ov2680_sensor(sd);
+ struct ov2680_dev *sensor = to_ov2680_sensor(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- mutex_lock(&sensor->input_lock);
+ mutex_lock(&sensor->lock);
if (sensor->is_streaming == enable) {
dev_warn(&client->dev, "stream already %s\n", enable ? "started" : "stopped");
@@ -442,14 +550,14 @@ static int ov2680_s_stream(struct v4l2_subdev *sd, int enable)
v4l2_ctrl_activate(sensor->ctrls.vflip, !enable);
v4l2_ctrl_activate(sensor->ctrls.hflip, !enable);
- mutex_unlock(&sensor->input_lock);
+ mutex_unlock(&sensor->lock);
return 0;
error_power_down:
pm_runtime_put(sensor->sd.dev);
sensor->is_streaming = false;
error_unlock:
- mutex_unlock(&sensor->input_lock);
+ mutex_unlock(&sensor->lock);
return ret;
}
@@ -550,11 +658,14 @@ static const struct v4l2_subdev_sensor_ops ov2680_sensor_ops = {
};
static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
+ .init_cfg = ov2680_init_cfg,
.enum_mbus_code = ov2680_enum_mbus_code,
.enum_frame_size = ov2680_enum_frame_size,
.enum_frame_interval = ov2680_enum_frame_interval,
.get_fmt = ov2680_get_fmt,
.set_fmt = ov2680_set_fmt,
+ .get_selection = ov2680_get_selection,
+ .set_selection = ov2680_set_selection,
};
static const struct v4l2_subdev_ops ov2680_ops = {
@@ -563,7 +674,7 @@ static const struct v4l2_subdev_ops ov2680_ops = {
.sensor = &ov2680_sensor_ops,
};
-static int ov2680_init_controls(struct ov2680_device *sensor)
+static int ov2680_init_controls(struct ov2680_dev *sensor)
{
static const char * const test_pattern_menu[] = {
"Disabled",
@@ -579,7 +690,7 @@ static int ov2680_init_controls(struct ov2680_device *sensor)
v4l2_ctrl_handler_init(hdl, 4);
- hdl->lock = &sensor->input_lock;
+ hdl->lock = &sensor->lock;
ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
@@ -605,39 +716,46 @@ static int ov2680_init_controls(struct ov2680_device *sensor)
static void ov2680_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov2680_device *sensor = to_ov2680_sensor(sd);
+ struct ov2680_dev *sensor = to_ov2680_sensor(sd);
dev_dbg(&client->dev, "ov2680_remove...\n");
- atomisp_unregister_subdev(sd);
- v4l2_device_unregister_subdev(sd);
+ v4l2_async_unregister_subdev(&sensor->sd);
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ mutex_destroy(&sensor->lock);
+ fwnode_handle_put(sensor->ep_fwnode);
pm_runtime_disable(&client->dev);
}
static int ov2680_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- struct ov2680_device *sensor;
+ struct ov2680_dev *sensor;
int ret;
sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
if (!sensor)
return -ENOMEM;
- mutex_init(&sensor->input_lock);
+ mutex_init(&sensor->lock);
sensor->client = client;
v4l2_i2c_subdev_init(&sensor->sd, client, &ov2680_ops);
- ret = v4l2_get_acpi_sensor_info(dev, NULL);
- if (ret)
- return ret;
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver.
+ * Bridge drivers doing this may also add GPIO mappings; wait for this.
+ */
+ sensor->ep_fwnode = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!sensor->ep_fwnode)
+ return dev_err_probe(dev, -EPROBE_DEFER, "waiting for fwnode graph endpoint\n");
sensor->powerdown = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
- if (IS_ERR(sensor->powerdown))
+ if (IS_ERR(sensor->powerdown)) {
+ fwnode_handle_put(sensor->ep_fwnode);
return dev_err_probe(dev, PTR_ERR(sensor->powerdown), "getting powerdown GPIO\n");
+ }
pm_runtime_set_suspended(dev);
pm_runtime_enable(dev);
@@ -653,6 +771,7 @@ static int ov2680_probe(struct i2c_client *client)
sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ sensor->sd.fwnode = sensor->ep_fwnode;
ret = ov2680_init_controls(sensor);
if (ret) {
@@ -666,10 +785,11 @@ static int ov2680_probe(struct i2c_client *client)
return ret;
}
+ sensor->mode.crop = ov2680_default_crop;
ov2680_fill_format(sensor, &sensor->mode.fmt, OV2680_NATIVE_WIDTH, OV2680_NATIVE_HEIGHT);
+ ov2680_calc_mode(sensor);
- ret = atomisp_register_sensor_no_gmin(&sensor->sd, 1, ATOMISP_INPUT_FORMAT_RAW_10,
- atomisp_bayer_order_bggr);
+ ret = v4l2_async_register_subdev_sensor(&sensor->sd);
if (ret) {
ov2680_remove(client);
return ret;
@@ -681,7 +801,7 @@ static int ov2680_probe(struct i2c_client *client)
static int ov2680_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov2680_device *sensor = to_ov2680_sensor(sd);
+ struct ov2680_dev *sensor = to_ov2680_sensor(sd);
gpiod_set_value_cansleep(sensor->powerdown, 1);
return 0;
@@ -690,7 +810,7 @@ static int ov2680_suspend(struct device *dev)
static int ov2680_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov2680_device *sensor = to_ov2680_sensor(sd);
+ struct ov2680_dev *sensor = to_ov2680_sensor(sd);
/* according to DS, at least 5ms is needed after DOVDD (enabled by ACPI) */
usleep_range(5000, 6000);
@@ -719,7 +839,7 @@ static struct i2c_driver ov2680_driver = {
.pm = pm_sleep_ptr(&ov2680_pm_ops),
.acpi_match_table = ov2680_acpi_match,
},
- .probe_new = ov2680_probe,
+ .probe = ov2680_probe,
.remove = ov2680_remove,
};
module_i2c_driver(ov2680_driver);
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index 5d2e6e2e72f0..6a72691ed5b7 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -1019,7 +1019,7 @@ static struct i2c_driver ov2722_driver = {
.name = "ov2722",
.acpi_match_table = ov2722_acpi_match,
},
- .probe_new = ov2722_probe,
+ .probe = ov2722_probe,
.remove = ov2722_remove,
};
module_i2c_driver(ov2722_driver);
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
deleted file mode 100644
index d40406289598..000000000000
--- a/drivers/staging/media/atomisp/i2c/gc0310.h
+++ /dev/null
@@ -1,309 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for GalaxyCore GC0310 VGA camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#ifndef __GC0310_H__
-#define __GC0310_H__
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/i2c.h>
-#include <linux/acpi.h>
-#include <linux/delay.h>
-#include <linux/videodev2.h>
-#include <linux/spinlock.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <linux/v4l2-mediabus.h>
-#include <media/media-entity.h>
-
-#include "../include/linux/atomisp_platform.h"
-
-#define GC0310_NATIVE_WIDTH 656
-#define GC0310_NATIVE_HEIGHT 496
-
-#define GC0310_FPS 30
-#define GC0310_SKIP_FRAMES 3
-
-#define GC0310_FOCAL_LENGTH_NUM 278 /* 2.78mm */
-
-#define GC0310_ID 0xa310
-
-#define GC0310_RESET_RELATED 0xFE
-#define GC0310_REGISTER_PAGE_0 0x0
-#define GC0310_REGISTER_PAGE_3 0x3
-
-#define GC0310_FINE_INTG_TIME_MIN 0
-#define GC0310_FINE_INTG_TIME_MAX_MARGIN 0
-#define GC0310_COARSE_INTG_TIME_MIN 1
-#define GC0310_COARSE_INTG_TIME_MAX_MARGIN 6
-
-/*
- * GC0310 System control registers
- */
-#define GC0310_SW_STREAM 0x10
-
-#define GC0310_SC_CMMN_CHIP_ID_H 0xf0
-#define GC0310_SC_CMMN_CHIP_ID_L 0xf1
-
-#define GC0310_AEC_PK_EXPO_H 0x03
-#define GC0310_AEC_PK_EXPO_L 0x04
-#define GC0310_AGC_ADJ 0x48
-#define GC0310_DGC_ADJ 0x71
-#if 0
-#define GC0310_GROUP_ACCESS 0x3208
-#endif
-
-#define GC0310_H_CROP_START_H 0x09
-#define GC0310_H_CROP_START_L 0x0A
-#define GC0310_V_CROP_START_H 0x0B
-#define GC0310_V_CROP_START_L 0x0C
-#define GC0310_H_OUTSIZE_H 0x0F
-#define GC0310_H_OUTSIZE_L 0x10
-#define GC0310_V_OUTSIZE_H 0x0D
-#define GC0310_V_OUTSIZE_L 0x0E
-#define GC0310_H_BLANKING_H 0x05
-#define GC0310_H_BLANKING_L 0x06
-#define GC0310_V_BLANKING_H 0x07
-#define GC0310_V_BLANKING_L 0x08
-#define GC0310_SH_DELAY 0x11
-
-#define GC0310_START_STREAMING 0x94 /* 8-bit enable */
-#define GC0310_STOP_STREAMING 0x0 /* 8-bit disable */
-
-/*
- * gc0310 device structure.
- */
-struct gc0310_device {
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct mutex input_lock;
- bool is_streaming;
-
- struct gpio_desc *reset;
- struct gpio_desc *powerdown;
-
- struct gc0310_mode {
- struct v4l2_mbus_framefmt fmt;
- } mode;
-
- struct gc0310_ctrls {
- struct v4l2_ctrl_handler handler;
- struct v4l2_ctrl *exposure;
- struct v4l2_ctrl *gain;
- } ctrls;
-};
-
-/**
- * struct gc0310_reg - MI sensor register format
- * @reg: 16-bit offset to register
- * @val: 8/16/32-bit register value
- *
- * Define a structure for sensor register initialization values
- */
-struct gc0310_reg {
- u8 reg;
- u8 val; /* @set value for read/mod/write, @mask */
-};
-
-#define to_gc0310_sensor(x) container_of(x, struct gc0310_device, sd)
-
-/*
- * Register settings for various resolution
- */
-static const struct gc0310_reg gc0310_reset_register[] = {
-/////////////////////////////////////////////////
-///////////////// system reg /////////////////
-/////////////////////////////////////////////////
- { 0xfe, 0xf0 },
- { 0xfe, 0xf0 },
- { 0xfe, 0x00 },
-
- { 0xfc, 0x0e }, /* 4e */
- { 0xfc, 0x0e }, /* 16//4e // [0]apwd [6]regf_clk_gate */
- { 0xf2, 0x80 }, /* sync output */
- { 0xf3, 0x00 }, /* 1f//01 data output */
- { 0xf7, 0x33 }, /* f9 */
- { 0xf8, 0x05 }, /* 00 */
- { 0xf9, 0x0e }, /* 0x8e //0f */
- { 0xfa, 0x11 },
-
-/////////////////////////////////////////////////
-/////////////////// MIPI ////////////////////
-/////////////////////////////////////////////////
- { 0xfe, 0x03 },
- { 0x01, 0x03 }, /* mipi 1lane */
- { 0x02, 0x22 }, /* 0x33 */
- { 0x03, 0x94 },
- { 0x04, 0x01 }, /* fifo_prog */
- { 0x05, 0x00 }, /* fifo_prog */
- { 0x06, 0x80 }, /* b0 //YUV ISP data */
- { 0x11, 0x2a }, /* 1e //LDI set YUV422 */
- { 0x12, 0x90 }, /* 00 //04 //00 //04//00 //LWC[7:0] */
- { 0x13, 0x02 }, /* 05 //05 //LWC[15:8] */
- { 0x15, 0x12 }, /* 0x10 //DPHYY_MODE read_ready */
- { 0x17, 0x01 },
- { 0x40, 0x08 },
- { 0x41, 0x00 },
- { 0x42, 0x00 },
- { 0x43, 0x00 },
- { 0x21, 0x02 }, /* 0x01 */
- { 0x22, 0x02 }, /* 0x01 */
- { 0x23, 0x01 }, /* 0x05 //Nor:0x05 DOU:0x06 */
- { 0x29, 0x00 },
- { 0x2A, 0x25 }, /* 0x05 //data zero 0x7a de */
- { 0x2B, 0x02 },
-
- { 0xfe, 0x00 },
-
-/////////////////////////////////////////////////
-///////////////// CISCTL reg /////////////////
-/////////////////////////////////////////////////
- { 0x00, 0x2f }, /* 2f//0f//02//01 */
- { 0x01, 0x0f }, /* 06 */
- { 0x02, 0x04 },
- { 0x4f, 0x00 }, /* AEC 0FF */
- { 0x03, 0x01 }, /* 0x03 //04 */
- { 0x04, 0xc0 }, /* 0xe8 //58 */
- { 0x05, 0x00 },
- { 0x06, 0xb2 }, /* 0x0a //HB */
- { 0x07, 0x00 },
- { 0x08, 0x0c }, /* 0x89 //VB */
- { 0x09, 0x00 }, /* row start */
- { 0x0a, 0x00 },
- { 0x0b, 0x00 }, /* col start */
- { 0x0c, 0x00 },
- { 0x0d, 0x01 }, /* height */
- { 0x0e, 0xf2 }, /* 0xf7 //height */
- { 0x0f, 0x02 }, /* width */
- { 0x10, 0x94 }, /* 0xa0 //height */
- { 0x17, 0x14 },
- { 0x18, 0x1a }, /* 0a//[4]double reset */
- { 0x19, 0x14 }, /* AD pipeline */
- { 0x1b, 0x48 },
- { 0x1e, 0x6b }, /* 3b//col bias */
- { 0x1f, 0x28 }, /* 20//00//08//txlow */
- { 0x20, 0x89 }, /* 88//0c//[3:2]DA15 */
- { 0x21, 0x49 }, /* 48//[3] txhigh */
- { 0x22, 0xb0 },
- { 0x23, 0x04 }, /* [1:0]vcm_r */
- { 0x24, 0x16 }, /* 15 */
- { 0x34, 0x20 }, /* [6:4] rsg high//range */
-
-/////////////////////////////////////////////////
-//////////////////// BLK ////////////////////
-/////////////////////////////////////////////////
- { 0x26, 0x23 }, /* [1]dark_current_en [0]offset_en */
- { 0x28, 0xff }, /* BLK_limie_value */
- { 0x29, 0x00 }, /* global offset */
- { 0x33, 0x18 }, /* offset_ratio */
- { 0x37, 0x20 }, /* dark_current_ratio */
- { 0x2a, 0x00 },
- { 0x2b, 0x00 },
- { 0x2c, 0x00 },
- { 0x2d, 0x00 },
- { 0x2e, 0x00 },
- { 0x2f, 0x00 },
- { 0x30, 0x00 },
- { 0x31, 0x00 },
- { 0x47, 0x80 }, /* a7 */
- { 0x4e, 0x66 }, /* select_row */
- { 0xa8, 0x02 }, /* win_width_dark, same with crop_win_width */
- { 0xa9, 0x80 },
-
-/////////////////////////////////////////////////
-////////////////// ISP reg ///////////////////
-/////////////////////////////////////////////////
- { 0x40, 0x06 }, /* 0xff //ff //48 */
- { 0x41, 0x00 }, /* 0x21 //00//[0]curve_en */
- { 0x42, 0x04 }, /* 0xcf //0a//[1]awn_en */
- { 0x44, 0x18 }, /* 0x18 //02 */
- { 0x46, 0x02 }, /* 0x03 //sync */
- { 0x49, 0x03 },
- { 0x4c, 0x20 }, /* 00[5]pretect exp */
- { 0x50, 0x01 }, /* crop enable */
- { 0x51, 0x00 },
- { 0x52, 0x00 },
- { 0x53, 0x00 },
- { 0x54, 0x01 },
- { 0x55, 0x01 }, /* crop window height */
- { 0x56, 0xf0 },
- { 0x57, 0x02 }, /* crop window width */
- { 0x58, 0x90 },
-
-/////////////////////////////////////////////////
-/////////////////// GAIN ////////////////////
-/////////////////////////////////////////////////
- { 0x70, 0x70 }, /* 70 //80//global gain */
- { 0x71, 0x20 }, /* pregain gain */
- { 0x72, 0x40 }, /* post gain */
- { 0x5a, 0x84 }, /* 84//analog gain 0 */
- { 0x5b, 0xc9 }, /* c9 */
- { 0x5c, 0xed }, /* ed//not use pga gain highest level */
- { 0x77, 0x40 }, /* R gain 0x74 //awb gain */
- { 0x78, 0x40 }, /* G gain */
- { 0x79, 0x40 }, /* B gain 0x5f */
-
- { 0x48, 0x00 },
- { 0xfe, 0x01 },
- { 0x0a, 0x45 }, /* [7]col gain mode */
-
- { 0x3e, 0x40 },
- { 0x3f, 0x5c },
- { 0x40, 0x7b },
- { 0x41, 0xbd },
- { 0x42, 0xf6 },
- { 0x43, 0x63 },
- { 0x03, 0x60 },
- { 0x44, 0x03 },
-
-/////////////////////////////////////////////////
-///////////////// dark sun //////////////////
-/////////////////////////////////////////////////
- { 0xfe, 0x01 },
- { 0x45, 0xa4 }, /* 0xf7 */
- { 0x46, 0xf0 }, /* 0xff //f0//sun value th */
- { 0x48, 0x03 }, /* sun mode */
- { 0x4f, 0x60 }, /* sun_clamp */
- { 0xfe, 0x00 },
-};
-
-static struct gc0310_reg const gc0310_VGA_30fps[] = {
- { 0xfe, 0x00 },
- { 0x0d, 0x01 }, /* height */
- { 0x0e, 0xf2 }, /* 0xf7 //height */
- { 0x0f, 0x02 }, /* width */
- { 0x10, 0x94 }, /* 0xa0 //height */
-
- { 0x50, 0x01 }, /* crop enable */
- { 0x51, 0x00 },
- { 0x52, 0x00 },
- { 0x53, 0x00 },
- { 0x54, 0x01 },
- { 0x55, 0x01 }, /* crop window height */
- { 0x56, 0xf0 },
- { 0x57, 0x02 }, /* crop window width */
- { 0x58, 0x90 },
-
- { 0xfe, 0x03 },
- { 0x12, 0x90 }, /* 00 //04 //00 //04//00 //LWC[7:0] */
- { 0x13, 0x02 }, /* 05 //05 //LWC[15:8] */
-
- { 0xfe, 0x00 },
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
index baf49eb0659e..d032af245674 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -30,10 +30,16 @@
#include <linux/v4l2-mediabus.h>
#include <media/media-entity.h>
-#include "../include/linux/atomisp_platform.h"
-
#define OV2680_NATIVE_WIDTH 1616
#define OV2680_NATIVE_HEIGHT 1216
+#define OV2680_NATIVE_START_LEFT 0
+#define OV2680_NATIVE_START_TOP 0
+#define OV2680_ACTIVE_WIDTH 1600
+#define OV2680_ACTIVE_HEIGHT 1200
+#define OV2680_ACTIVE_START_LEFT 8
+#define OV2680_ACTIVE_START_TOP 8
+#define OV2680_MIN_CROP_WIDTH 2
+#define OV2680_MIN_CROP_HEIGHT 2
/* 1704 * 1294 * 30fps = 66MHz pixel clock */
#define OV2680_PIXELS_PER_LINE 1704
@@ -66,6 +72,8 @@
#define OV2680_REG_EXPOSURE_PK_HIGH 0x3500
#define OV2680_REG_GAIN_PK 0x350a
+#define OV2680_REG_SENSOR_CTRL_0A 0x370a
+
#define OV2680_HORIZONTAL_START_H 0x3800 /* Bit[11:8] */
#define OV2680_HORIZONTAL_START_L 0x3801 /* Bit[7:0] */
#define OV2680_VERTICAL_START_H 0x3802 /* Bit[11:8] */
@@ -108,15 +116,18 @@
/*
* ov2680 device structure.
*/
-struct ov2680_device {
+struct ov2680_dev {
struct v4l2_subdev sd;
struct media_pad pad;
- struct mutex input_lock;
+ /* Protect against concurrent changes to controls */
+ struct mutex lock;
struct i2c_client *client;
struct gpio_desc *powerdown;
+ struct fwnode_handle *ep_fwnode;
bool is_streaming;
struct ov2680_mode {
+ struct v4l2_rect crop;
struct v4l2_mbus_framefmt fmt;
bool binning;
u16 h_start;
@@ -152,92 +163,86 @@ struct ov2680_reg {
u32 val; /* @set value for read/mod/write, @mask */
};
-#define to_ov2680_sensor(x) container_of(x, struct ov2680_device, sd)
+#define to_ov2680_sensor(x) container_of(x, struct ov2680_dev, sd)
static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
{
- struct ov2680_device *sensor =
- container_of(ctrl->handler, struct ov2680_device, ctrls.handler);
+ struct ov2680_dev *sensor =
+ container_of(ctrl->handler, struct ov2680_dev, ctrls.handler);
return &sensor->sd;
}
static struct ov2680_reg const ov2680_global_setting[] = {
- {0x0103, 0x01},
- {0x3002, 0x00},
+ /* MIPI PHY, 0x10 -> 0x1c enable bp_c_hs_en_lat and bp_d_hs_en_lat */
{0x3016, 0x1c},
- {0x3018, 0x44},
- {0x3020, 0x00},
- {0x3080, 0x02},
+
+ /* PLL MULT bits 0-7, datasheet default 0x37 for 24 MHz extclk, use 0x45 for 19.2 MHz extclk */
{0x3082, 0x45},
- {0x3084, 0x09},
- {0x3085, 0x04},
+
+ /* R MANUAL set exposure (0x01) and gain (0x02) to manual (hw does not do auto) */
{0x3503, 0x03},
- {0x350b, 0x36},
- {0x3600, 0xb4},
- {0x3603, 0x39},
- {0x3604, 0x24},
- {0x3605, 0x00},
- {0x3620, 0x26},
- {0x3621, 0x37},
- {0x3622, 0x04},
- {0x3628, 0x00},
- {0x3705, 0x3c},
- {0x370c, 0x50},
- {0x370d, 0xc0},
- {0x3718, 0x88},
- {0x3720, 0x00},
- {0x3721, 0x00},
- {0x3722, 0x00},
- {0x3723, 0x00},
- {0x3738, 0x00},
- {0x3717, 0x58},
- {0x3781, 0x80},
- {0x3789, 0x60},
- {0x3800, 0x00},
- {0x3819, 0x04},
+
+ /* Analog control register tweaks */
+ {0x3603, 0x39}, /* Reset value 0x99 */
+ {0x3604, 0x24}, /* Reset value 0x74 */
+ {0x3621, 0x37}, /* Reset value 0x44 */
+
+ /* Sensor control register tweaks */
+ {0x3701, 0x64}, /* Reset value 0x61 */
+ {0x3705, 0x3c}, /* Reset value 0x21 */
+ {0x370c, 0x50}, /* Reset value 0x10 */
+ {0x370d, 0xc0}, /* Reset value 0x00 */
+ {0x3718, 0x88}, /* Reset value 0x80 */
+
+ /* PSRAM tweaks */
+ {0x3781, 0x80}, /* Reset value 0x00 */
+ {0x3784, 0x0c}, /* Reset value 0x00, based on OV2680_R1A_AM10.ovt */
+ {0x3789, 0x60}, /* Reset value 0x50 */
+
+ /* BLC CTRL00 0x01 -> 0x81 set avg_weight to 8 */
{0x4000, 0x81},
- {0x4001, 0x40},
+
+ /* Set black level compensation range to 0 - 3 (default 0 - 11) */
{0x4008, 0x00},
{0x4009, 0x03},
+
+ /* VFIFO R2 0x00 -> 0x02 set Frame reset enable */
{0x4602, 0x02},
+
+ /* MIPI ctrl CLK PREPARE MIN change from 0x26 (38) -> 0x36 (54) */
{0x481f, 0x36},
+
+ /* MIPI ctrl CLK LPX P MIN change from 0x32 (50) -> 0x36 (54) */
{0x4825, 0x36},
- {0x4837, 0x18},
+
+ /* R ISP CTRL2 0x20 -> 0x30, set sof_sel bit */
{0x5002, 0x30},
- {0x5004, 0x04},//manual awb 1x
- {0x5005, 0x00},
- {0x5006, 0x04},
- {0x5007, 0x00},
- {0x5008, 0x04},
- {0x5009, 0x00},
- {0x5080, 0x00},
- {0x5081, 0x41},
- {0x5708, 0x01}, /* add for full size flip off and mirror off 2014/09/11 */
- {0x3701, 0x64}, //add on 14/05/13
- {0x3784, 0x0c}, //based OV2680_R1A_AM10.ovt add on 14/06/13
- {0x5780, 0x3e}, //based OV2680_R1A_AM10.ovt,Adjust DPC setting (57xx) on 14/06/13
- {0x5781, 0x0f},
- {0x5782, 0x04},
- {0x5783, 0x02},
- {0x5784, 0x01},
- {0x5785, 0x01},
- {0x5786, 0x00},
- {0x5787, 0x04},
+
+ /*
+ * Window CONTROL 0x00 -> 0x01, enable manual window control;
+ * this is necessary for full-size flip and mirror support.
+ */
+ {0x5708, 0x01},
+
+ /*
+ * DPC CTRL0 0x14 -> 0x3e, set enable_tail, enable_3x3_cluster
+ * and enable_general_tail bits based on OV2680_R1A_AM10.ovt.
+ */
+ {0x5780, 0x3e},
+
+ /* DPC MORE CONNECTION CASE THRE 0x0c (12) -> 0x02 (2) */
{0x5788, 0x02},
- {0x5789, 0x00},
- {0x578a, 0x01},
- {0x578b, 0x02},
- {0x578c, 0x03},
- {0x578d, 0x03},
+
+ /* DPC GAIN LIST1 0x0f (15) -> 0x08 (8) */
{0x578e, 0x08},
+
+ /* DPC GAIN LIST2 0x3f (63) -> 0x0c (12) */
{0x578f, 0x0c},
- {0x5790, 0x08},
- {0x5791, 0x04},
+
+ /* DPC THRE RATIO 0x04 (4) -> 0x00 (0) */
{0x5792, 0x00},
- {0x5793, 0x00},
- {0x5794, 0x03}, //based OV2680_R1A_AM10.ovt,Adjust DPC setting (57xx) on 14/06/13
- {0x0100, 0x00}, //stream off
+
{}
};
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index da8c3b1d3bcd..460a4e34c55b 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -726,51 +726,11 @@ static void *ov5693_otp_read(struct v4l2_subdev *sd)
return buf;
}
-static int ov5693_g_priv_int_data(struct v4l2_subdev *sd,
- struct v4l2_private_int_data *priv)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov5693_device *dev = to_ov5693_sensor(sd);
- u8 __user *to = priv->data;
- u32 read_size = priv->size;
- int ret;
-
- /* No need to copy data if size is 0 */
- if (!read_size)
- goto out;
-
- if (IS_ERR(dev->otp_data)) {
- dev_err(&client->dev, "OTP data not available");
- return PTR_ERR(dev->otp_data);
- }
-
- /* Correct read_size value only if bigger than maximum */
- if (read_size > OV5693_OTP_DATA_SIZE)
- read_size = OV5693_OTP_DATA_SIZE;
-
- ret = copy_to_user(to, dev->otp_data, read_size);
- if (ret) {
- dev_err(&client->dev, "%s: failed to copy OTP data to user\n",
- __func__);
- return -EFAULT;
- }
-
- pr_debug("%s read_size:%d\n", __func__, read_size);
-
-out:
- /* Return correct size */
- priv->size = dev->otp_size;
-
- return 0;
-}
-
static long ov5693_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
switch (cmd) {
case ATOMISP_IOC_S_EXPOSURE:
return ov5693_s_exposure(sd, arg);
- case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
- return ov5693_g_priv_int_data(sd, arg);
default:
return -EINVAL;
}
@@ -1794,7 +1754,7 @@ static struct i2c_driver ov5693_driver = {
.name = "ov5693",
.acpi_match_table = ov5693_acpi_match,
},
- .probe_new = ov5693_probe,
+ .probe = ov5693_probe,
.remove = ov5693_remove,
};
module_i2c_driver(ov5693_driver);
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
index c7ec56a1c064..14b1757e6674 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -38,7 +38,6 @@
#define CI_MODE_PREVIEW 0x8000
#define CI_MODE_VIDEO 0x4000
#define CI_MODE_STILL_CAPTURE 0x2000
-#define CI_MODE_CONTINUOUS 0x1000
#define CI_MODE_NONE 0x0000
#define OUTPUT_MODE_FILE 0x0100
@@ -150,12 +149,6 @@ enum atomisp_calibration_type {
calibration_type3
};
-struct atomisp_calibration_group {
- unsigned int size;
- unsigned int type;
- unsigned short *calb_grp_values;
-};
-
struct atomisp_gc_config {
__u16 gain_k1;
__u16 gain_k2;
@@ -266,26 +259,6 @@ enum atomisp_metadata_type {
ATOMISP_METADATA_TYPE_NUM,
};
-struct atomisp_metadata_with_type {
- /* to specify which type of metadata to get */
- enum atomisp_metadata_type type;
- void __user *data;
- u32 width;
- u32 height;
- u32 stride; /* in bytes */
- u32 exp_id; /* exposure ID */
- u32 *effective_width; /* mipi packets valid data size */
-};
-
-struct atomisp_metadata {
- void __user *data;
- u32 width;
- u32 height;
- u32 stride; /* in bytes */
- u32 exp_id; /* exposure ID */
- u32 *effective_width; /* mipi packets valid data size */
-};
-
struct atomisp_ext_isp_ctrl {
u32 id;
u32 data;
@@ -299,14 +272,6 @@ struct atomisp_3a_statistics {
u32 isp_config_id; /* isp config ID */
};
-struct atomisp_ae_window {
- int x_left;
- int x_right;
- int y_top;
- int y_bottom;
- int weight;
-};
-
/* White Balance (Gain Adjust) */
struct atomisp_wb_config {
unsigned int integer_bits;
@@ -755,53 +720,6 @@ struct atomisp_s_runmode {
__u32 mode;
};
-struct atomisp_update_exposure {
- unsigned int gain;
- unsigned int digi_gain;
- unsigned int update_gain;
- unsigned int update_digi_gain;
-};
-
-/*
- * V4L2 private internal data interface.
- * -----------------------------------------------------------------------------
- * struct v4l2_private_int_data - request private data stored in video device
- * internal memory.
- * @size: sanity check to ensure userspace's buffer fits whole private data.
- * If not, kernel will make partial copy (or nothing if @size == 0).
- * @size is always corrected for the minimum necessary if IOCTL returns
- * no error.
- * @data: pointer to userspace buffer.
- */
-struct v4l2_private_int_data {
- __u32 size;
- void __user *data;
- __u32 reserved[2];
-};
-
-enum atomisp_sensor_ae_bracketing_mode {
- SENSOR_AE_BRACKETING_MODE_OFF = 0,
- SENSOR_AE_BRACKETING_MODE_SINGLE, /* back to SW standby after bracketing */
- SENSOR_AE_BRACKETING_MODE_SINGLE_TO_STREAMING, /* back to normal streaming after bracketing */
- SENSOR_AE_BRACKETING_MODE_LOOP, /* continue AE bracketing in loop mode */
-};
-
-struct atomisp_sensor_ae_bracketing_info {
- unsigned int modes; /* bit mask to indicate supported modes */
- unsigned int lut_depth;
-};
-
-struct atomisp_sensor_ae_bracketing_lut_entry {
- __u16 coarse_integration_time;
- __u16 analog_gain;
- __u16 digital_gain;
-};
-
-struct atomisp_sensor_ae_bracketing_lut {
- struct atomisp_sensor_ae_bracketing_lut_entry *lut;
- unsigned int lut_size;
-};
-
/*Private IOCTLs for ISP */
#define ATOMISP_IOC_G_XNR \
_IOR('v', BASE_VIDIOC_PRIVATE + 0, int)
@@ -906,20 +824,12 @@ struct atomisp_sensor_ae_bracketing_lut {
#define ATOMISP_IOC_S_EXPOSURE \
_IOW('v', BASE_VIDIOC_PRIVATE + 21, struct atomisp_exposure)
-/* sensor calibration registers group */
-#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group)
-
/* white balance Correction */
#define ATOMISP_IOC_G_3A_CONFIG \
_IOR('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
#define ATOMISP_IOC_S_3A_CONFIG \
_IOW('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
-/* sensor OTP memory read */
-#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data)
-
/* LCS (shading) table write */
#define ATOMISP_IOC_S_ISP_SHD_TAB \
_IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table)
@@ -931,19 +841,9 @@ struct atomisp_sensor_ae_bracketing_lut {
#define ATOMISP_IOC_S_ISP_GAMMA_CORRECTION \
_IOW('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config)
-/* motor internal memory read */
-#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data)
-
#define ATOMISP_IOC_S_PARAMETERS \
_IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters)
-#define ATOMISP_IOC_G_METADATA \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata)
-
-#define ATOMISP_IOC_G_METADATA_BY_TYPE \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata_with_type)
-
#define ATOMISP_IOC_EXT_ISP_CTRL \
_IOWR('v', BASE_VIDIOC_PRIVATE + 35, struct atomisp_ext_isp_ctrl)
@@ -962,27 +862,9 @@ struct atomisp_sensor_ae_bracketing_lut {
#define ATOMISP_IOC_S_FORMATS_CONFIG \
_IOW('v', BASE_VIDIOC_PRIVATE + 39, struct atomisp_formats_config)
-#define ATOMISP_IOC_S_EXPOSURE_WINDOW \
- _IOW('v', BASE_VIDIOC_PRIVATE + 40, struct atomisp_ae_window)
-
#define ATOMISP_IOC_INJECT_A_FAKE_EVENT \
_IOW('v', BASE_VIDIOC_PRIVATE + 42, int)
-#define ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO \
- _IOR('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_info)
-
-#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE \
- _IOW('v', BASE_VIDIOC_PRIVATE + 43, unsigned int)
-
-#define ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE \
- _IOR('v', BASE_VIDIOC_PRIVATE + 43, unsigned int)
-
-#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT \
- _IOW('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_lut)
-
-#define ATOMISP_IOC_G_INVALID_FRAME_NUM \
- _IOR('v', BASE_VIDIOC_PRIVATE + 44, unsigned int)
-
#define ATOMISP_IOC_S_ARRAY_RESOLUTION \
_IOW('v', BASE_VIDIOC_PRIVATE + 45, struct atomisp_resolution)
@@ -996,9 +878,6 @@ struct atomisp_sensor_ae_bracketing_lut {
#define ATOMISP_IOC_S_SENSOR_RUNMODE \
_IOW('v', BASE_VIDIOC_PRIVATE + 48, struct atomisp_s_runmode)
-#define ATOMISP_IOC_G_UPDATE_EXPOSURE \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 49, struct atomisp_update_exposure)
-
/*
* Reserved ioctls. We have customer implementing it internally.
* We can't use both numbers to not cause ABI conflict.
@@ -1059,9 +938,9 @@ struct atomisp_sensor_ae_bracketing_lut {
#define V4L2_CID_RUN_MODE (V4L2_CID_CAMERA_LASTP1 + 20)
#define ATOMISP_RUN_MODE_VIDEO 1
#define ATOMISP_RUN_MODE_STILL_CAPTURE 2
-#define ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE 3
-#define ATOMISP_RUN_MODE_PREVIEW 4
-#define ATOMISP_RUN_MODE_SDV 5
+#define ATOMISP_RUN_MODE_PREVIEW 3
+#define ATOMISP_RUN_MODE_MIN 1
+#define ATOMISP_RUN_MODE_MAX 3
#define V4L2_CID_ENABLE_VFPP (V4L2_CID_CAMERA_LASTP1 + 21)
#define V4L2_CID_ATOMISP_CONTINUOUS_MODE (V4L2_CID_CAMERA_LASTP1 + 22)
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index e8e965f73fc8..487ef5846c24 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -125,6 +125,7 @@ struct intel_v4l2_subdev_id {
struct intel_v4l2_subdev_table {
enum intel_v4l2_subdev_type type;
enum atomisp_camera_port port;
+ unsigned int lanes;
struct v4l2_subdev *subdev;
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp-regs.h
index 022997f47121..a7b0196686be 100644
--- a/drivers/staging/media/atomisp/pci/atomisp-regs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp-regs.h
@@ -112,7 +112,6 @@
/* MRFLD CSI lane configuration related */
#define MRFLD_PORT_CONFIG_NUM 8
-#define MRFLD_PORT_NUM 3
#define MRFLD_PORT1_ENABLE_SHIFT 0
#define MRFLD_PORT2_ENABLE_SHIFT 1
#define MRFLD_PORT3_ENABLE_SHIFT 2
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index aa790ae746f3..e27f9dc8e7aa 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -229,8 +229,8 @@ int atomisp_freq_scaling(struct atomisp_device *isp,
goto done;
}
- curr_rules.width = isp->asd.fmt[isp->asd.capture_pad].fmt.width;
- curr_rules.height = isp->asd.fmt[isp->asd.capture_pad].fmt.height;
+ curr_rules.width = isp->asd.fmt[ATOMISP_SUBDEV_PAD_SOURCE].fmt.width;
+ curr_rules.height = isp->asd.fmt[ATOMISP_SUBDEV_PAD_SOURCE].fmt.height;
curr_rules.fps = fps;
curr_rules.run_mode = isp->asd.run_mode->val;
@@ -472,39 +472,31 @@ irqreturn_t atomisp_isr(int irq, void *dev)
clear_irq_reg(isp);
- if (!atomisp_streaming_count(isp))
+ if (!isp->asd.streaming)
goto out_nowake;
- if (isp->asd.streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
- if (irq_infos & IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF) {
- atomic_inc(&isp->asd.sof_count);
- atomisp_sof_event(&isp->asd);
-
- /* If sequence_temp and sequence are the same
- * there where no frames lost so we can increase
- * sequence_temp.
- * If not then processing of frame is still in progress
- * and driver needs to keep old sequence_temp value.
- * NOTE: There is assumption here that ISP will not
- * start processing next frame from sensor before old
- * one is completely done. */
- if (atomic_read(&isp->asd.sequence) ==
- atomic_read(&isp->asd.sequence_temp))
- atomic_set(&isp->asd.sequence_temp,
- atomic_read(&isp->asd.sof_count));
- }
- if (irq_infos & IA_CSS_IRQ_INFO_EVENTS_READY)
- atomic_set(&isp->asd.sequence,
- atomic_read(&isp->asd.sequence_temp));
- }
-
if (irq_infos & IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF) {
- dev_dbg_ratelimited(isp->dev,
- "irq:0x%x (SOF)\n",
- irq_infos);
+ atomic_inc(&isp->asd.sof_count);
+ atomisp_sof_event(&isp->asd);
+
+ /*
+ * If sequence_temp and sequence are the same there were no frames
+ * lost, so we can increase sequence_temp.
+ * If not, then processing of a frame is still in progress and the
+ * driver needs to keep the old sequence_temp value.
+ * NOTE: There is an assumption here that the ISP will not start
+ * processing the next frame from the sensor before the old one is
+ * completely done.
+ */
+ if (atomic_read(&isp->asd.sequence) == atomic_read(&isp->asd.sequence_temp))
+ atomic_set(&isp->asd.sequence_temp, atomic_read(&isp->asd.sof_count));
+
+ dev_dbg_ratelimited(isp->dev, "irq:0x%x (SOF)\n", irq_infos);
irq_infos &= ~IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF;
}
+ if (irq_infos & IA_CSS_IRQ_INFO_EVENTS_READY)
+ atomic_set(&isp->asd.sequence, atomic_read(&isp->asd.sequence_temp));
+
if ((irq_infos & IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR) ||
(irq_infos & IA_CSS_IRQ_INFO_IF_ERROR)) {
/* handle mipi receiver error */
@@ -640,15 +632,6 @@ void atomisp_flush_video_pipe(struct atomisp_video_pipe *pipe, enum vb2_buffer_s
spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
}
-/* Returns queued buffers back to video-core */
-void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd)
-{
- atomisp_flush_video_pipe(&asd->video_out_capture, VB2_BUF_STATE_ERROR, false);
- atomisp_flush_video_pipe(&asd->video_out_vf, VB2_BUF_STATE_ERROR, false);
- atomisp_flush_video_pipe(&asd->video_out_preview, VB2_BUF_STATE_ERROR, false);
- atomisp_flush_video_pipe(&asd->video_out_video_capture, VB2_BUF_STATE_ERROR, false);
-}
-
/* clean out the parameters that did not apply */
void atomisp_flush_params_queue(struct atomisp_video_pipe *pipe)
{
@@ -944,53 +927,39 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
atomisp_qbuffers_to_css(asd);
}
-static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
+void atomisp_assert_recovery_work(struct work_struct *work)
{
+ struct atomisp_device *isp = container_of(work, struct atomisp_device,
+ assert_recovery_work);
struct pci_dev *pdev = to_pci_dev(isp->dev);
- enum ia_css_pipe_id css_pipe_id;
- bool stream_restart = false;
unsigned long flags;
int ret;
- lockdep_assert_held(&isp->mutex);
+ mutex_lock(&isp->mutex);
- if (!atomisp_streaming_count(isp))
- return;
+ if (!isp->asd.streaming)
+ goto out_unlock;
atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);
- if (isp->asd.streaming == ATOMISP_DEVICE_STREAMING_ENABLED ||
- isp->asd.stream_prepared) {
- stream_restart = true;
-
- spin_lock_irqsave(&isp->lock, flags);
- isp->asd.streaming = ATOMISP_DEVICE_STREAMING_STOPPING;
- spin_unlock_irqrestore(&isp->lock, flags);
-
- /* stream off sensor */
- ret = v4l2_subdev_call(
- isp->inputs[isp->asd.input_curr].
- camera, video, s_stream, 0);
- if (ret)
- dev_warn(isp->dev,
- "can't stop streaming on sensor!\n");
+ spin_lock_irqsave(&isp->lock, flags);
+ isp->asd.streaming = false;
+ spin_unlock_irqrestore(&isp->lock, flags);
- atomisp_clear_css_buffer_counters(&isp->asd);
+ /* stream off sensor */
+ ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].camera, video, s_stream, 0);
+ if (ret)
+ dev_warn(isp->dev, "Stopping sensor stream failed: %d\n", ret);
- css_pipe_id = atomisp_get_css_pipe_id(&isp->asd);
- atomisp_css_stop(&isp->asd, css_pipe_id, true);
+ atomisp_clear_css_buffer_counters(&isp->asd);
- spin_lock_irqsave(&isp->lock, flags);
- isp->asd.streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
- spin_unlock_irqrestore(&isp->lock, flags);
+ atomisp_css_stop(&isp->asd, true);
- isp->asd.preview_exp_id = 1;
- isp->asd.postview_exp_id = 1;
- /* notify HAL the CSS reset */
- dev_dbg(isp->dev,
- "send reset event to %s\n", isp->asd.subdev.devnode->name);
- atomisp_reset_event(&isp->asd);
- }
+ isp->asd.preview_exp_id = 1;
+ isp->asd.postview_exp_id = 1;
+ /* notify HAL the CSS reset */
+ dev_dbg(isp->dev, "send reset event to %s\n", isp->asd.subdev.devnode->name);
+ atomisp_reset_event(&isp->asd);
/* clear irq */
disable_isp_irq(hrt_isp_css_irq_sp);
@@ -1001,71 +970,46 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
/* reset ISP and restore its state */
- isp->isp_timeout = true;
atomisp_reset(isp);
- isp->isp_timeout = false;
- if (stream_restart) {
- atomisp_css_input_set_mode(&isp->asd, IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+ atomisp_css_input_set_mode(&isp->asd, IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
- css_pipe_id = atomisp_get_css_pipe_id(&isp->asd);
- if (atomisp_css_start(&isp->asd, css_pipe_id, true)) {
- dev_warn(isp->dev,
- "start SP failed, so do not set streaming to be enable!\n");
- } else {
- spin_lock_irqsave(&isp->lock, flags);
- isp->asd.streaming = ATOMISP_DEVICE_STREAMING_ENABLED;
- spin_unlock_irqrestore(&isp->lock, flags);
- }
+ /* Recreate streams destroyed by atomisp_css_stop() */
+ atomisp_create_pipes_stream(&isp->asd);
+
+ /* Invalidate caches. FIXME: should flush only necessary buffers */
+ wbinvd();
- atomisp_csi2_configure(&isp->asd);
+ if (atomisp_css_start(&isp->asd)) {
+ dev_warn(isp->dev, "start SP failed, so do not set streaming to be enable!\n");
+ } else {
+ spin_lock_irqsave(&isp->lock, flags);
+ isp->asd.streaming = true;
+ spin_unlock_irqrestore(&isp->lock, flags);
}
+ atomisp_csi2_configure(&isp->asd);
+
atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
atomisp_css_valid_sof(isp));
if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0)
dev_dbg(isp->dev, "DFS auto failed while recovering!\n");
- if (stream_restart) {
- /*
- * dequeueing buffers is not needed. CSS will recycle
- * buffers that it has.
- */
- atomisp_flush_bufs_and_wakeup(&isp->asd);
+ /* Dequeueing buffers is not needed; CSS will recycle buffers that it has */
+ atomisp_flush_video_pipe(&isp->asd.video_out, VB2_BUF_STATE_ERROR, false);
- /* Requeue unprocessed per-frame parameters. */
- atomisp_recover_params_queue(&isp->asd.video_out_capture);
- atomisp_recover_params_queue(&isp->asd.video_out_preview);
- atomisp_recover_params_queue(&isp->asd.video_out_video_capture);
-
- ret = v4l2_subdev_call(
- isp->inputs[isp->asd.input_curr].camera, video,
- s_stream, 1);
- if (ret)
- dev_warn(isp->dev,
- "can't start streaming on sensor!\n");
- }
-}
+ /* Requeue unprocessed per-frame parameters. */
+ atomisp_recover_params_queue(&isp->asd.video_out);
-void atomisp_assert_recovery_work(struct work_struct *work)
-{
- struct atomisp_device *isp = container_of(work, struct atomisp_device,
- assert_recovery_work);
+ ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].camera, video, s_stream, 1);
+ if (ret)
+ dev_err(isp->dev, "Starting sensor stream failed: %d\n", ret);
- mutex_lock(&isp->mutex);
- __atomisp_css_recover(isp, true);
+out_unlock:
mutex_unlock(&isp->mutex);
}
-void atomisp_css_flush(struct atomisp_device *isp)
-{
- /* Start recover */
- __atomisp_css_recover(isp, false);
-
- dev_dbg(isp->dev, "atomisp css flush done\n");
-}
-
void atomisp_setup_flash(struct atomisp_sub_device *asd)
{
struct atomisp_device *isp = asd->isp;
@@ -1105,7 +1049,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
spin_lock_irqsave(&isp->lock, flags);
- if (!atomisp_streaming_count(isp)) {
+ if (!isp->asd.streaming) {
spin_unlock_irqrestore(&isp->lock, flags);
return IRQ_HANDLED;
}
@@ -1141,7 +1085,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
if (atomisp_css_isr_thread(isp))
goto out;
- if (isp->asd.streaming == ATOMISP_DEVICE_STREAMING_ENABLED)
+ if (isp->asd.streaming)
atomisp_setup_flash(&isp->asd);
out:
mutex_unlock(&isp->mutex);
@@ -1298,7 +1242,7 @@ static void atomisp_update_capture_mode(struct atomisp_sub_device *asd)
atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_ADVANCED);
else if (asd->params.low_light)
atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_LOW_LIGHT);
- else if (asd->video_out_capture.sh_fmt == IA_CSS_FRAME_FORMAT_RAW)
+ else if (asd->video_out.sh_fmt == IA_CSS_FRAME_FORMAT_RAW)
atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_RAW);
else
atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_PRIMARY);
@@ -1553,13 +1497,12 @@ void atomisp_free_internal_buffers(struct atomisp_sub_device *asd)
}
static void atomisp_update_grid_info(struct atomisp_sub_device *asd,
- enum ia_css_pipe_id pipe_id,
- int source_pad)
+ enum ia_css_pipe_id pipe_id)
{
struct atomisp_device *isp = asd->isp;
int err;
- if (atomisp_css_get_grid_info(asd, pipe_id, source_pad))
+ if (atomisp_css_get_grid_info(asd, pipe_id))
return;
/* We must free all buffers because they no longer match
@@ -1908,161 +1851,6 @@ int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
return 0;
}
-int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag,
- struct atomisp_metadata *md)
-{
- struct atomisp_device *isp = asd->isp;
- struct ia_css_stream_info *stream_info;
- struct camera_mipi_info *mipi_info;
- struct atomisp_metadata_buf *md_buf;
- enum atomisp_metadata_type md_type = ATOMISP_MAIN_METADATA;
- int ret, i;
-
- if (flag != 0)
- return -EINVAL;
-
- stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
- stream_info;
-
- /* We always return the resolution and stride even if there is
- * no valid metadata. This allows the caller to get the information
- * needed to allocate user-space buffers. */
- md->width = stream_info->metadata_info.resolution.width;
- md->height = stream_info->metadata_info.resolution.height;
- md->stride = stream_info->metadata_info.stride;
-
- /* sanity check to avoid writing into unallocated memory.
- * This does not return an error because it is a valid way
- * for applications to detect that metadata is not enabled. */
- if (md->width == 0 || md->height == 0 || !md->data)
- return 0;
-
- /* This is done in the atomisp_buf_done() */
- if (list_empty(&asd->metadata_ready[md_type])) {
- dev_warn(isp->dev, "Metadata queue is empty now!\n");
- return -EAGAIN;
- }
-
- mipi_info = atomisp_to_sensor_mipi_info(
- isp->inputs[asd->input_curr].camera);
- if (!mipi_info)
- return -EINVAL;
-
- if (mipi_info->metadata_effective_width) {
- for (i = 0; i < md->height; i++)
- md->effective_width[i] =
- mipi_info->metadata_effective_width[i];
- }
-
- md_buf = list_entry(asd->metadata_ready[md_type].next,
- struct atomisp_metadata_buf, list);
- md->exp_id = md_buf->metadata->exp_id;
- if (md_buf->md_vptr) {
- ret = copy_to_user(md->data,
- md_buf->md_vptr,
- stream_info->metadata_info.size);
- } else {
- hmm_load(md_buf->metadata->address,
- asd->params.metadata_user[md_type],
- stream_info->metadata_info.size);
-
- ret = copy_to_user(md->data,
- asd->params.metadata_user[md_type],
- stream_info->metadata_info.size);
- }
- if (ret) {
- dev_err(isp->dev, "copy to user failed: copied %d bytes\n",
- ret);
- return -EFAULT;
- }
-
- list_del_init(&md_buf->list);
- list_add_tail(&md_buf->list, &asd->metadata[md_type]);
-
- dev_dbg(isp->dev, "%s: HAL de-queued metadata type %d with exp_id %d\n",
- __func__, md_type, md->exp_id);
- return 0;
-}
-
-int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag,
- struct atomisp_metadata_with_type *md)
-{
- struct atomisp_device *isp = asd->isp;
- struct ia_css_stream_info *stream_info;
- struct camera_mipi_info *mipi_info;
- struct atomisp_metadata_buf *md_buf;
- enum atomisp_metadata_type md_type;
- int ret, i;
-
- if (flag != 0)
- return -EINVAL;
-
- stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
- stream_info;
-
- /* We always return the resolution and stride even if there is
- * no valid metadata. This allows the caller to get the information
- * needed to allocate user-space buffers. */
- md->width = stream_info->metadata_info.resolution.width;
- md->height = stream_info->metadata_info.resolution.height;
- md->stride = stream_info->metadata_info.stride;
-
- /* sanity check to avoid writing into unallocated memory.
- * This does not return an error because it is a valid way
- * for applications to detect that metadata is not enabled. */
- if (md->width == 0 || md->height == 0 || !md->data)
- return 0;
-
- md_type = md->type;
- if (md_type < 0 || md_type >= ATOMISP_METADATA_TYPE_NUM)
- return -EINVAL;
-
- /* This is done in the atomisp_buf_done() */
- if (list_empty(&asd->metadata_ready[md_type])) {
- dev_warn(isp->dev, "Metadata queue is empty now!\n");
- return -EAGAIN;
- }
-
- mipi_info = atomisp_to_sensor_mipi_info(
- isp->inputs[asd->input_curr].camera);
- if (!mipi_info)
- return -EINVAL;
-
- if (mipi_info->metadata_effective_width) {
- for (i = 0; i < md->height; i++)
- md->effective_width[i] =
- mipi_info->metadata_effective_width[i];
- }
-
- md_buf = list_entry(asd->metadata_ready[md_type].next,
- struct atomisp_metadata_buf, list);
- md->exp_id = md_buf->metadata->exp_id;
- if (md_buf->md_vptr) {
- ret = copy_to_user(md->data,
- md_buf->md_vptr,
- stream_info->metadata_info.size);
- } else {
- hmm_load(md_buf->metadata->address,
- asd->params.metadata_user[md_type],
- stream_info->metadata_info.size);
-
- ret = copy_to_user(md->data,
- asd->params.metadata_user[md_type],
- stream_info->metadata_info.size);
- }
- if (ret) {
- dev_err(isp->dev, "copy to user failed: copied %d bytes\n",
- ret);
- return -EFAULT;
- } else {
- list_del_init(&md_buf->list);
- list_add_tail(&md_buf->list, &asd->metadata[md_type]);
- }
- dev_dbg(isp->dev, "%s: HAL de-queued metadata type %d with exp_id %d\n",
- __func__, md_type, md->exp_id);
- return 0;
-}
-
/*
* Function to calculate real zoom region for every pipe
*/
@@ -3221,14 +3009,11 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe)
lockdep_assert_held(&asd->isp->mutex);
- if (atomisp_is_vf_pipe(pipe))
- return;
-
/*
* CSS/FW requires set parameter and enqueue buffer happen after ISP
* is streamon.
*/
- if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ if (!asd->streaming)
return;
if (list_empty(&pipe->per_frame_params) ||
@@ -3299,15 +3084,7 @@ int atomisp_set_parameters(struct video_device *vdev,
dev_dbg(asd->isp->dev, "set parameter(per_frame_setting %d) isp_config_id %d of %s\n",
arg->per_frame_setting, arg->isp_config_id, vdev->name);
- if (IS_ISP2401) {
- if (atomisp_is_vf_pipe(pipe) && arg->per_frame_setting) {
- dev_err(asd->isp->dev, "%s: vf pipe not support per_frame_setting",
- __func__);
- return -EINVAL;
- }
- }
-
- if (arg->per_frame_setting && !atomisp_is_vf_pipe(pipe)) {
+ if (arg->per_frame_setting) {
/*
* Per-frame setting enabled, we allocate a new parameter
* buffer to cache the parameters and only when frame buffers
@@ -3346,7 +3123,7 @@ int atomisp_set_parameters(struct video_device *vdev,
if (ret)
goto apply_parameter_failed;
- if (!(arg->per_frame_setting && !atomisp_is_vf_pipe(pipe))) {
+ if (!arg->per_frame_setting) {
/* indicate to CSS that we have parameters to be updated */
asd->params.css_update_params_needed = true;
} else {
@@ -3880,66 +3657,202 @@ static void __atomisp_init_stream_info(u16 stream_index,
}
}
+static void atomisp_fill_pix_format(struct v4l2_pix_format *f,
+ u32 width, u32 height,
+ const struct atomisp_format_bridge *br_fmt)
+{
+ u32 bytes;
+
+ f->width = width;
+ f->height = height;
+ f->pixelformat = br_fmt->pixelformat;
+
+ /* Adding padding to width for bytesperline calculation */
+ width = ia_css_frame_pad_width(width, br_fmt->sh_fmt);
+ bytes = BITS_TO_BYTES(br_fmt->depth * width);
+
+ if (br_fmt->planar)
+ f->bytesperline = width;
+ else
+ f->bytesperline = bytes;
+
+ f->sizeimage = PAGE_ALIGN(height * bytes);
+
+ if (f->field == V4L2_FIELD_ANY)
+ f->field = V4L2_FIELD_NONE;
+
+ /*
+ * FIXME: do we need to set this up differently, depending on the
+ * sensor or the pipeline?
+ */
+ f->colorspace = V4L2_COLORSPACE_REC709;
+ f->ycbcr_enc = V4L2_YCBCR_ENC_709;
+ f->xfer_func = V4L2_XFER_FUNC_709;
+}
+
+/* Get sensor padding values for the non-padded width x height resolution */
+void atomisp_get_padding(struct atomisp_device *isp, u32 width, u32 height,
+ u32 *padding_w, u32 *padding_h)
+{
+ struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
+ struct v4l2_rect native_rect = input->native_rect;
+ const struct atomisp_in_fmt_conv *fc = NULL;
+ u32 min_pad_w = ISP2400_MIN_PAD_W;
+ u32 min_pad_h = ISP2400_MIN_PAD_H;
+ struct v4l2_mbus_framefmt *sink;
+
+ if (!input->crop_support) {
+ *padding_w = pad_w;
+ *padding_h = pad_h;
+ return;
+ }
+
+ width = min(width, input->active_rect.width);
+ height = min(height, input->active_rect.height);
+
+ if (input->binning_support && width <= (input->active_rect.width / 2) &&
+ height <= (input->active_rect.height / 2)) {
+ native_rect.width /= 2;
+ native_rect.height /= 2;
+ }
+
+ *padding_w = min_t(u32, (native_rect.width - width) & ~1, pad_w);
+ *padding_h = min_t(u32, (native_rect.height - height) & ~1, pad_h);
+
+ /* The below minimum padding requirements are for BYT / ISP2400 only */
+ if (IS_ISP2401)
+ return;
+
+ sink = atomisp_subdev_get_ffmt(&isp->asd.subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+ if (sink)
+ fc = atomisp_find_in_fmt_conv(sink->code);
+ if (!fc) {
+ dev_warn(isp->dev, "%s: Could not get sensor format\n", __func__);
+ goto apply_min_padding;
+ }
+
+ /*
+ * The ISP only supports GRBG; for other bayer orders additional padding
+ * is used so that the raw sensor data can be cropped to fix the order.
+ */
+ if (fc->bayer_order == IA_CSS_BAYER_ORDER_RGGB ||
+ fc->bayer_order == IA_CSS_BAYER_ORDER_GBRG)
+ min_pad_w += 2;
+
+ if (fc->bayer_order == IA_CSS_BAYER_ORDER_BGGR ||
+ fc->bayer_order == IA_CSS_BAYER_ORDER_GBRG)
+ min_pad_h += 2;
+
+apply_min_padding:
+ *padding_w = max_t(u32, *padding_w, min_pad_w);
+ *padding_h = max_t(u32, *padding_h, min_pad_h);
+}
+
+static int atomisp_set_crop(struct atomisp_device *isp,
+ const struct v4l2_mbus_framefmt *format,
+ int which)
+{
+ struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
+ struct v4l2_subdev_state pad_state = {
+ .pads = &input->pad_cfg,
+ };
+ struct v4l2_subdev_selection sel = {
+ .which = which,
+ .target = V4L2_SEL_TGT_CROP,
+ .r.width = format->width,
+ .r.height = format->height,
+ };
+ int ret;
+
+ if (!input->crop_support)
+ return 0;
+
+ /* Cropping is done before binning; when binning, double the crop rect */
+ if (input->binning_support && sel.r.width <= (input->native_rect.width / 2) &&
+ sel.r.height <= (input->native_rect.height / 2)) {
+ sel.r.width *= 2;
+ sel.r.height *= 2;
+ }
+
+ /* Clamp to avoid top/left calculations overflowing */
+ sel.r.width = min(sel.r.width, input->native_rect.width);
+ sel.r.height = min(sel.r.height, input->native_rect.height);
+
+ sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1;
+ sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1;
+
+ ret = v4l2_subdev_call(input->camera, pad, set_selection, &pad_state, &sel);
+ if (ret)
+ dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n",
+ sel.r.width, sel.r.height, sel.r.left, sel.r.top, ret);
+
+ return ret;
+}
+
/* This function looks up the closest available resolution. */
-int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
- bool *res_overflow)
+int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
+ const struct atomisp_format_bridge **fmt_ret,
+ const struct atomisp_format_bridge **snr_fmt_ret)
{
- struct atomisp_device *isp = video_get_drvdata(vdev);
- struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
- struct v4l2_subdev_pad_config pad_cfg;
+ const struct atomisp_format_bridge *fmt, *snr_fmt;
+ struct atomisp_sub_device *asd = &isp->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_state pad_state = {
- .pads = &pad_cfg,
+ .pads = &input->pad_cfg,
};
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
- const struct atomisp_format_bridge *fmt;
+ u32 padding_w, padding_h;
int ret;
- if (!asd) {
- dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
- __func__, vdev->name);
- return -EINVAL;
- }
-
- if (!isp->inputs[asd->input_curr].camera)
+ if (!input->camera)
return -EINVAL;
fmt = atomisp_get_format_bridge(f->pixelformat);
- if (!fmt) {
- dev_err(isp->dev, "unsupported pixelformat!\n");
- fmt = atomisp_output_fmts;
- }
-
- if (f->width <= 0 || f->height <= 0)
- return -EINVAL;
+ /* Currently, raw formats are broken!!! */
+ if (!fmt || fmt->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
+ f->pixelformat = V4L2_PIX_FMT_YUV420;
- format.format.code = fmt->mbus_code;
- format.format.width = f->width;
- format.format.height = f->height;
+ fmt = atomisp_get_format_bridge(f->pixelformat);
+ if (!fmt)
+ return -EINVAL;
+ }
- __atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL,
- (struct atomisp_input_stream_info *)format.format.reserved);
+ /*
+ * atomisp_set_fmt() will set the sensor resolution to the requested
+ * resolution + padding. Add padding here and remove it again after
+ * the set_fmt call, like atomisp_set_fmt_to_snr() does.
+ */
+ atomisp_get_padding(isp, f->width, f->height, &padding_w, &padding_h);
+ v4l2_fill_mbus_format(&format.format, f, fmt->mbus_code);
+ format.format.width += padding_w;
+ format.format.height += padding_h;
dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
format.format.width, format.format.height);
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, set_fmt, &pad_state, &format);
+ ret = atomisp_set_crop(isp, &format.format, V4L2_SUBDEV_FORMAT_TRY);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &format);
if (ret)
return ret;
dev_dbg(isp->dev, "try_mbus_fmt: got %ux%u\n",
format.format.width, format.format.height);
- fmt = atomisp_get_format_bridge_from_mbus(format.format.code);
- if (!fmt) {
+ snr_fmt = atomisp_get_format_bridge_from_mbus(format.format.code);
+ if (!snr_fmt) {
dev_err(isp->dev, "unknown sensor format 0x%8.8x\n",
format.format.code);
return -EINVAL;
}
- f->pixelformat = fmt->pixelformat;
+ f->width = format.format.width - padding_w;
+ f->height = format.format.height - padding_h;
/*
* If the format is jpeg or custom RAW, then the width and height will
@@ -3948,22 +3861,8 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
* the sensor driver.
*/
if (f->pixelformat == V4L2_PIX_FMT_JPEG ||
- f->pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW) {
- f->width = format.format.width;
- f->height = format.format.height;
- return 0;
- }
-
- if (!res_overflow || (format.format.width < f->width &&
- format.format.height < f->height)) {
- f->width = format.format.width;
- f->height = format.format.height;
- /* Set the flag when resolution requested is
- * beyond the max value supported by sensor
- */
- if (res_overflow)
- *res_overflow = true;
- }
+ f->pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW)
+ goto out_fill_pix_format;
/* app vs isp */
f->width = rounddown(clamp_t(u32, f->width, ATOM_ISP_MIN_WIDTH,
@@ -3971,11 +3870,20 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
f->height = rounddown(clamp_t(u32, f->height, ATOM_ISP_MIN_HEIGHT,
ATOM_ISP_MAX_HEIGHT), ATOM_ISP_STEP_HEIGHT);
+out_fill_pix_format:
+ atomisp_fill_pix_format(f, f->width, f->height, fmt);
+
+ if (fmt_ret)
+ *fmt_ret = fmt;
+
+ if (snr_fmt_ret)
+ *snr_fmt_ret = snr_fmt;
+
return 0;
}
-enum mipi_port_id __get_mipi_port(struct atomisp_device *isp,
- enum atomisp_camera_port port)
+enum mipi_port_id atomisp_port_to_mipi_port(struct atomisp_device *isp,
+ enum atomisp_camera_port port)
{
switch (port) {
case ATOMISP_CAMERA_PORT_PRIMARY:
@@ -3983,9 +3891,7 @@ enum mipi_port_id __get_mipi_port(struct atomisp_device *isp,
case ATOMISP_CAMERA_PORT_SECONDARY:
return MIPI_PORT1_ID;
case ATOMISP_CAMERA_PORT_TERTIARY:
- if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID)
- return MIPI_PORT1_ID + 1;
- fallthrough;
+ return MIPI_PORT2_ID;
default:
dev_err(isp->dev, "unsupported port: %d\n", port);
return MIPI_PORT0_ID;
@@ -3999,13 +3905,15 @@ static inline int atomisp_set_sensor_mipi_to_isp(
{
struct v4l2_control ctrl;
struct atomisp_device *isp = asd->isp;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
const struct atomisp_in_fmt_conv *fc;
int mipi_freq = 0;
unsigned int input_format, bayer_order;
+ enum atomisp_input_format metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
+ u32 mipi_port, metadata_width = 0, metadata_height = 0;
ctrl.id = V4L2_CID_LINK_FREQ;
- if (v4l2_g_ctrl
- (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl) == 0)
+ if (v4l2_g_ctrl(input->camera->ctrl_handler, &ctrl) == 0)
mipi_freq = ctrl.value;
if (asd->stream_env[stream_id].isys_configs == 1) {
@@ -4029,7 +3937,7 @@ static inline int atomisp_set_sensor_mipi_to_isp(
/* Compatibility for sensors which provide no media bus code
* in s_mbus_framefmt() nor support pad formats. */
- if (mipi_info->input_format != -1) {
+ if (mipi_info && mipi_info->input_format != -1) {
bayer_order = mipi_info->raw_bayer_order;
/* Input stream config is still needs configured */
@@ -4039,6 +3947,9 @@ static inline int atomisp_set_sensor_mipi_to_isp(
if (!fc)
return -EINVAL;
input_format = fc->atomisp_in_fmt;
+ metadata_format = mipi_info->metadata_format;
+ metadata_width = mipi_info->metadata_width;
+ metadata_height = mipi_info->metadata_height;
} else {
struct v4l2_mbus_framefmt *sink;
@@ -4055,18 +3966,17 @@ static inline int atomisp_set_sensor_mipi_to_isp(
atomisp_css_input_set_format(asd, stream_id, input_format);
atomisp_css_input_set_bayer_order(asd, stream_id, bayer_order);
- fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
- mipi_info->metadata_format);
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(metadata_format);
if (!fc)
return -EINVAL;
+
input_format = fc->atomisp_in_fmt;
- atomisp_css_input_configure_port(asd,
- __get_mipi_port(asd->isp, mipi_info->port),
- mipi_info->num_lanes,
+ mipi_port = atomisp_port_to_mipi_port(isp, input->port);
+ atomisp_css_input_configure_port(asd, mipi_port,
+ isp->sensor_lanes[mipi_port],
0xffff4, mipi_freq,
input_format,
- mipi_info->metadata_width,
- mipi_info->metadata_height);
+ metadata_width, metadata_height);
return 0;
}
@@ -4134,16 +4044,15 @@ static int css_input_resolution_changed(struct atomisp_sub_device *asd,
static int atomisp_set_fmt_to_isp(struct video_device *vdev,
struct ia_css_frame_info *output_info,
- struct v4l2_pix_format *pix,
- unsigned int source_pad)
+ const struct v4l2_pix_format *pix)
{
struct camera_mipi_info *mipi_info;
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
const struct atomisp_format_bridge *format;
struct v4l2_rect *isp_sink_crop;
enum ia_css_pipe_id pipe_id;
- struct v4l2_subdev_fh fh;
int (*configure_output)(struct atomisp_sub_device *asd,
unsigned int width, unsigned int height,
unsigned int min_width,
@@ -4155,7 +4064,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
int (*configure_pp_input)(struct atomisp_sub_device *asd,
unsigned int width, unsigned int height) =
configure_pp_input_nop;
- const struct atomisp_in_fmt_conv *fc;
+ const struct atomisp_in_fmt_conv *fc = NULL;
int ret, i;
if (!asd) {
@@ -4164,8 +4073,6 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
return -EINVAL;
}
- v4l2_fh_init(&fh.vfh, vdev);
-
isp_sink_crop = atomisp_subdev_get_rect(
&asd->subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, V4L2_SEL_TGT_CROP);
@@ -4174,18 +4081,16 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
if (!format)
return -EINVAL;
- if (isp->inputs[asd->input_curr].type != TEST_PATTERN) {
- mipi_info = atomisp_to_sensor_mipi_info(
- isp->inputs[asd->input_curr].camera);
- if (!mipi_info) {
- dev_err(isp->dev, "mipi_info is NULL\n");
- return -EINVAL;
- }
+ if (input->type != TEST_PATTERN) {
+ mipi_info = atomisp_to_sensor_mipi_info(input->camera);
+
if (atomisp_set_sensor_mipi_to_isp(asd, ATOMISP_INPUT_STREAM_GENERAL,
mipi_info))
return -EINVAL;
- fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
- mipi_info->input_format);
+
+ if (mipi_info)
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(mipi_info->input_format);
+
if (!fc)
fc = atomisp_find_in_fmt_conv(
atomisp_subdev_get_ffmt(&asd->subdev,
@@ -4204,49 +4109,24 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
* CSS still requires viewfinder configuration.
*/
{
- struct v4l2_rect vf_size = {0};
- struct v4l2_mbus_framefmt vf_ffmt = {0};
+ u32 width, height;
if (pix->width < 640 || pix->height < 480) {
- vf_size.width = pix->width;
- vf_size.height = pix->height;
+ width = pix->width;
+ height = pix->height;
} else {
- vf_size.width = 640;
- vf_size.height = 480;
+ width = 640;
+ height = 480;
}
- /* FIXME: proper format name for this one. See
- atomisp_output_fmts[] in atomisp_v4l2.c */
- vf_ffmt.code = V4L2_MBUS_FMT_CUSTOM_YUV420;
-
- atomisp_subdev_set_selection(&asd->subdev, fh.state,
- V4L2_SUBDEV_FORMAT_ACTIVE,
- ATOMISP_SUBDEV_PAD_SOURCE_VF,
- V4L2_SEL_TGT_COMPOSE, 0, &vf_size);
- atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
- V4L2_SUBDEV_FORMAT_ACTIVE,
- ATOMISP_SUBDEV_PAD_SOURCE_VF, &vf_ffmt);
- asd->video_out_vf.sh_fmt = IA_CSS_FRAME_FORMAT_NV12;
-
- if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
- atomisp_css_video_configure_viewfinder(asd,
- vf_size.width, vf_size.height, 0,
- asd->video_out_vf.sh_fmt);
- } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
- if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW ||
- source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO)
- atomisp_css_video_configure_viewfinder(asd,
- vf_size.width, vf_size.height, 0,
- asd->video_out_vf.sh_fmt);
- else
- atomisp_css_capture_configure_viewfinder(asd,
- vf_size.width, vf_size.height, 0,
- asd->video_out_vf.sh_fmt);
- } else if (source_pad != ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW ||
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
+ asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ atomisp_css_video_configure_viewfinder(asd, width, height, 0,
+ IA_CSS_FRAME_FORMAT_NV12);
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_STILL_CAPTURE ||
asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
- atomisp_css_capture_configure_viewfinder(asd,
- vf_size.width, vf_size.height, 0,
- asd->video_out_vf.sh_fmt);
+ atomisp_css_capture_configure_viewfinder(asd, width, height, 0,
+ IA_CSS_FRAME_FORMAT_NV12);
}
}
@@ -4268,7 +4148,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
configure_output = atomisp_css_video_configure_output;
get_frame_info = atomisp_css_video_get_output_frame_info;
pipe_id = IA_CSS_PIPE_ID_VIDEO;
- } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) {
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
configure_output = atomisp_css_preview_configure_output;
get_frame_info = atomisp_css_preview_get_output_frame_info;
configure_pp_input = atomisp_css_preview_configure_pp_input;
@@ -4333,7 +4213,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
return ret;
}
- atomisp_update_grid_info(asd, pipe_id, source_pad);
+ atomisp_update_grid_info(asd, pipe_id);
return 0;
}
@@ -4357,7 +4237,7 @@ static void atomisp_get_dis_envelop(struct atomisp_sub_device *asd,
}
static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
- int source_pad, const struct v4l2_pix_format *f)
+ const struct v4l2_pix_format *f)
{
struct v4l2_mbus_framefmt *sink, *src;
@@ -4370,7 +4250,7 @@ static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK);
src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
- V4L2_SUBDEV_FORMAT_ACTIVE, source_pad);
+ V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SOURCE);
if (sink->code == src->code && sink->width == f->width && sink->height == f->height)
asd->copy_mode = true;
@@ -4381,26 +4261,23 @@ static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
}
static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_pix_format *f,
- unsigned int padding_w, unsigned int padding_h,
unsigned int dvs_env_w, unsigned int dvs_env_h)
{
struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
const struct atomisp_format_bridge *format;
- struct v4l2_subdev_pad_config pad_cfg;
struct v4l2_subdev_state pad_state = {
- .pads = &pad_cfg,
+ .pads = &input->pad_cfg,
};
struct v4l2_subdev_format vformat = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
struct v4l2_mbus_framefmt *ffmt = &vformat.format;
struct v4l2_mbus_framefmt *req_ffmt;
- struct atomisp_device *isp;
struct atomisp_input_stream_info *stream_info =
(struct atomisp_input_stream_info *)ffmt->reserved;
- int source_pad = atomisp_subdev_source_pad(vdev);
- struct v4l2_subdev_fh fh;
int ret;
if (!asd) {
@@ -4409,20 +4286,16 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
return -EINVAL;
}
- isp = asd->isp;
-
- v4l2_fh_init(&fh.vfh, vdev);
-
format = atomisp_get_format_bridge(f->pixelformat);
if (!format)
return -EINVAL;
v4l2_fill_mbus_format(ffmt, f, format->mbus_code);
- ffmt->height += padding_h + dvs_env_h;
- ffmt->width += padding_w + dvs_env_w;
+ ffmt->height += asd->sink_pad_padding_h + dvs_env_h;
+ ffmt->width += asd->sink_pad_padding_w + dvs_env_w;
dev_dbg(isp->dev, "s_mbus_fmt: ask %ux%u (padding %ux%u, dvs %ux%u)\n",
- ffmt->width, ffmt->height, padding_w, padding_h,
+ ffmt->width, ffmt->height, asd->sink_pad_padding_w, asd->sink_pad_padding_h,
dvs_env_w, dvs_env_h);
__atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info);
@@ -4430,11 +4303,13 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
req_ffmt = ffmt;
/* Disable dvs if resolution can't be supported by sensor */
- if (asd->params.video_dis_en &&
- source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
+ if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_TRY);
+ if (ret)
+ return ret;
+
vformat.which = V4L2_SUBDEV_FORMAT_TRY;
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, set_fmt, &pad_state, &vformat);
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &vformat);
if (ret)
return ret;
@@ -4451,9 +4326,13 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
asd->params.video_dis_en = false;
}
}
+
+ ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_ACTIVE);
+ if (ret)
+ return ret;
+
vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad,
- set_fmt, NULL, &vformat);
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt, NULL, &vformat);
if (ret)
return ret;
@@ -4466,15 +4345,14 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
ffmt->height < ATOM_ISP_STEP_HEIGHT)
return -EINVAL;
- if (asd->params.video_dis_en &&
- source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO &&
+ if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
(ffmt->width < req_ffmt->width || ffmt->height < req_ffmt->height)) {
dev_warn(isp->dev,
"can not enable video dis due to sensor limitation.");
asd->params.video_dis_en = false;
}
- atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
+ atomisp_subdev_set_ffmt(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, ffmt);
@@ -4490,65 +4368,25 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
const struct atomisp_format_bridge *snr_format_bridge;
struct ia_css_frame_info output_info;
unsigned int dvs_env_w = 0, dvs_env_h = 0;
- unsigned int padding_w = pad_w, padding_h = pad_h;
struct v4l2_mbus_framefmt isp_source_fmt = {0};
- struct v4l2_subdev_format vformat = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
struct v4l2_rect isp_sink_crop;
- u16 source_pad = atomisp_subdev_source_pad(vdev);
- struct v4l2_subdev_fh fh;
int ret;
ret = atomisp_pipe_check(pipe, true);
if (ret)
return ret;
- if (source_pad >= ATOMISP_SUBDEV_PADS_NUM)
- return -EINVAL;
-
dev_dbg(isp->dev,
- "setting resolution %ux%u on pad %u bytesperline %u\n",
- f->fmt.pix.width, f->fmt.pix.height, source_pad, f->fmt.pix.bytesperline);
-
- v4l2_fh_init(&fh.vfh, vdev);
-
- format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
- if (!format_bridge)
- return -EINVAL;
-
- /* Currently, raw formats are broken!!! */
-
- if (format_bridge->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
-
- format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
- if (!format_bridge)
- return -EINVAL;
- }
- pipe->sh_fmt = format_bridge->sh_fmt;
- pipe->pix.pixelformat = f->fmt.pix.pixelformat;
+ "setting resolution %ux%u bytesperline %u\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.bytesperline);
/* Ensure that the resolution is equal or below the maximum supported */
-
- vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- v4l2_fill_mbus_format(&vformat.format, &f->fmt.pix, format_bridge->mbus_code);
- vformat.format.height += padding_h;
- vformat.format.width += padding_w;
-
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad,
- set_fmt, NULL, &vformat);
+ ret = atomisp_try_fmt(isp, &f->fmt.pix, &format_bridge, &snr_format_bridge);
if (ret)
return ret;
- f->fmt.pix.width = vformat.format.width - padding_w;
- f->fmt.pix.height = vformat.format.height - padding_h;
-
- snr_format_bridge = atomisp_get_format_bridge_from_mbus(vformat.format.code);
- if (!snr_format_bridge) {
- dev_warn(isp->dev, "Can't find bridge format\n");
- return -EINVAL;
- }
+ pipe->sh_fmt = format_bridge->sh_fmt;
+ pipe->pix.pixelformat = format_bridge->pixelformat;
atomisp_subdev_get_ffmt(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -4556,22 +4394,22 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
snr_format_bridge->mbus_code;
isp_source_fmt.code = format_bridge->mbus_code;
- atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
+ atomisp_subdev_set_ffmt(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE,
- source_pad, &isp_source_fmt);
+ ATOMISP_SUBDEV_PAD_SOURCE, &isp_source_fmt);
- if (!atomisp_subdev_format_conversion(asd, source_pad)) {
- padding_w = 0;
- padding_h = 0;
+ if (atomisp_subdev_format_conversion(asd)) {
+ atomisp_get_padding(isp, f->fmt.pix.width, f->fmt.pix.height,
+ &asd->sink_pad_padding_w, &asd->sink_pad_padding_h);
+ } else {
+ asd->sink_pad_padding_w = 0;
+ asd->sink_pad_padding_h = 0;
}
atomisp_get_dis_envelop(asd, f->fmt.pix.width, f->fmt.pix.height,
&dvs_env_w, &dvs_env_h);
- asd->capture_pad = source_pad;
-
- ret = atomisp_set_fmt_to_snr(vdev, &f->fmt.pix,
- padding_w, padding_h, dvs_env_w, dvs_env_h);
+ ret = atomisp_set_fmt_to_snr(vdev, &f->fmt.pix, dvs_env_w, dvs_env_h);
if (ret) {
dev_warn(isp->dev,
"Set format to sensor failed with %d\n", ret);
@@ -4580,7 +4418,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
atomisp_csi_lane_config(isp);
- atomisp_check_copy_mode(asd, source_pad, &f->fmt.pix);
+ atomisp_check_copy_mode(asd, &f->fmt.pix);
isp_sink_crop = *atomisp_subdev_get_rect(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -4589,25 +4427,20 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
/* Try to enable YUV downscaling if ISP input is 10 % (either
* width or height) bigger than the desired result. */
- if (isp_sink_crop.width * 9 / 10 < f->fmt.pix.width ||
+ if (!IS_MOFD ||
+ isp_sink_crop.width * 9 / 10 < f->fmt.pix.width ||
isp_sink_crop.height * 9 / 10 < f->fmt.pix.height ||
- (atomisp_subdev_format_conversion(asd, source_pad) &&
+ (atomisp_subdev_format_conversion(asd) &&
(asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER))) {
isp_sink_crop.width = f->fmt.pix.width;
isp_sink_crop.height = f->fmt.pix.height;
- atomisp_subdev_set_selection(&asd->subdev, fh.state,
+ atomisp_subdev_set_selection(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE,
- ATOMISP_SUBDEV_PAD_SINK,
- V4L2_SEL_TGT_CROP,
- V4L2_SEL_FLAG_KEEP_CONFIG,
- &isp_sink_crop);
- atomisp_subdev_set_selection(&asd->subdev, fh.state,
- V4L2_SUBDEV_FORMAT_ACTIVE,
- source_pad, V4L2_SEL_TGT_COMPOSE,
+ ATOMISP_SUBDEV_PAD_SOURCE, V4L2_SEL_TGT_COMPOSE,
0, &isp_sink_crop);
- } else if (IS_MOFD) {
+ } else {
struct v4l2_rect main_compose = {0};
main_compose.width = isp_sink_crop.width;
@@ -4622,104 +4455,25 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
f->fmt.pix.height);
}
- atomisp_subdev_set_selection(&asd->subdev, fh.state,
+ atomisp_subdev_set_selection(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE,
- source_pad,
- V4L2_SEL_TGT_COMPOSE, 0,
- &main_compose);
- } else {
- struct v4l2_rect sink_crop = {0};
- struct v4l2_rect main_compose = {0};
-
- main_compose.width = f->fmt.pix.width;
- main_compose.height = f->fmt.pix.height;
-
- /* WORKAROUND: this override is universally enabled in
- * GMIN to work around a CTS failures (GMINL-539)
- * which appears to be related by a hardware
- * performance limitation. It's unclear why this
- * particular code triggers the issue. */
- if (isp_sink_crop.width * main_compose.height >
- isp_sink_crop.height * main_compose.width) {
- sink_crop.height = isp_sink_crop.height;
- sink_crop.width =
- DIV_NEAREST_STEP(sink_crop.height * f->fmt.pix.width,
- f->fmt.pix.height,
- ATOM_ISP_STEP_WIDTH);
- } else {
- sink_crop.width = isp_sink_crop.width;
- sink_crop.height =
- DIV_NEAREST_STEP(sink_crop.width * f->fmt.pix.height,
- f->fmt.pix.width,
- ATOM_ISP_STEP_HEIGHT);
- }
- atomisp_subdev_set_selection(&asd->subdev, fh.state,
- V4L2_SUBDEV_FORMAT_ACTIVE,
- ATOMISP_SUBDEV_PAD_SINK,
- V4L2_SEL_TGT_CROP,
- V4L2_SEL_FLAG_KEEP_CONFIG,
- &sink_crop);
-
- atomisp_subdev_set_selection(&asd->subdev, fh.state,
- V4L2_SUBDEV_FORMAT_ACTIVE,
- source_pad,
+ ATOMISP_SUBDEV_PAD_SOURCE,
V4L2_SEL_TGT_COMPOSE, 0,
&main_compose);
}
- ret = atomisp_set_fmt_to_isp(vdev, &output_info, &f->fmt.pix, source_pad);
+ ret = atomisp_set_fmt_to_isp(vdev, &output_info, &f->fmt.pix);
if (ret) {
dev_warn(isp->dev, "Can't set format on ISP. Error %d\n", ret);
return -EINVAL;
}
- pipe->pix.width = f->fmt.pix.width;
- pipe->pix.height = f->fmt.pix.height;
- pipe->pix.pixelformat = f->fmt.pix.pixelformat;
- /*
- * FIXME: do we need to setup this differently, depending on the
- * sensor or the pipeline?
- */
- pipe->pix.colorspace = V4L2_COLORSPACE_REC709;
- pipe->pix.ycbcr_enc = V4L2_YCBCR_ENC_709;
- pipe->pix.xfer_func = V4L2_XFER_FUNC_709;
-
- if (format_bridge->planar) {
- pipe->pix.bytesperline = output_info.padded_width;
- pipe->pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height *
- DIV_ROUND_UP(format_bridge->depth *
- output_info.padded_width, 8));
- } else {
- pipe->pix.bytesperline =
- DIV_ROUND_UP(format_bridge->depth *
- output_info.padded_width, 8);
- pipe->pix.sizeimage =
- PAGE_ALIGN(f->fmt.pix.height * pipe->pix.bytesperline);
- }
- dev_dbg(isp->dev, "%s: image size: %d, %d bytes per line\n",
- __func__, pipe->pix.sizeimage, pipe->pix.bytesperline);
-
- if (f->fmt.pix.field == V4L2_FIELD_ANY)
- f->fmt.pix.field = V4L2_FIELD_NONE;
- pipe->pix.field = f->fmt.pix.field;
+ atomisp_fill_pix_format(&pipe->pix, f->fmt.pix.width, f->fmt.pix.height, format_bridge);
f->fmt.pix = pipe->pix;
f->fmt.pix.priv = PAGE_ALIGN(pipe->pix.width *
pipe->pix.height * 2);
- /*
- * If in video 480P case, no GFX throttle
- */
- if (asd->run_mode->val == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO &&
- f->fmt.pix.width == 720 && f->fmt.pix.height == 480)
- isp->need_gfx_throttle = false;
- else
- isp->need_gfx_throttle = true;
-
- /* Report the needed sizes */
- f->fmt.pix.sizeimage = pipe->pix.sizeimage;
- f->fmt.pix.bytesperline = pipe->pix.bytesperline;
-
dev_dbg(isp->dev, "%s: %dx%d, image size: %d, %d bytes per line\n",
__func__,
f->fmt.pix.width, f->fmt.pix.height,
@@ -4790,30 +4544,6 @@ out:
return ret;
}
-/*
- * set auto exposure metering window to camera sensor
- */
-int atomisp_s_ae_window(struct atomisp_sub_device *asd,
- struct atomisp_ae_window *arg)
-{
- struct atomisp_device *isp = asd->isp;
- /* Coverity CID 298071 - initialzize struct */
- struct v4l2_subdev_selection sel = { 0 };
-
- sel.r.left = arg->x_left;
- sel.r.top = arg->y_top;
- sel.r.width = arg->x_right - arg->x_left + 1;
- sel.r.height = arg->y_bottom - arg->y_top + 1;
-
- if (v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, set_selection, NULL, &sel)) {
- dev_err(isp->dev, "failed to call sensor set_selection.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
int atomisp_flash_enable(struct atomisp_sub_device *asd, int num_frames)
{
struct atomisp_device *isp = asd->isp;
@@ -4836,26 +4566,6 @@ int atomisp_flash_enable(struct atomisp_sub_device *asd, int num_frames)
return 0;
}
-bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe)
-{
- struct atomisp_sub_device *asd = pipe->asd;
-
- if (!asd) {
- dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
- __func__, pipe->vdev.name);
- return false;
- }
-
- if (pipe == &asd->video_out_vf)
- return true;
-
- if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
- pipe == &asd->video_out_preview)
- return true;
-
- return false;
-}
-
static int __checking_exp_id(struct atomisp_sub_device *asd, int exp_id)
{
struct atomisp_device *isp = asd->isp;
@@ -4864,7 +4574,7 @@ static int __checking_exp_id(struct atomisp_sub_device *asd, int exp_id)
dev_warn(isp->dev, "%s Raw Buffer Lock is disable.\n", __func__);
return -EINVAL;
}
- if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) {
+ if (!asd->streaming) {
dev_err(isp->dev, "%s streaming %d invalid exp_id %d.\n",
__func__, exp_id, asd->streaming);
return -EINVAL;
@@ -4986,7 +4696,7 @@ int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event)
{
- if (!event || asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ if (!event || !asd->streaming)
return -EINVAL;
lockdep_assert_held(&asd->isp->mutex);
@@ -5013,69 +4723,3 @@ int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event)
return 0;
}
-
-static int atomisp_get_pipe_id(struct atomisp_video_pipe *pipe)
-{
- struct atomisp_sub_device *asd = pipe->asd;
-
- if (!asd) {
- dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
- __func__, pipe->vdev.name);
- return -EINVAL;
- }
-
- if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
- return IA_CSS_PIPE_ID_VIDEO;
- } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
- return IA_CSS_PIPE_ID_CAPTURE;
- } else if (pipe == &asd->video_out_video_capture) {
- return IA_CSS_PIPE_ID_VIDEO;
- } else if (pipe == &asd->video_out_vf) {
- return IA_CSS_PIPE_ID_CAPTURE;
- } else if (pipe == &asd->video_out_preview) {
- if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
- return IA_CSS_PIPE_ID_VIDEO;
- else
- return IA_CSS_PIPE_ID_PREVIEW;
- } else if (pipe == &asd->video_out_capture) {
- if (asd->copy_mode)
- return IA_CSS_PIPE_ID_COPY;
- else
- return IA_CSS_PIPE_ID_CAPTURE;
- }
-
- /* fail through */
- dev_warn(asd->isp->dev, "%s failed to find proper pipe\n",
- __func__);
- return IA_CSS_PIPE_ID_CAPTURE;
-}
-
-int atomisp_get_invalid_frame_num(struct video_device *vdev,
- int *invalid_frame_num)
-{
- struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
- struct atomisp_sub_device *asd = pipe->asd;
- enum ia_css_pipe_id pipe_id;
- struct ia_css_pipe_info p_info;
- int ret;
-
- pipe_id = atomisp_get_pipe_id(pipe);
- if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id]) {
- dev_warn(asd->isp->dev,
- "%s pipe %d has not been created yet, do SET_FMT first!\n",
- __func__, pipe_id);
- return -EINVAL;
- }
-
- ret = ia_css_pipe_get_info(
- asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
- .pipes[pipe_id], &p_info);
- if (!ret) {
- *invalid_frame_num = p_info.num_invalid_frames;
- return 0;
- } else {
- dev_warn(asd->isp->dev, "%s get pipe infor failed %d\n",
- __func__, ret);
- return -EINVAL;
- }
-}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
index 399b549bcf83..8305161d2062 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
@@ -59,7 +59,6 @@ int atomisp_buffers_in_css(struct atomisp_video_pipe *pipe);
void atomisp_buffer_done(struct ia_css_frame *frame, enum vb2_buffer_state state);
void atomisp_flush_video_pipe(struct atomisp_video_pipe *pipe, enum vb2_buffer_state state,
bool warn_on_css_frames);
-void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd);
void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd);
/* Interrupt functions */
@@ -160,13 +159,6 @@ int atomisp_set_dis_vector(struct atomisp_sub_device *asd,
int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
struct atomisp_3a_statistics *config);
-/* Function to get metadata from isp */
-int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag,
- struct atomisp_metadata *config);
-
-int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag,
- struct atomisp_metadata_with_type *config);
-
int atomisp_set_parameters(struct video_device *vdev,
struct atomisp_parameters *arg);
@@ -258,9 +250,14 @@ int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd,
int atomisp_compare_grid(struct atomisp_sub_device *asd,
struct atomisp_grid_info *atomgrid);
+/* Get sensor padding values for the non padded width x height resolution */
+void atomisp_get_padding(struct atomisp_device *isp, u32 width, u32 height,
+ u32 *padding_w, u32 *padding_h);
+
/* This function looks up the closest available resolution. */
-int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
- bool *res_overflow);
+int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
+ const struct atomisp_format_bridge **fmt_ret,
+ const struct atomisp_format_bridge **snr_fmt_ret);
int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f);
@@ -269,9 +266,6 @@ int atomisp_set_shading_table(struct atomisp_sub_device *asd,
void atomisp_free_internal_buffers(struct atomisp_sub_device *asd);
-int atomisp_s_ae_window(struct atomisp_sub_device *asd,
- struct atomisp_ae_window *arg);
-
int atomisp_flash_enable(struct atomisp_sub_device *asd,
int num_frames);
@@ -284,15 +278,11 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
enum ia_css_pipe_id css_pipe_id,
bool q_buffers, enum atomisp_input_stream_id stream_id);
-void atomisp_css_flush(struct atomisp_device *isp);
-
/* Events. Only one event has to be exported for now. */
void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id);
-enum mipi_port_id __get_mipi_port(struct atomisp_device *isp,
- enum atomisp_camera_port port);
-
-bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe);
+enum mipi_port_id atomisp_port_to_mipi_port(struct atomisp_device *isp,
+ enum atomisp_camera_port port);
void atomisp_apply_css_parameters(
struct atomisp_sub_device *asd,
diff --git a/drivers/staging/media/atomisp/pci/atomisp_common.h b/drivers/staging/media/atomisp/pci/atomisp_common.h
index 07c38e487a66..9d23a6ccfc33 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_common.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_common.h
@@ -37,6 +37,10 @@ extern int mipicsi_flag;
extern int pad_w;
extern int pad_h;
+/* Minimum padding requirements for ISP2400 (BYT) */
+#define ISP2400_MIN_PAD_W 12
+#define ISP2400_MIN_PAD_H 12
+
#define CSS_DTRACE_VERBOSITY_LEVEL 5 /* Controls trace verbosity */
#define CSS_DTRACE_VERBOSITY_TIMEOUT 9 /* Verbosity on ISP timeout */
#define MRFLD_MAX_ZOOM_FACTOR 1024
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h
index 218e8ac276c8..e9e4bfb0f5f9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h
@@ -78,8 +78,7 @@ int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd,
void ia_css_mmu_invalidate_cache(void);
-int atomisp_css_start(struct atomisp_sub_device *asd,
- enum ia_css_pipe_id pipe_id, bool in_reset);
+int atomisp_css_start(struct atomisp_sub_device *asd);
void atomisp_css_update_isp_params(struct atomisp_sub_device *asd);
void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd,
@@ -113,8 +112,7 @@ void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf
*metadata_buf);
int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
- enum ia_css_pipe_id pipe_id,
- int source_pad);
+ enum ia_css_pipe_id pipe_id);
int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd);
@@ -151,10 +149,6 @@ int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
struct v4l2_mbus_framefmt *ffmt);
-int atomisp_css_isys_two_stream_cfg(struct atomisp_sub_device *asd,
- enum atomisp_input_stream_id stream_id,
- enum atomisp_input_format input_format);
-
void atomisp_css_isys_two_stream_cfg_update_stream1(
struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
@@ -210,15 +204,6 @@ void atomisp_css_capture_enable_online(struct atomisp_sub_device *asd,
void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd,
unsigned short stream_index, bool enable);
-void atomisp_css_video_enable_online(struct atomisp_sub_device *asd,
- bool enable);
-
-void atomisp_css_enable_continuous(struct atomisp_sub_device *asd,
- bool enable);
-
-void atomisp_css_enable_cvf(struct atomisp_sub_device *asd,
- bool enable);
-
int atomisp_css_input_configure_port(struct atomisp_sub_device *asd,
enum mipi_port_id port,
unsigned int num_lanes,
@@ -229,10 +214,9 @@ int atomisp_css_input_configure_port(struct atomisp_sub_device *asd,
unsigned int metadata_height);
int atomisp_create_pipes_stream(struct atomisp_sub_device *asd);
-void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd);
+void atomisp_destroy_pipes_stream(struct atomisp_sub_device *asd);
-void atomisp_css_stop(struct atomisp_sub_device *asd,
- enum ia_css_pipe_id pipe_id, bool in_reset);
+void atomisp_css_stop(struct atomisp_sub_device *asd, bool in_reset);
void atomisp_css_continuous_set_num_raw_frames(
struct atomisp_sub_device *asd,
@@ -244,22 +228,6 @@ int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
unsigned int padded_width,
enum ia_css_frame_format format);
-int atomisp_css_yuvpp_configure_output(struct atomisp_sub_device *asd,
- unsigned int stream_index,
- unsigned int width, unsigned int height,
- unsigned int padded_width,
- enum ia_css_frame_format format);
-
-int atomisp_css_yuvpp_get_output_frame_info(
- struct atomisp_sub_device *asd,
- unsigned int stream_index,
- struct ia_css_frame_info *info);
-
-int atomisp_css_yuvpp_get_viewfinder_frame_info(
- struct atomisp_sub_device *asd,
- unsigned int stream_index,
- struct ia_css_frame_info *info);
-
int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd,
unsigned int width, unsigned int height,
unsigned int min_width,
@@ -276,7 +244,6 @@ int atomisp_css_video_configure_output(struct atomisp_sub_device *asd,
enum ia_css_frame_format format);
int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
- u16 source_pad,
struct ia_css_frame_info *frame_info);
int atomisp_css_video_configure_viewfinder(struct atomisp_sub_device *asd,
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index 1dae2a7cfdd9..b13d1cb4668d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -548,7 +548,7 @@ static int __destroy_pipes(struct atomisp_sub_device *asd)
return 0;
}
-void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd)
+void atomisp_destroy_pipes_stream(struct atomisp_sub_device *asd)
{
if (__destroy_streams(asd))
dev_warn(asd->isp->dev, "destroy stream failed.\n");
@@ -650,23 +650,11 @@ static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd,
return true;
return false;
- case ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE:
- if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
- pipe_id == IA_CSS_PIPE_ID_PREVIEW)
- return true;
-
- return false;
case ATOMISP_RUN_MODE_VIDEO:
if (pipe_id == IA_CSS_PIPE_ID_VIDEO || pipe_id == IA_CSS_PIPE_ID_YUVPP)
return true;
return false;
- case ATOMISP_RUN_MODE_SDV:
- if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
- pipe_id == IA_CSS_PIPE_ID_VIDEO)
- return true;
-
- return false;
}
return false;
@@ -758,7 +746,7 @@ int atomisp_create_pipes_stream(struct atomisp_sub_device *asd)
int atomisp_css_update_stream(struct atomisp_sub_device *asd)
{
- atomisp_destroy_pipes_stream_force(asd);
+ atomisp_destroy_pipes_stream(asd);
return atomisp_create_pipes_stream(asd);
}
@@ -997,57 +985,22 @@ int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd,
return 0;
}
-int atomisp_css_start(struct atomisp_sub_device *asd,
- enum ia_css_pipe_id pipe_id, bool in_reset)
+int atomisp_css_start(struct atomisp_sub_device *asd)
{
struct atomisp_device *isp = asd->isp;
bool sp_is_started = false;
int ret = 0, i = 0;
- if (in_reset) {
- ret = atomisp_css_update_stream(asd);
- if (ret)
- return ret;
+ if (!sh_css_hrt_system_is_idle())
+ dev_err(isp->dev, "CSS HW not idle before starting SP\n");
- /* Invalidate caches. FIXME: should flush only necessary buffers */
- wbinvd();
+ if (ia_css_start_sp()) {
+ dev_err(isp->dev, "start sp error.\n");
+ ret = -EINVAL;
+ goto start_err;
}
- /*
- * For dual steam case, it is possible that:
- * 1: for this stream, it is at the stage that:
- * - after set_fmt is called
- * - before stream on is called
- * 2: for the other stream, the stream off is called which css reset
- * has been done.
- *
- * Thus the stream created in set_fmt get destroyed and need to be
- * recreated in the next stream on.
- */
- if (!asd->stream_prepared) {
- ret = atomisp_create_pipes_stream(asd);
- if (ret)
- return ret;
- }
- /*
- * SP can only be started one time
- * if atomisp_subdev_streaming_count() tell there already has some
- * subdev at streamming, then SP should already be started previously,
- * so need to skip start sp procedure
- */
- if (atomisp_streaming_count(isp)) {
- dev_dbg(isp->dev, "skip start sp\n");
- } else {
- if (!sh_css_hrt_system_is_idle())
- dev_err(isp->dev, "CSS HW not idle before starting SP\n");
- if (ia_css_start_sp()) {
- dev_err(isp->dev, "start sp error.\n");
- ret = -EINVAL;
- goto start_err;
- } else {
- sp_is_started = true;
- }
- }
+ sp_is_started = true;
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
if (asd->stream_env[i].stream) {
@@ -1066,16 +1019,15 @@ int atomisp_css_start(struct atomisp_sub_device *asd,
return 0;
start_err:
- atomisp_destroy_pipes_stream_force(asd);
-
- /* css 2.0 API limitation: ia_css_stop_sp() could be only called after
- * destroy all pipes
- */
/*
- * SP can not be stop if other streams are in use
+ * CSS 2.0 API limitation: ia_css_stop_sp() can only be called after
+ * destroying all pipes.
*/
- if ((atomisp_streaming_count(isp) == 0) && sp_is_started)
+ if (sp_is_started) {
+ atomisp_destroy_pipes_stream(asd);
ia_css_stop_sp();
+ atomisp_create_pipes_stream(asd);
+ }
return ret;
}
@@ -1340,8 +1292,7 @@ void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd)
}
int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
- enum ia_css_pipe_id pipe_id,
- int source_pad)
+ enum ia_css_pipe_id pipe_id)
{
struct ia_css_pipe_info p_info;
struct ia_css_grid_info old_info;
@@ -1624,29 +1575,6 @@ int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd,
return 0;
}
-int atomisp_css_isys_two_stream_cfg(struct atomisp_sub_device *asd,
- enum atomisp_input_stream_id stream_id,
- enum atomisp_input_format input_format)
-{
- struct ia_css_stream_config *s_config =
- &asd->stream_env[stream_id].stream_config;
-
- s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.width =
- s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.width;
-
- s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.height =
- s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.height / 2;
-
- s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].linked_isys_stream_id
- = IA_CSS_STREAM_ISYS_STREAM_0;
- s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].format =
- ATOMISP_INPUT_FORMAT_USER_DEF1;
- s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].format =
- ATOMISP_INPUT_FORMAT_USER_DEF2;
- s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].valid = true;
- return 0;
-}
-
void atomisp_css_isys_two_stream_cfg_update_stream1(
struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
@@ -1832,49 +1760,6 @@ void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd,
}
}
-void atomisp_css_video_enable_online(struct atomisp_sub_device *asd,
- bool enable)
-{
- struct atomisp_stream_env *stream_env =
- &asd->stream_env[ATOMISP_INPUT_STREAM_VIDEO];
- int i;
-
- if (stream_env->stream_config.online != enable) {
- stream_env->stream_config.online = enable;
- for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
- stream_env->update_pipe[i] = true;
- }
-}
-
-void atomisp_css_enable_continuous(struct atomisp_sub_device *asd,
- bool enable)
-{
- struct atomisp_stream_env *stream_env =
- &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
- int i;
-
- if (stream_env->stream_config.continuous != !!enable) {
- stream_env->stream_config.continuous = !!enable;
- stream_env->stream_config.pack_raw_pixels = true;
- for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
- stream_env->update_pipe[i] = true;
- }
-}
-
-void atomisp_css_enable_cvf(struct atomisp_sub_device *asd,
- bool enable)
-{
- struct atomisp_stream_env *stream_env =
- &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
- int i;
-
- if (stream_env->stream_config.disable_cont_viewfinder != !enable) {
- stream_env->stream_config.disable_cont_viewfinder = !enable;
- for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
- stream_env->update_pipe[i] = true;
- }
-}
-
int atomisp_css_input_configure_port(
struct atomisp_sub_device *asd,
enum mipi_port_id port,
@@ -1919,23 +1804,20 @@ int atomisp_css_input_configure_port(
return 0;
}
-void atomisp_css_stop(struct atomisp_sub_device *asd,
- enum ia_css_pipe_id pipe_id, bool in_reset)
+void atomisp_css_stop(struct atomisp_sub_device *asd, bool in_reset)
{
- struct atomisp_device *isp = asd->isp;
unsigned long irqflags;
unsigned int i;
- /* if is called in atomisp_reset(), force destroy streams and pipes */
- atomisp_destroy_pipes_stream_force(asd);
+ /*
+ * CSS 2.0 API limitation: ia_css_stop_sp() can only be called after
+ * destroying all pipes.
+ */
+ atomisp_destroy_pipes_stream(asd);
atomisp_init_raw_buffer_bitmap(asd);
- /*
- * SP can not be stop if other streams are in use
- */
- if (atomisp_streaming_count(isp) == 0)
- ia_css_stop_sp();
+ ia_css_stop_sp();
if (!in_reset) {
struct atomisp_stream_env *stream_env;
@@ -1970,10 +1852,7 @@ void atomisp_css_stop(struct atomisp_sub_device *asd,
list_splice_init(&asd->metadata_ready[i], &asd->metadata[i]);
}
- atomisp_flush_params_queue(&asd->video_out_capture);
- atomisp_flush_params_queue(&asd->video_out_vf);
- atomisp_flush_params_queue(&asd->video_out_preview);
- atomisp_flush_params_queue(&asd->video_out_video_capture);
+ atomisp_flush_params_queue(&asd->video_out);
atomisp_free_css_parameters(&asd->params.css_param);
memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
}
@@ -2424,54 +2303,33 @@ static int __get_frame_info(struct atomisp_sub_device *asd,
return 0;
get_info_err:
- atomisp_destroy_pipes_stream_force(asd);
+ atomisp_destroy_pipes_stream(asd);
return -EINVAL;
}
-static unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd,
- uint16_t source_pad)
+static unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd)
{
- struct atomisp_device *isp = asd->isp;
-
- switch (source_pad) {
- case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
- if (asd->copy_mode)
- return IA_CSS_PIPE_ID_COPY;
- if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO
- || asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER)
- return IA_CSS_PIPE_ID_VIDEO;
-
- return IA_CSS_PIPE_ID_CAPTURE;
- case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
- if (asd->copy_mode)
- return IA_CSS_PIPE_ID_COPY;
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+ switch (asd->run_mode->val) {
+ case ATOMISP_RUN_MODE_VIDEO:
+ return IA_CSS_PIPE_ID_VIDEO;
+ case ATOMISP_RUN_MODE_STILL_CAPTURE:
return IA_CSS_PIPE_ID_CAPTURE;
- case ATOMISP_SUBDEV_PAD_SOURCE_VF:
- if (!atomisp_is_mbuscode_raw(asd->fmt[asd->capture_pad].fmt.code)) {
- return IA_CSS_PIPE_ID_CAPTURE;
- }
- fallthrough;
- case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
- if (asd->copy_mode)
- return IA_CSS_PIPE_ID_COPY;
- if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
- return IA_CSS_PIPE_ID_VIDEO;
-
+ case ATOMISP_RUN_MODE_PREVIEW:
return IA_CSS_PIPE_ID_PREVIEW;
}
- dev_warn(isp->dev,
- "invalid source pad:%d, return default preview pipe index.\n",
- source_pad);
+
+ dev_warn(asd->isp->dev, "cannot determine pipe-index, returning default preview pipe\n");
return IA_CSS_PIPE_ID_PREVIEW;
}
int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
- u16 source_pad,
struct ia_css_frame_info *frame_info)
{
struct ia_css_pipe_info info;
- int pipe_index = atomisp_get_pipe_index(asd, source_pad);
+ int pipe_index = atomisp_get_pipe_index(asd);
int stream_index;
struct atomisp_device *isp = asd->isp;
@@ -2485,34 +2343,8 @@ int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
return -EINVAL;
}
- switch (source_pad) {
- case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
- *frame_info = info.output_info[0];
- break;
- case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
- *frame_info = info.output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
- break;
- case ATOMISP_SUBDEV_PAD_SOURCE_VF:
- if (stream_index == ATOMISP_INPUT_STREAM_POSTVIEW)
- *frame_info = info.output_info[0];
- else
- *frame_info = info.vf_output_info[0];
- break;
- case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
- if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
- (pipe_index == IA_CSS_PIPE_ID_VIDEO ||
- pipe_index == IA_CSS_PIPE_ID_YUVPP))
- *frame_info = info.vf_output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
- else
- *frame_info =
- info.output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
-
- break;
- default:
- frame_info = NULL;
- break;
- }
- return frame_info ? 0 : -EINVAL;
+ *frame_info = info.output_info[0];
+ return 0;
}
int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
@@ -2530,39 +2362,6 @@ int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
return 0;
}
-int atomisp_css_yuvpp_configure_output(struct atomisp_sub_device *asd,
- unsigned int stream_index,
- unsigned int width, unsigned int height,
- unsigned int padded_width,
- enum ia_css_frame_format format)
-{
- asd->stream_env[stream_index].pipe_configs[IA_CSS_PIPE_ID_YUVPP].
- default_capture_config.mode =
- IA_CSS_CAPTURE_MODE_RAW;
-
- __configure_output(asd, stream_index, width, height, padded_width,
- format, IA_CSS_PIPE_ID_YUVPP);
- return 0;
-}
-
-int atomisp_css_yuvpp_get_output_frame_info(
- struct atomisp_sub_device *asd,
- unsigned int stream_index,
- struct ia_css_frame_info *info)
-{
- return __get_frame_info(asd, stream_index, info,
- ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_YUVPP);
-}
-
-int atomisp_css_yuvpp_get_viewfinder_frame_info(
- struct atomisp_sub_device *asd,
- unsigned int stream_index,
- struct ia_css_frame_info *info)
-{
- return __get_frame_info(asd, stream_index, info,
- ATOMISP_CSS_VF_FRAME, IA_CSS_PIPE_ID_YUVPP);
-}
-
int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd,
unsigned int width, unsigned int height,
unsigned int min_width,
@@ -3271,7 +3070,7 @@ int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
return -EINVAL;
/* isp needs to be streaming to get DIS statistics */
- if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ if (!asd->streaming)
return -EINVAL;
if (atomisp_compare_dvs_grid(asd, &stats->dvs2_stat.grid_info) != 0)
@@ -3400,7 +3199,7 @@ static bool atomisp_css_isr_get_stream_id(struct ia_css_pipe *css_pipe,
struct atomisp_stream_env *stream_env;
int i, j;
- if (isp->asd.streaming == ATOMISP_DEVICE_STREAMING_DISABLED)
+ if (!isp->asd.streaming)
return false;
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
index 33821b51d90e..762520ed87a5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
@@ -67,26 +67,6 @@ struct atomisp_3a_statistics32 {
u32 isp_config_id;
};
-struct atomisp_metadata_with_type32 {
- /* to specify which type of metadata to get */
- enum atomisp_metadata_type type;
- compat_uptr_t data;
- u32 width;
- u32 height;
- u32 stride; /* in bytes */
- u32 exp_id; /* exposure ID */
- compat_uptr_t effective_width;
-};
-
-struct atomisp_metadata32 {
- compat_uptr_t data;
- u32 width;
- u32 height;
- u32 stride;
- u32 exp_id;
- compat_uptr_t effective_width;
-};
-
struct atomisp_morph_table32 {
unsigned int enabled;
unsigned int height;
@@ -134,18 +114,6 @@ struct atomisp_overlay32 {
unsigned int overlay_start_y;
};
-struct atomisp_calibration_group32 {
- unsigned int size;
- unsigned int type;
- compat_uptr_t calb_grp_values;
-};
-
-struct v4l2_private_int_data32 {
- __u32 size;
- compat_uptr_t data;
- __u32 reserved[2];
-};
-
struct atomisp_shading_table32 {
__u32 enable;
__u32 sensor_width;
@@ -249,11 +217,6 @@ struct atomisp_dvs_6axis_config32 {
compat_uptr_t ycoords_uv;
};
-struct atomisp_sensor_ae_bracketing_lut32 {
- compat_uptr_t lut;
- unsigned int lut_size;
-};
-
#define ATOMISP_IOC_G_HISTOGRAM32 \
_IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram32)
#define ATOMISP_IOC_S_HISTOGRAM32 \
@@ -283,28 +246,10 @@ struct atomisp_sensor_ae_bracketing_lut32 {
#define ATOMISP_IOC_S_ISP_OVERLAY32 \
_IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay32)
-#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group32)
-
-#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data32)
-
#define ATOMISP_IOC_S_ISP_SHD_TAB32 \
_IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table32)
-#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data32)
-
#define ATOMISP_IOC_S_PARAMETERS32 \
_IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters32)
-#define ATOMISP_IOC_G_METADATA32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata32)
-
-#define ATOMISP_IOC_G_METADATA_BY_TYPE32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata_with_type32)
-
-#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_lut32)
-
#endif /* __ATOMISP_COMPAT_IOCTL32_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
index b00bc0b7aaad..abf55a86f795 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
@@ -322,15 +322,11 @@ static void atomisp_csi2_configure_isp2401(struct atomisp_sub_device *asd)
struct v4l2_control ctrl;
struct atomisp_device *isp = asd->isp;
- struct camera_mipi_info *mipi_info;
int mipi_freq = 0;
enum atomisp_camera_port port;
-
int n;
- mipi_info = atomisp_to_sensor_mipi_info(
- isp->inputs[asd->input_curr].camera);
- port = mipi_info->port;
+ port = isp->inputs[asd->input_curr].port;
ctrl.id = V4L2_CID_LINK_FREQ;
if (v4l2_g_ctrl
@@ -375,6 +371,10 @@ int atomisp_mipi_csi2_init(struct atomisp_device *isp)
unsigned int i;
int ret;
+ ret = atomisp_csi2_bridge_init(isp);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
csi2_port = &isp->csi2_port[i];
csi2_port->isp = isp;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
index b245b2f5ce99..16ddb3ab2963 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
@@ -18,17 +18,107 @@
#ifndef __ATOMISP_CSI2_H__
#define __ATOMISP_CSI2_H__
+#include <linux/gpio/consumer.h>
+#include <linux/property.h>
+
#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
+#include "../../include/linux/atomisp.h"
+
#define CSI2_PAD_SINK 0
#define CSI2_PAD_SOURCE 1
#define CSI2_PADS_NUM 2
-struct atomisp_device;
+#define CSI2_MAX_LANES 4
+#define CSI2_MAX_LINK_FREQS 3
+
+#define CSI2_MAX_ACPI_GPIOS 2u
+
+struct acpi_device;
struct v4l2_device;
+
+struct atomisp_device;
struct atomisp_sub_device;
+struct atomisp_csi2_acpi_gpio_map {
+ struct acpi_gpio_params params[CSI2_MAX_ACPI_GPIOS];
+ struct acpi_gpio_mapping mapping[CSI2_MAX_ACPI_GPIOS + 1];
+};
+
+struct atomisp_csi2_acpi_gpio_parsing_data {
+ struct acpi_device *adev;
+ struct atomisp_csi2_acpi_gpio_map *map;
+ u32 settings[CSI2_MAX_ACPI_GPIOS];
+ unsigned int settings_count;
+ unsigned int res_count;
+ unsigned int map_count;
+};
+
+enum atomisp_csi2_sensor_swnodes {
+ SWNODE_SENSOR,
+ SWNODE_SENSOR_PORT,
+ SWNODE_SENSOR_ENDPOINT,
+ SWNODE_CSI2_PORT,
+ SWNODE_CSI2_ENDPOINT,
+ SWNODE_COUNT
+};
+
+struct atomisp_csi2_property_names {
+ char clock_frequency[16];
+ char rotation[9];
+ char bus_type[9];
+ char data_lanes[11];
+ char remote_endpoint[16];
+ char link_frequencies[17];
+};
+
+struct atomisp_csi2_node_names {
+ char port[7];
+ char endpoint[11];
+ char remote_port[7];
+};
+
+struct atomisp_csi2_sensor_config {
+ const char *hid;
+ int lanes;
+ int nr_link_freqs;
+ u64 link_freqs[CSI2_MAX_LINK_FREQS];
+};
+
+struct atomisp_csi2_sensor {
+ /* Append port in "-%u" format as suffix of HID */
+ char name[ACPI_ID_LEN + 4];
+ struct acpi_device *adev;
+ int port;
+ int lanes;
+
+ /* SWNODE_COUNT + 1 for terminating NULL */
+ const struct software_node *group[SWNODE_COUNT + 1];
+ struct software_node swnodes[SWNODE_COUNT];
+ struct atomisp_csi2_node_names node_names;
+ struct atomisp_csi2_property_names prop_names;
+ /* "clock-frequency", "rotation" + terminating entry */
+ struct property_entry dev_properties[3];
+ /* "bus-type", "data-lanes", "remote-endpoint" + "link-freq" + terminating entry */
+ struct property_entry ep_properties[5];
+ /* "data-lanes", "remote-endpoint" + terminating entry */
+ struct property_entry csi2_properties[3];
+ struct software_node_ref_args local_ref[1];
+ struct software_node_ref_args remote_ref[1];
+ struct software_node_ref_args vcm_ref[1];
+ /* GPIO mappings storage */
+ struct atomisp_csi2_acpi_gpio_map gpio_map;
+};
+
+struct atomisp_csi2_bridge {
+ struct software_node csi2_node;
+ char csi2_node_name[14];
+ u32 data_lanes[CSI2_MAX_LANES];
+ unsigned int n_sensors;
+ struct atomisp_csi2_sensor sensors[ATOMISP_CAMERA_NR_PORTS];
+};
+
struct atomisp_mipi_csi2_device {
struct v4l2_subdev subdev;
struct media_pad pads[CSI2_PADS_NUM];
@@ -48,6 +138,8 @@ void atomisp_mipi_csi2_unregister_entities(
struct atomisp_mipi_csi2_device *csi2);
int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2,
struct v4l2_device *vdev);
+int atomisp_csi2_bridge_init(struct atomisp_device *isp);
+int atomisp_csi2_bridge_parse_firmware(struct atomisp_device *isp);
void atomisp_csi2_configure(struct atomisp_sub_device *asd);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
new file mode 100644
index 000000000000..0d12ba78d9c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
@@ -0,0 +1,874 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code to build software firmware node graph for atomisp2 connected sensors
+ * from ACPI tables.
+ *
+ * Copyright (C) 2023 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on drivers/media/pci/intel/ipu3/cio2-bridge.c written by:
+ * Dan Scally <djrscally@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/property.h>
+#include <media/v4l2-fwnode.h>
+
+#include "atomisp_cmd.h"
+#include "atomisp_csi2.h"
+#include "atomisp_internal.h"
+
+#define NODE_SENSOR(_HID, _PROPS) \
+ ((const struct software_node) { \
+ .name = _HID, \
+ .properties = _PROPS, \
+ })
+
+#define NODE_PORT(_PORT, _SENSOR_NODE) \
+ ((const struct software_node) { \
+ .name = _PORT, \
+ .parent = _SENSOR_NODE, \
+ })
+
+#define NODE_ENDPOINT(_EP, _PORT, _PROPS) \
+ ((const struct software_node) { \
+ .name = _EP, \
+ .parent = _PORT, \
+ .properties = _PROPS, \
+ })
+
+#define PMC_CLK_RATE_19_2MHZ 19200000
+
+/*
+ * 79234640-9e10-4fea-a5c1-b5aa8b19756f
+ * This _DSM GUID returns information about the GPIO lines mapped to a sensor.
+ * Function number 1 returns a count of the GPIO lines that are mapped.
+ * Subsequent functions return 32 bit ints encoding information about the GPIO.
+ */
+static const guid_t intel_sensor_gpio_info_guid =
+ GUID_INIT(0x79234640, 0x9e10, 0x4fea,
+ 0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f);
+
+#define INTEL_GPIO_DSM_TYPE_SHIFT 0
+#define INTEL_GPIO_DSM_TYPE_MASK GENMASK(7, 0)
+#define INTEL_GPIO_DSM_PIN_SHIFT 8
+#define INTEL_GPIO_DSM_PIN_MASK GENMASK(15, 8)
+#define INTEL_GPIO_DSM_SENSOR_ON_VAL_SHIFT 24
+#define INTEL_GPIO_DSM_SENSOR_ON_VAL_MASK GENMASK(31, 24)
+
+#define INTEL_GPIO_DSM_TYPE(x) \
+ (((x) & INTEL_GPIO_DSM_TYPE_MASK) >> INTEL_GPIO_DSM_TYPE_SHIFT)
+#define INTEL_GPIO_DSM_PIN(x) \
+ (((x) & INTEL_GPIO_DSM_PIN_MASK) >> INTEL_GPIO_DSM_PIN_SHIFT)
+#define INTEL_GPIO_DSM_SENSOR_ON_VAL(x) \
+ (((x) & INTEL_GPIO_DSM_SENSOR_ON_VAL_MASK) >> INTEL_GPIO_DSM_SENSOR_ON_VAL_SHIFT)
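A worked decode of the bit layout above, using a made-up value (editor's illustration, not part of the patch):

/*
 * Example: a hypothetical _DSM value of 0x01001901 decodes as:
 *
 *   INTEL_GPIO_DSM_TYPE(0x01001901)          == 0x01 -> "powerdown-gpios"
 *   INTEL_GPIO_DSM_PIN(0x01001901)           == 0x19 -> GPIO pin 25
 *   INTEL_GPIO_DSM_SENSOR_ON_VAL(0x01001901) == 0x01 -> the pin is high while
 *                                                       the sensor is on, so
 *                                                       the line is active-low
 */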
+
+/*
+ * 822ace8f-2814-4174-a56b-5f029fe079ee
+ * This _DSM GUID returns a string from the sensor device, which acts as a
+ * module identifier.
+ */
+static const guid_t intel_sensor_module_guid =
+ GUID_INIT(0x822ace8f, 0x2814, 0x4174,
+ 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee);
+
+/*
+ * dc2f6c4f-045b-4f1d-97b9-882a6860a4be
+ * This _DSM GUID returns a package with n*2 strings, with each set of 2 strings
+ * forming a key/value pair for settings, e.g. "CsiLanes" = "1".
+ */
+static const guid_t atomisp_dsm_guid =
+ GUID_INIT(0xdc2f6c4f, 0x045b, 0x4f1d,
+ 0x97, 0xb9, 0x88, 0x2a, 0x68, 0x60, 0xa4, 0xbe);
+
+/*
+ * Extend this array with ACPI Hardware IDs of sensors known to be working
+ * plus the default number of lanes + link-frequencies.
+ *
+ * Do not add an entry for a sensor that is not actually supported,
+ * or which has not yet been converted to work without atomisp_gmin
+ * power-management and with v4l2-async probing.
+ */
+static const struct atomisp_csi2_sensor_config supported_sensors[] = {
+ /* GalaxyCore GC0310 */
+ { "INT0310", 1 },
+ /* Omnivision OV2680 */
+ { "OVTI2680", 1 },
+};
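As a sketch of how this array is meant to grow, an entry that also pins a link frequency would look like the line below; the HID, lane count and frequency are hypothetical and not part of the patch:

	/* Hypothetical sensor: 2 lanes, one known link frequency */
	{ "XXXX1234", 2, 1, { 422400000 } },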
+
+/*
+ * gmin_cfg parsing code. This is a cleaned up version of the gmin_cfg parsing
+ * code from atomisp_gmin_platform.c.
+ * Once all sensors are moved to v4l2-async probing atomisp_gmin_platform.c can
+ * be removed and the duplication of this code goes away.
+ */
+struct gmin_cfg_var {
+ const char *acpi_dev_name;
+ const char *key;
+ const char *val;
+};
+
+static struct gmin_cfg_var lenovo_ideapad_miix_310_vars[] = {
+ /* _DSM contains the wrong CsiPort! */
+ { "OVTI2680:01", "CsiPort", "0" },
+ {}
+};
+
+static const struct dmi_system_id gmin_cfg_dmi_overrides[] = {
+ {
+ /* Lenovo Ideapad Miix 310 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10"),
+ },
+ .driver_data = lenovo_ideapad_miix_310_vars,
+ },
+ {}
+};
+
+static char *gmin_cfg_get_dsm(struct acpi_device *adev, const char *key)
+{
+ union acpi_object *obj, *key_el, *val_el;
+ char *val = NULL;
+ int i;
+
+ obj = acpi_evaluate_dsm_typed(adev->handle, &atomisp_dsm_guid, 0, 0,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!obj)
+ return NULL;
+
+ for (i = 0; i < obj->package.count - 1; i += 2) {
+ key_el = &obj->package.elements[i + 0];
+ val_el = &obj->package.elements[i + 1];
+
+ if (key_el->type != ACPI_TYPE_STRING || val_el->type != ACPI_TYPE_STRING)
+ break;
+
+ if (!strcmp(key_el->string.pointer, key)) {
+ val = kstrdup(val_el->string.pointer, GFP_KERNEL);
+ if (!val)
+ break;
+
+ acpi_handle_info(adev->handle, "Using DSM entry %s=%s\n", key, val);
+ break;
+ }
+ }
+
+ ACPI_FREE(obj);
+ return val;
+}
+
+static char *gmin_cfg_get_dmi_override(struct acpi_device *adev, const char *key)
+{
+ const struct dmi_system_id *id;
+ struct gmin_cfg_var *gv;
+
+ id = dmi_first_match(gmin_cfg_dmi_overrides);
+ if (!id)
+ return NULL;
+
+ for (gv = id->driver_data; gv->acpi_dev_name; gv++) {
+ if (strcmp(gv->acpi_dev_name, acpi_dev_name(adev)))
+ continue;
+
+ if (strcmp(key, gv->key))
+ continue;
+
+ acpi_handle_info(adev->handle, "Using DMI entry %s=%s\n", key, gv->val);
+ return kstrdup(gv->val, GFP_KERNEL);
+ }
+
+ return NULL;
+}
+
+static char *gmin_cfg_get(struct acpi_device *adev, const char *key)
+{
+ char *val;
+
+ val = gmin_cfg_get_dmi_override(adev, key);
+ if (val)
+ return val;
+
+ return gmin_cfg_get_dsm(adev, key);
+}
+
+static int gmin_cfg_get_int(struct acpi_device *adev, const char *key, int default_val)
+{
+ char *str_val;
+ long int_val;
+ int ret;
+
+ str_val = gmin_cfg_get(adev, key);
+ if (!str_val)
+ goto out_use_default;
+
+ ret = kstrtoul(str_val, 0, &int_val);
+ kfree(str_val);
+ if (ret)
+ goto out_use_default;
+
+ return int_val;
+
+out_use_default:
+ acpi_handle_info(adev->handle, "Using default %s=%d\n", key, default_val);
+ return default_val;
+}
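For reference, the bridge only queries two keys through these helpers, as done further down in atomisp_csi2_get_port() and atomisp_csi2_connect_sensor(); a DMI quirk always takes precedence over the _DSM value:

	port  = gmin_cfg_get_int(adev, "CsiPort", port);        /* in atomisp_csi2_get_port() */
	lanes = gmin_cfg_get_int(adev, "CsiLanes", cfg->lanes);  /* in atomisp_csi2_connect_sensor() */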
+
+static int atomisp_csi2_get_pmc_clk_nr_from_acpi_pr0(struct acpi_device *adev)
+{
+ /* ACPI_PATH_SEGMENT_LENGTH is guaranteed to be big enough for name + 0 term. */
+ char name[ACPI_PATH_SEGMENT_LENGTH];
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer b_name = { sizeof(name), name };
+ union acpi_object *package, *element;
+ int i, ret = -ENOENT;
+ acpi_handle rhandle;
+ acpi_status status;
+ u8 clock_num;
+
+ status = acpi_evaluate_object_typed(adev->handle, "_PR0", NULL, &buffer, ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return -ENOENT;
+
+ package = buffer.pointer;
+ for (i = 0; i < package->package.count; i++) {
+ element = &package->package.elements[i];
+
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
+ continue;
+
+ rhandle = element->reference.handle;
+ if (!rhandle)
+ continue;
+
+ acpi_get_name(rhandle, ACPI_SINGLE_NAME, &b_name);
+
+ if (str_has_prefix(name, "CLK") && !kstrtou8(&name[3], 10, &clock_num) &&
+ clock_num <= 4) {
+ ret = clock_num;
+ break;
+ }
+ }
+
+ ACPI_FREE(buffer.pointer);
+
+ if (ret < 0)
+ acpi_handle_warn(adev->handle, "Could not find PMC clk in _PR0\n");
+
+ return ret;
+}
+
+static int atomisp_csi2_set_pmc_clk_freq(struct acpi_device *adev, int clock_num)
+{
+ struct clk *clk;
+ char name[14];
+ int ret;
+
+ if (clock_num < 0)
+ return 0;
+
+ snprintf(name, sizeof(name), "pmc_plt_clk_%d", clock_num);
+
+ clk = clk_get(NULL, name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ acpi_handle_err(adev->handle, "Error getting clk %s:%d\n", name, ret);
+ return ret;
+ }
+
+ /*
+ * The firmware might enable the clock at boot; to change
+ * the rate we must ensure the clock is disabled.
+ */
+ ret = clk_prepare_enable(clk);
+ if (!ret)
+ clk_disable_unprepare(clk);
+ if (!ret)
+ ret = clk_set_rate(clk, PMC_CLK_RATE_19_2MHZ);
+ if (ret)
+ acpi_handle_err(adev->handle, "Error setting clk-rate for %s:%d\n", name, ret);
+
+ clk_put(clk);
+ return ret;
+}
+
+static int atomisp_csi2_get_port(struct acpi_device *adev, int clock_num)
+{
+ int port;
+
+ /*
+ * Compare clock-number to the PMC-clock used for CsiPort 1
+ * in the CHT/BYT reference designs.
+ */
+ if (IS_ISP2401)
+ port = clock_num == 4 ? 1 : 0;
+ else
+ port = clock_num == 0 ? 1 : 0;
+
+ /* Intel DSM or DMI quirk overrides _PR0 CLK derived default */
+ return gmin_cfg_get_int(adev, "CsiPort", port);
+}
+
+/* Note this always returns 1 to continue looping so that res_count is accurate */
+static int atomisp_csi2_handle_acpi_gpio_res(struct acpi_resource *ares, void *_data)
+{
+ struct atomisp_csi2_acpi_gpio_parsing_data *data = _data;
+ struct acpi_resource_gpio *agpio;
+ const char *name;
+ bool active_low;
+ unsigned int i;
+ u32 settings = 0;
+ u16 pin;
+
+ if (!acpi_gpio_get_io_resource(ares, &agpio))
+ return 1; /* Not a GPIO, continue the loop */
+
+ data->res_count++;
+
+ pin = agpio->pin_table[0];
+ for (i = 0; i < data->settings_count; i++) {
+ if (INTEL_GPIO_DSM_PIN(data->settings[i]) == pin) {
+ settings = data->settings[i];
+ break;
+ }
+ }
+
+ if (i == data->settings_count) {
+ acpi_handle_warn(data->adev->handle,
+ "Could not find DSM GPIO settings for pin %u\n", pin);
+ return 1;
+ }
+
+ switch (INTEL_GPIO_DSM_TYPE(settings)) {
+ case 0:
+ name = "reset-gpios";
+ break;
+ case 1:
+ name = "powerdown-gpios";
+ break;
+ default:
+ acpi_handle_warn(data->adev->handle, "Unknown GPIO type 0x%02lx for pin %u\n",
+ INTEL_GPIO_DSM_TYPE(settings), pin);
+ return 1;
+ }
+
+ /*
+ * Both reset and power-down need to be logical false when the sensor
+ * is on (sensor should not be in reset and not be powered-down). So
+ * when the sensor-on-value (which is the physical pin value) is high,
+ * then the signal is active-low.
+ */
+ active_low = INTEL_GPIO_DSM_SENSOR_ON_VAL(settings);
+
+ i = data->map_count;
+ if (i == CSI2_MAX_ACPI_GPIOS)
+ return 1;
+
+ /* res_count is already incremented */
+ data->map->params[i].crs_entry_index = data->res_count - 1;
+ data->map->params[i].active_low = active_low;
+ data->map->mapping[i].name = name;
+ data->map->mapping[i].data = &data->map->params[i];
+ data->map->mapping[i].size = 1;
+ data->map_count++;
+
+ acpi_handle_info(data->adev->handle, "%s crs %d %s pin %u active-%s\n", name,
+ data->res_count - 1, agpio->resource_source.string_ptr,
+ pin, active_low ? "low" : "high");
+
+ return 1;
+}
+
+/*
+ * Helper function to create an ACPI GPIO lookup table for sensor reset and
+ * powerdown signals on Intel Bay Trail (BYT) and Cherry Trail (CHT) devices,
+ * including setting the correct polarity for the GPIO.
+ *
+ * This uses the "79234640-9e10-4fea-a5c1-b5aa8b19756f" DSM method directly
+ * on the sensor device's ACPI node. This is different from later Intel
+ * hardware which has a separate INT3472 acpi_device with this info.
+ *
+ * This function must be called before creating the sw-nodes describing
+ * the fwnode graph endpoint. And sensor drivers used on these devices
+ * must return -EPROBE_DEFER when there is no endpoint description yet.
+ * Together this guarantees that the GPIO lookups are in place before
+ * the sensor driver tries to get GPIOs with gpiod_get().
+ *
+ * Note this code uses the same DSM GUID as the int3472_gpio_guid in
+ * the INT3472 discrete.c code and there is some overlap, but there are
+ * enough differences that it is difficult to share the code.
+ */
+static int atomisp_csi2_add_gpio_mappings(struct atomisp_csi2_sensor *sensor,
+ struct acpi_device *adev)
+{
+ struct atomisp_csi2_acpi_gpio_parsing_data data = { };
+ LIST_HEAD(resource_list);
+ union acpi_object *obj;
+ unsigned int i, j;
+ int ret;
+
+ obj = acpi_evaluate_dsm_typed(adev->handle, &intel_sensor_module_guid,
+ 0x00, 1, NULL, ACPI_TYPE_STRING);
+ if (obj) {
+ acpi_handle_info(adev->handle, "Sensor module id: '%s'\n", obj->string.pointer);
+ ACPI_FREE(obj);
+ }
+
+ /*
+ * First get the GPIO-settings count and then fetch that many GPIO-settings
+ * values. Note the order of these may differ from the order in which
+ * the GPIOs are listed on the ACPI resources! So we first store them all
+ * and then enumerate the ACPI resources and match them up by pin number.
+ */
+ obj = acpi_evaluate_dsm_typed(adev->handle,
+ &intel_sensor_gpio_info_guid, 0x00, 1,
+ NULL, ACPI_TYPE_INTEGER);
+ if (!obj) {
+ acpi_handle_err(adev->handle, "No _DSM entry for GPIO pin count\n");
+ return -EIO;
+ }
+
+ data.settings_count = obj->integer.value;
+ ACPI_FREE(obj);
+
+ if (data.settings_count > CSI2_MAX_ACPI_GPIOS) {
+ acpi_handle_err(adev->handle, "Too many GPIOs %u > %u\n", data.settings_count, CSI2_MAX_ACPI_GPIOS);
+ return -EOVERFLOW;
+ }
+
+ for (i = 0; i < data.settings_count; i++) {
+ /*
+ * i + 2 because the index of this _DSM function is 1-based
+ * and the first function is just a count.
+ */
+ obj = acpi_evaluate_dsm_typed(adev->handle,
+ &intel_sensor_gpio_info_guid,
+ 0x00, i + 2,
+ NULL, ACPI_TYPE_INTEGER);
+ if (!obj) {
+ acpi_handle_err(adev->handle, "No _DSM entry for pin %u\n", i);
+ return -EIO;
+ }
+
+ data.settings[i] = obj->integer.value;
+ ACPI_FREE(obj);
+ }
+
+ /* Since we match up by pin-number the pin-numbers must be unique */
+ for (i = 0; i < data.settings_count; i++) {
+ for (j = i + 1; j < data.settings_count; j++) {
+ if (INTEL_GPIO_DSM_PIN(data.settings[i]) !=
+ INTEL_GPIO_DSM_PIN(data.settings[j]))
+ continue;
+
+ acpi_handle_err(adev->handle, "Duplicate pin number %lu\n",
+ INTEL_GPIO_DSM_PIN(data.settings[i]));
+ return -EIO;
+ }
+ }
+
+ /* Now parse the ACPI resources and build the lookup table */
+ data.adev = adev;
+ data.map = &sensor->gpio_map;
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ atomisp_csi2_handle_acpi_gpio_res, &data);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (data.map_count != data.settings_count ||
+ data.res_count != data.settings_count)
+ acpi_handle_warn(adev->handle, "ACPI GPIO resources vs DSM GPIO-info count mismatch (dsm: %d res: %d map %d\n",
+ data.settings_count, data.res_count, data.map_count);
+
+ ret = acpi_dev_add_driver_gpios(adev, data.map->mapping);
+ if (ret)
+ acpi_handle_err(adev->handle, "Error adding driver GPIOs: %d\n", ret);
+
+ return ret;
+}
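On the consumer side (a sketch under the usual gpiod conventions, not part of this patch), once the mapping above is registered a sensor driver on BYT/CHT can request the lines by the connection IDs "reset" and "powerdown"; the GPIO core appends the "-gpios" suffix when matching against the mapping names:

	struct gpio_desc *reset, *powerdown;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	powerdown = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);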
+
+static const struct atomisp_csi2_property_names prop_names = {
+ .clock_frequency = "clock-frequency",
+ .rotation = "rotation",
+ .bus_type = "bus-type",
+ .data_lanes = "data-lanes",
+ .remote_endpoint = "remote-endpoint",
+ .link_frequencies = "link-frequencies",
+};
+
+static void atomisp_csi2_create_fwnode_properties(struct atomisp_csi2_sensor *sensor,
+ struct atomisp_csi2_bridge *bridge,
+ const struct atomisp_csi2_sensor_config *cfg)
+{
+ sensor->prop_names = prop_names;
+
+ sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_CSI2_ENDPOINT]);
+ sensor->remote_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_SENSOR_ENDPOINT]);
+
+ sensor->dev_properties[0] = PROPERTY_ENTRY_U32(sensor->prop_names.clock_frequency,
+ PMC_CLK_RATE_19_2MHZ);
+ sensor->dev_properties[1] = PROPERTY_ENTRY_U32(sensor->prop_names.rotation, 0);
+
+ sensor->ep_properties[0] = PROPERTY_ENTRY_U32(sensor->prop_names.bus_type,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
+ sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(sensor->prop_names.data_lanes,
+ bridge->data_lanes,
+ sensor->lanes);
+ sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(sensor->prop_names.remote_endpoint,
+ sensor->local_ref);
+ if (cfg->nr_link_freqs > 0)
+ sensor->ep_properties[3] =
+ PROPERTY_ENTRY_U64_ARRAY_LEN(sensor->prop_names.link_frequencies,
+ cfg->link_freqs, cfg->nr_link_freqs);
+
+ sensor->csi2_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(sensor->prop_names.data_lanes,
+ bridge->data_lanes,
+ sensor->lanes);
+ sensor->csi2_properties[1] = PROPERTY_ENTRY_REF_ARRAY(sensor->prop_names.remote_endpoint,
+ sensor->remote_ref);
+}
+
+static void atomisp_csi2_init_swnode_names(struct atomisp_csi2_sensor *sensor)
+{
+ snprintf(sensor->node_names.remote_port,
+ sizeof(sensor->node_names.remote_port),
+ SWNODE_GRAPH_PORT_NAME_FMT, sensor->port);
+ snprintf(sensor->node_names.port,
+ sizeof(sensor->node_names.port),
+ SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
+ snprintf(sensor->node_names.endpoint,
+ sizeof(sensor->node_names.endpoint),
+ SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
+}
+
+static void atomisp_csi2_init_swnode_group(struct atomisp_csi2_sensor *sensor)
+{
+ struct software_node *nodes = sensor->swnodes;
+
+ sensor->group[SWNODE_SENSOR] = &nodes[SWNODE_SENSOR];
+ sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT];
+ sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT];
+ sensor->group[SWNODE_CSI2_PORT] = &nodes[SWNODE_CSI2_PORT];
+ sensor->group[SWNODE_CSI2_ENDPOINT] = &nodes[SWNODE_CSI2_ENDPOINT];
+}
+
+static void atomisp_csi2_create_connection_swnodes(struct atomisp_csi2_bridge *bridge,
+ struct atomisp_csi2_sensor *sensor)
+{
+ struct software_node *nodes = sensor->swnodes;
+
+ atomisp_csi2_init_swnode_names(sensor);
+
+ nodes[SWNODE_SENSOR] = NODE_SENSOR(sensor->name,
+ sensor->dev_properties);
+ nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
+ &nodes[SWNODE_SENSOR]);
+ nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(sensor->node_names.endpoint,
+ &nodes[SWNODE_SENSOR_PORT],
+ sensor->ep_properties);
+ nodes[SWNODE_CSI2_PORT] = NODE_PORT(sensor->node_names.remote_port,
+ &bridge->csi2_node);
+ nodes[SWNODE_CSI2_ENDPOINT] = NODE_ENDPOINT(sensor->node_names.endpoint,
+ &nodes[SWNODE_CSI2_PORT],
+ sensor->csi2_properties);
+
+ atomisp_csi2_init_swnode_group(sensor);
+}
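The resulting software-node graph for one sensor, shown here for an OVTI2680 that ended up on CSI port 1 (illustration only):

/*
 *   OVTI2680-1                          atomisp-csi2
 *   `-- port0                           `-- port1
 *       `-- endpoint0  <--------------->    `-- endpoint0
 *           (remote-endpoint references point at each other)
 */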
+
+static void atomisp_csi2_unregister_sensors(struct atomisp_csi2_bridge *bridge)
+{
+ struct atomisp_csi2_sensor *sensor;
+ unsigned int i;
+
+ for (i = 0; i < bridge->n_sensors; i++) {
+ sensor = &bridge->sensors[i];
+ software_node_unregister_node_group(sensor->group);
+ acpi_dev_remove_driver_gpios(sensor->adev);
+ acpi_dev_put(sensor->adev);
+ }
+}
+
+static int atomisp_csi2_connect_sensor(const struct atomisp_csi2_sensor_config *cfg,
+ struct atomisp_csi2_bridge *bridge,
+ struct atomisp_device *isp)
+{
+ struct fwnode_handle *fwnode, *primary;
+ struct atomisp_csi2_sensor *sensor;
+ struct acpi_device *adev;
+ int ret, clock_num;
+
+ for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+ if (!adev->status.enabled)
+ continue;
+
+ if (bridge->n_sensors >= ATOMISP_CAMERA_NR_PORTS) {
+ dev_err(isp->dev, "Exceeded available CSI2 ports\n");
+ ret = -EOVERFLOW;
+ goto err_put_adev;
+ }
+
+ sensor = &bridge->sensors[bridge->n_sensors];
+
+ /*
+ * ACPI takes care of turning the PMC clock on and off, but on BYT
+ * the clock defaults to 25 MHz instead of the expected 19.2 MHz.
+ * Get the PMC-clock number from ACPI _PR0 method and set it to 19.2 MHz.
+ * The PMC-clock number is also used to determine the default CSI port.
+ */
+ clock_num = atomisp_csi2_get_pmc_clk_nr_from_acpi_pr0(adev);
+
+ ret = atomisp_csi2_set_pmc_clk_freq(adev, clock_num);
+ if (ret)
+ goto err_put_adev;
+
+ sensor->port = atomisp_csi2_get_port(adev, clock_num);
+ if (sensor->port >= ATOMISP_CAMERA_NR_PORTS) {
+ acpi_handle_err(adev->handle, "Invalid port: %d\n", sensor->port);
+ ret = -EINVAL;
+ goto err_put_adev;
+ }
+
+ sensor->lanes = gmin_cfg_get_int(adev, "CsiLanes", cfg->lanes);
+ if (sensor->lanes > CSI2_MAX_LANES) {
+ acpi_handle_err(adev->handle, "Invalid number of lanes: %d\n", sensor->lanes);
+ ret = -EINVAL;
+ goto err_put_adev;
+ }
+
+ ret = atomisp_csi2_add_gpio_mappings(sensor, adev);
+ if (ret)
+ goto err_put_adev;
+
+ snprintf(sensor->name, sizeof(sensor->name), "%s-%u",
+ cfg->hid, sensor->port);
+
+ atomisp_csi2_create_fwnode_properties(sensor, bridge, cfg);
+ atomisp_csi2_create_connection_swnodes(bridge, sensor);
+
+ ret = software_node_register_node_group(sensor->group);
+ if (ret)
+ goto err_remove_mappings;
+
+ fwnode = software_node_fwnode(&sensor->swnodes[SWNODE_SENSOR]);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto err_free_swnodes;
+ }
+
+ sensor->adev = acpi_dev_get(adev);
+
+ primary = acpi_fwnode_handle(adev);
+ primary->secondary = fwnode;
+
+ bridge->n_sensors++;
+ }
+
+ return 0;
+
+err_free_swnodes:
+ software_node_unregister_node_group(sensor->group);
+err_remove_mappings:
+ acpi_dev_remove_driver_gpios(adev);
+err_put_adev:
+ acpi_dev_put(adev);
+ return ret;
+}
+
+static int atomisp_csi2_connect_sensors(struct atomisp_csi2_bridge *bridge,
+ struct atomisp_device *isp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(supported_sensors); i++) {
+ const struct atomisp_csi2_sensor_config *cfg = &supported_sensors[i];
+
+ ret = atomisp_csi2_connect_sensor(cfg, bridge, isp);
+ if (ret)
+ goto err_unregister_sensors;
+ }
+
+ return 0;
+
+err_unregister_sensors:
+ atomisp_csi2_unregister_sensors(bridge);
+ return ret;
+}
+
+int atomisp_csi2_bridge_init(struct atomisp_device *isp)
+{
+ struct atomisp_csi2_bridge *bridge;
+ struct device *dev = isp->dev;
+ struct fwnode_handle *fwnode;
+ int i, ret;
+
+ /*
+ * This function is intended to run only once and then leave
+ * the created nodes attached even after a rmmod, therefore:
+ * 1. The bridge memory is leaked deliberately on success
+ * 2. If a secondary fwnode is already set, exit early.
+ */
+ fwnode = dev_fwnode(dev);
+ if (fwnode && fwnode->secondary)
+ return 0;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ strscpy(bridge->csi2_node_name, "atomisp-csi2", sizeof(bridge->csi2_node_name));
+ bridge->csi2_node.name = bridge->csi2_node_name;
+
+ ret = software_node_register(&bridge->csi2_node);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register the CSI2 HID node\n");
+ goto err_free_bridge;
+ }
+
+ /*
+ * Map the lane arrangement, which is fixed for the ISP2 (meaning we
+ * only need one, rather than one per sensor). We include it as a
+ * member of the bridge struct rather than a global variable so
+ * that it survives if the module is unloaded along with the rest of
+ * the struct.
+ */
+ for (i = 0; i < CSI2_MAX_LANES; i++)
+ bridge->data_lanes[i] = i + 1;
+
+ ret = atomisp_csi2_connect_sensors(bridge, isp);
+ if (ret || bridge->n_sensors == 0)
+ goto err_unregister_csi2;
+
+ fwnode = software_node_fwnode(&bridge->csi2_node);
+ if (!fwnode) {
+ dev_err(dev, "Error getting fwnode from csi2 software_node\n");
+ ret = -ENODEV;
+ goto err_unregister_sensors;
+ }
+
+ set_secondary_fwnode(dev, fwnode);
+
+ return 0;
+
+err_unregister_sensors:
+ atomisp_csi2_unregister_sensors(bridge);
+err_unregister_csi2:
+ software_node_unregister(&bridge->csi2_node);
+err_free_bridge:
+ kfree(bridge);
+
+ return ret;
+}
+
+/******* V4L2 sub-device asynchronous registration callbacks ***********/
+
+struct sensor_async_subdev {
+ struct v4l2_async_subdev asd;
+ int port;
+};
+
+#define to_sensor_asd(a) container_of(a, struct sensor_async_subdev, asd)
+#define notifier_to_atomisp(n) container_of(n, struct atomisp_device, notifier)
+
+/* .bound() notifier callback when a match is found */
+static int atomisp_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct atomisp_device *isp = notifier_to_atomisp(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
+
+ if (s_asd->port >= ATOMISP_CAMERA_NR_PORTS) {
+ dev_err(isp->dev, "port %d not supported\n", s_asd->port);
+ return -EINVAL;
+ }
+
+ if (isp->sensor_subdevs[s_asd->port]) {
+ dev_err(isp->dev, "port %d already has a sensor attached\n", s_asd->port);
+ return -EBUSY;
+ }
+
+ isp->sensor_subdevs[s_asd->port] = sd;
+ return 0;
+}
+
+/* The .unbind callback */
+static void atomisp_notifier_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct atomisp_device *isp = notifier_to_atomisp(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
+
+ isp->sensor_subdevs[s_asd->port] = NULL;
+}
+
+/* .complete() is called after all subdevices have been located */
+static int atomisp_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct atomisp_device *isp = notifier_to_atomisp(notifier);
+
+ return atomisp_register_device_nodes(isp);
+}
+
+static const struct v4l2_async_notifier_operations atomisp_async_ops = {
+ .bound = atomisp_notifier_bound,
+ .unbind = atomisp_notifier_unbind,
+ .complete = atomisp_notifier_complete,
+};
+
+int atomisp_csi2_bridge_parse_firmware(struct atomisp_device *isp)
+{
+ int i, mipi_port, ret;
+
+ v4l2_async_nf_init(&isp->notifier);
+ isp->notifier.ops = &atomisp_async_ops;
+
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct sensor_async_subdev *s_asd;
+ struct fwnode_handle *ep;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ continue;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ if (vep.base.port >= ATOMISP_CAMERA_NR_PORTS) {
+ dev_err(isp->dev, "port %d not supported\n", vep.base.port);
+ ret = -EINVAL;
+ goto err_parse;
+ }
+
+ mipi_port = atomisp_port_to_mipi_port(isp, vep.base.port);
+ isp->sensor_lanes[mipi_port] = vep.bus.mipi_csi2.num_data_lanes;
+
+ s_asd = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep,
+ struct sensor_async_subdev);
+ if (IS_ERR(s_asd)) {
+ ret = PTR_ERR(s_asd);
+ goto err_parse;
+ }
+
+ s_asd->port = vep.base.port;
+
+ fwnode_handle_put(ep);
+ continue;
+
+err_parse:
+ fwnode_handle_put(ep);
+ return ret;
+ }
+
+ return 0;
+}
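A rough sketch of how these two entry points are expected to be wired into the driver's probe path; the surrounding probe code is outside this hunk, so the ordering shown here is an assumption:

	ret = atomisp_csi2_bridge_init(isp);           /* build the fwnode graph */
	if (ret)
		return ret;

	ret = atomisp_csi2_bridge_parse_firmware(isp); /* populate isp->notifier */
	if (ret)
		return ret;

	/* later, once the v4l2_device is registered: */
	ret = v4l2_async_nf_register(&isp->v4l2_dev, &isp->notifier);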
diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
index 3ddc935ec01d..1df534bf54d3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
@@ -69,7 +69,7 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
}
if (opt & OPTION_BIN_RUN) {
- if (atomisp_streaming_count(isp)) {
+ if (isp->asd.streaming) {
atomisp_css_dump_sp_raw_copy_linecount(true);
atomisp_css_debug_dump_isp_binary();
} else {
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
index fa362c8a37e8..54466d2f323a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -47,7 +47,6 @@ static int atomisp_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct atomisp_video_pipe *pipe = container_of(vq, struct atomisp_video_pipe, vb_queue);
- u16 source_pad = atomisp_subdev_source_pad(&pipe->vdev);
int ret;
mutex_lock(&pipe->asd->isp->mutex); /* for get_css_frame_info() / set_fmt() */
@@ -56,7 +55,7 @@ static int atomisp_queue_setup(struct vb2_queue *vq,
* When VIDIOC_S_FMT has not been called before VIDIOC_REQBUFS, then
* this will fail. Call atomisp_set_fmt() ourselves and try again.
*/
- ret = atomisp_get_css_frame_info(pipe->asd, source_pad, &pipe->frame_info);
+ ret = atomisp_get_css_frame_info(pipe->asd, &pipe->frame_info);
if (ret) {
struct v4l2_format f = {
.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420,
@@ -68,7 +67,7 @@ static int atomisp_queue_setup(struct vb2_queue *vq,
if (ret)
goto out;
- ret = atomisp_get_css_frame_info(pipe->asd, source_pad, &pipe->frame_info);
+ ret = atomisp_get_css_frame_info(pipe->asd, &pipe->frame_info);
if (ret)
goto out;
}
@@ -342,119 +341,29 @@ static int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
return 0;
}
-static int atomisp_get_css_buf_type(struct atomisp_sub_device *asd,
- enum ia_css_pipe_id pipe_id,
- uint16_t source_pad)
-{
- if (pipe_id == IA_CSS_PIPE_ID_COPY ||
- source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE ||
- source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO ||
- (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
- asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO))
- return IA_CSS_BUFFER_TYPE_OUTPUT_FRAME;
- else
- return IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
-}
-
/* queue all available buffers to css */
int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd)
{
- enum ia_css_buffer_type buf_type;
- enum ia_css_pipe_id css_capture_pipe_id = IA_CSS_PIPE_ID_NUM;
- enum ia_css_pipe_id css_preview_pipe_id = IA_CSS_PIPE_ID_NUM;
- enum ia_css_pipe_id css_video_pipe_id = IA_CSS_PIPE_ID_NUM;
- enum atomisp_input_stream_id input_stream_id;
- struct atomisp_video_pipe *capture_pipe = NULL;
- struct atomisp_video_pipe *vf_pipe = NULL;
- struct atomisp_video_pipe *preview_pipe = NULL;
- struct atomisp_video_pipe *video_pipe = NULL;
- bool raw_mode = atomisp_is_mbuscode_raw(
- asd->fmt[asd->capture_pad].fmt.code);
-
- if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
- video_pipe = &asd->video_out_video_capture;
- css_video_pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ enum ia_css_pipe_id pipe_id;
+
+ if (asd->copy_mode) {
+ pipe_id = IA_CSS_PIPE_ID_COPY;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
} else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
- preview_pipe = &asd->video_out_capture;
- css_preview_pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
} else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
- video_pipe = &asd->video_out_video_capture;
- preview_pipe = &asd->video_out_preview;
- css_video_pipe_id = IA_CSS_PIPE_ID_VIDEO;
- css_preview_pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
} else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
- preview_pipe = &asd->video_out_preview;
- css_preview_pipe_id = IA_CSS_PIPE_ID_PREVIEW;
+ pipe_id = IA_CSS_PIPE_ID_PREVIEW;
} else {
/* ATOMISP_RUN_MODE_STILL_CAPTURE */
- capture_pipe = &asd->video_out_capture;
- if (!raw_mode)
- vf_pipe = &asd->video_out_vf;
- css_capture_pipe_id = IA_CSS_PIPE_ID_CAPTURE;
- }
-
- if (IS_ISP2401 && asd->copy_mode) {
- css_capture_pipe_id = IA_CSS_PIPE_ID_COPY;
- css_preview_pipe_id = IA_CSS_PIPE_ID_COPY;
- css_video_pipe_id = IA_CSS_PIPE_ID_COPY;
- }
-
- if (capture_pipe) {
- buf_type = atomisp_get_css_buf_type(
- asd, css_capture_pipe_id,
- atomisp_subdev_source_pad(&capture_pipe->vdev));
- input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
-
- atomisp_q_video_buffers_to_css(asd, capture_pipe,
- input_stream_id,
- buf_type, css_capture_pipe_id);
- }
-
- if (vf_pipe) {
- buf_type = atomisp_get_css_buf_type(
- asd, css_capture_pipe_id,
- atomisp_subdev_source_pad(&vf_pipe->vdev));
- if (asd->stream_env[ATOMISP_INPUT_STREAM_POSTVIEW].stream)
- input_stream_id = ATOMISP_INPUT_STREAM_POSTVIEW;
- else
- input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
-
- atomisp_q_video_buffers_to_css(asd, vf_pipe,
- input_stream_id,
- buf_type, css_capture_pipe_id);
- }
-
- if (preview_pipe) {
- buf_type = atomisp_get_css_buf_type(
- asd, css_preview_pipe_id,
- atomisp_subdev_source_pad(&preview_pipe->vdev));
-
- if (css_preview_pipe_id == IA_CSS_PIPE_ID_YUVPP)
- input_stream_id = ATOMISP_INPUT_STREAM_VIDEO;
- else if (asd->stream_env[ATOMISP_INPUT_STREAM_PREVIEW].stream)
- input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW;
- else
- input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
-
- atomisp_q_video_buffers_to_css(asd, preview_pipe,
- input_stream_id,
- buf_type, css_preview_pipe_id);
- }
-
- if (video_pipe) {
- buf_type = atomisp_get_css_buf_type(
- asd, css_video_pipe_id,
- atomisp_subdev_source_pad(&video_pipe->vdev));
- if (asd->stream_env[ATOMISP_INPUT_STREAM_VIDEO].stream)
- input_stream_id = ATOMISP_INPUT_STREAM_VIDEO;
- else
- input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
-
- atomisp_q_video_buffers_to_css(asd, video_pipe,
- input_stream_id,
- buf_type, css_video_pipe_id);
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
}
+ atomisp_q_video_buffers_to_css(asd, &asd->video_out,
+ ATOMISP_INPUT_STREAM_GENERAL,
+ IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, pipe_id);
return 0;
}
@@ -493,9 +402,8 @@ static void atomisp_buf_queue(struct vb2_buffer *vb)
* is put to waiting list until previous per-frame parameter buffers
* get enqueued.
*/
- if (!atomisp_is_vf_pipe(pipe) &&
- (pipe->frame_request_config_id[vb->index] ||
- !list_empty(&pipe->buffers_waiting_for_param)))
+ if (pipe->frame_request_config_id[vb->index] ||
+ !list_empty(&pipe->buffers_waiting_for_param))
list_add_tail(&frame->queue, &pipe->buffers_waiting_for_param);
else
list_add_tail(&frame->queue, &pipe->activeq);
@@ -503,7 +411,7 @@ static void atomisp_buf_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
/* TODO: do this better, not best way to queue to css */
- if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
+ if (asd->streaming) {
if (!list_empty(&pipe->buffers_waiting_for_param))
atomisp_handle_parameter_and_buffer(pipe);
else
@@ -539,9 +447,7 @@ static void atomisp_dev_init_struct(struct atomisp_device *isp)
{
unsigned int i;
- isp->need_gfx_throttle = true;
isp->isp_fatal_error = false;
- isp->mipi_frame_size = 0;
for (i = 0; i < isp->input_cnt; i++)
isp->inputs[i].asd = NULL;
@@ -568,10 +474,6 @@ static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd)
/* s3a grid not enabled for any pipe */
asd->params.s3a_enabled_pipe = IA_CSS_PIPE_ID_NUM;
- /* Add for channel */
- asd->input_curr = 0;
-
- asd->mipi_frame_size = 0;
asd->copy_mode = false;
asd->stream_prepared = false;
@@ -584,19 +486,6 @@ static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd)
/*
* file operation functions
*/
-static unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd)
-{
- return asd->video_out_preview.users +
- asd->video_out_vf.users +
- asd->video_out_capture.users +
- asd->video_out_video_capture.users;
-}
-
-unsigned int atomisp_dev_users(struct atomisp_device *isp)
-{
- return atomisp_subdev_users(&isp->asd);
-}
-
static int atomisp_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
@@ -613,8 +502,6 @@ static int atomisp_open(struct file *file)
mutex_lock(&isp->mutex);
- asd->subdev.devnode = vdev;
-
if (!isp->input_cnt) {
dev_err(isp->dev, "no camera attached\n");
ret = -EINVAL;
@@ -630,11 +517,6 @@ static int atomisp_open(struct file *file)
return -EBUSY;
}
- if (atomisp_dev_users(isp)) {
- dev_dbg(isp->dev, "skip init isp in open\n");
- goto init_subdev;
- }
-
/* runtime power management, turn on ISP */
ret = pm_runtime_resume_and_get(vdev->v4l2_dev->dev);
if (ret < 0) {
@@ -650,19 +532,12 @@ static int atomisp_open(struct file *file)
goto css_error;
}
-init_subdev:
- if (atomisp_subdev_users(asd))
- goto done;
-
atomisp_subdev_init_struct(asd);
/* Ensure that a mode is set */
- v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode);
+ v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_PREVIEW);
-done:
pipe->users++;
mutex_unlock(&isp->mutex);
-
-
return 0;
css_error:
@@ -681,23 +556,18 @@ static int atomisp_release(struct file *file)
struct atomisp_sub_device *asd = pipe->asd;
struct v4l2_subdev_fh fh;
struct v4l2_rect clear_compose = {0};
- unsigned long flags;
int ret;
v4l2_fh_init(&fh.vfh, vdev);
dev_dbg(isp->dev, "release device %s\n", vdev->name);
- asd->subdev.devnode = vdev;
-
/* Note file must not be used after this! */
vb2_fop_release(file);
mutex_lock(&isp->mutex);
pipe->users--;
- if (pipe->users)
- goto done;
/*
* A little trick here:
@@ -714,9 +584,6 @@ static int atomisp_release(struct file *file)
ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
}
- if (atomisp_subdev_users(asd))
- goto done;
-
atomisp_css_free_stat_buffers(asd);
atomisp_free_internal_buffers(asd);
@@ -730,14 +597,7 @@ static int atomisp_release(struct file *file)
isp->inputs[asd->input_curr].asd = NULL;
}
- spin_lock_irqsave(&isp->lock, flags);
- asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
- spin_unlock_irqrestore(&isp->lock, flags);
-
- if (atomisp_dev_users(isp))
- goto done;
-
- atomisp_destroy_pipes_stream_force(asd);
+ atomisp_destroy_pipes_stream(asd);
ret = v4l2_subdev_call(isp->flash, core, s_power, 0);
if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD)
@@ -746,10 +606,9 @@ static int atomisp_release(struct file *file)
if (pm_runtime_put_sync(vdev->v4l2_dev->dev) < 0)
dev_err(isp->dev, "Failed to power off device\n");
-done:
atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
- atomisp_subdev_source_pad(vdev),
+ ATOMISP_SUBDEV_PAD_SOURCE,
V4L2_SEL_TGT_COMPOSE, 0,
&clear_compose);
mutex_unlock(&isp->mutex);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.h b/drivers/staging/media/atomisp/pci/atomisp_fops.h
index 883c1851c1c9..ad1cb1ac8aa4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.h
@@ -22,9 +22,6 @@
#define __ATOMISP_FOPS_H__
#include "atomisp_subdev.h"
-unsigned int atomisp_dev_users(struct atomisp_device *isp);
-unsigned int atomisp_sub_dev_users(struct atomisp_sub_device *asd);
-
/*
* Memory help functions for image frame and private parameters
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
index c718a74ea70a..139ad7ad1dcf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -189,6 +189,7 @@ int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
pdata.subdevs[i].type = type;
pdata.subdevs[i].port = gs->csi_port;
+ pdata.subdevs[i].lanes = gs->csi_lanes;
pdata.subdevs[i].subdev = subdev;
return 0;
}
@@ -1150,6 +1151,7 @@ int atomisp_register_sensor_no_gmin(struct v4l2_subdev *subdev, u32 lanes,
pdata.subdevs[i].type = RAW_CAMERA;
pdata.subdevs[i].port = port;
+ pdata.subdevs[i].lanes = lanes;
pdata.subdevs[i].subdev = subdev;
return 0;
}
@@ -1357,7 +1359,7 @@ static int gmin_get_config_dsm_var(struct device *dev,
dev_info(dev, "found _DSM entry for '%s': %s\n", var,
cur->string.pointer);
strscpy(out, cur->string.pointer, *out_len);
- *out_len = strlen(cur->string.pointer);
+ *out_len = strlen(out);
ACPI_FREE(obj);
return 0;
@@ -1427,8 +1429,8 @@ static int gmin_get_config_var(struct device *maindev,
int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def)
{
- char val[CFG_VAR_NAME_MAX];
- size_t len = sizeof(val);
+ char val[CFG_VAR_NAME_MAX + 1];
+ size_t len = CFG_VAR_NAME_MAX;
long result;
int ret;
@@ -1458,243 +1460,3 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup);
MODULE_DESCRIPTION("Ancillary routines for binding ACPI devices");
MODULE_LICENSE("GPL");
-
-/*
- * The below helper functions don't really belong here and should eventually be
- * moved to some place under drivers/media/v4l2-core.
- */
-#include <linux/platform_data/x86/soc.h>
-
-/*
- * 79234640-9e10-4fea-a5c1-b5aa8b19756f
- * This _DSM GUID returns information about the GPIO lines mapped to a sensor.
- * Function number 1 returns a count of the GPIO lines that are mapped.
- * Subsequent functions return 32 bit ints encoding information about the GPIO.
- */
-static const guid_t intel_sensor_gpio_info_guid =
- GUID_INIT(0x79234640, 0x9e10, 0x4fea,
- 0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f);
-
-/*
- * 822ace8f-2814-4174-a56b-5f029fe079ee
- * This _DSM GUID returns a string from the sensor device, which acts as a
- * module identifier.
- */
-static const guid_t intel_sensor_module_guid =
- GUID_INIT(0x822ace8f, 0x2814, 0x4174,
- 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee);
-
-#define INTEL_DSM_TYPE_SHIFT 0
-#define INTEL_DSM_TYPE_MASK GENMASK(7, 0)
-#define INTEL_DSM_PIN_SHIFT 8
-#define INTEL_DSM_PIN_MASK GENMASK(15, 8)
-#define INTEL_DSM_SENSOR_ON_VAL_SHIFT 24
-#define INTEL_DSM_SENSOR_ON_VAL_MASK GENMASK(31, 24)
-
-#define INTEL_DSM_TYPE(x) \
- (((x) & INTEL_DSM_TYPE_MASK) >> INTEL_DSM_TYPE_SHIFT)
-#define INTEL_DSM_PIN(x) \
- (((x) & INTEL_DSM_PIN_MASK) >> INTEL_DSM_PIN_SHIFT)
-#define INTEL_DSM_SENSOR_ON_VAL(x) \
- (((x) & INTEL_DSM_SENSOR_ON_VAL_MASK) >> INTEL_DSM_SENSOR_ON_VAL_SHIFT)
-
-#define V4L2_SENSOR_MAX_ACPI_GPIOS 2u
-
-struct v4l2_acpi_gpio_map {
- struct acpi_gpio_params params[V4L2_SENSOR_MAX_ACPI_GPIOS];
- struct acpi_gpio_mapping mapping[V4L2_SENSOR_MAX_ACPI_GPIOS + 1];
-};
-
-struct v4l2_acpi_gpio_parsing_data {
- struct device *dev;
- u32 settings[V4L2_SENSOR_MAX_ACPI_GPIOS];
- unsigned int settings_count;
- unsigned int res_count;
- unsigned int map_count;
- struct v4l2_acpi_gpio_map *map;
-};
-
-/* Note this always returns 1 to continue looping so that res_count is accurate */
-static int v4l2_acpi_handle_gpio_res(struct acpi_resource *ares, void *_data)
-{
- struct v4l2_acpi_gpio_parsing_data *data = _data;
- struct acpi_resource_gpio *agpio;
- const char *name;
- bool active_low;
- unsigned int i;
- u32 settings;
- u8 pin;
-
- if (!acpi_gpio_get_io_resource(ares, &agpio))
- return 1; /* Not a GPIO, continue the loop */
-
- data->res_count++;
-
- pin = agpio->pin_table[0];
- for (i = 0; i < data->settings_count; i++) {
- if (INTEL_DSM_PIN(data->settings[i]) == pin) {
- settings = data->settings[i];
- break;
- }
- }
-
- if (i == data->settings_count) {
- dev_warn(data->dev, "Could not find DSM GPIO settings for pin %d\n", pin);
- return 1;
- }
-
- switch (INTEL_DSM_TYPE(settings)) {
- case 0:
- name = "reset-gpios";
- break;
- case 1:
- name = "powerdown-gpios";
- break;
- default:
- dev_warn(data->dev, "Unknown GPIO type 0x%02lx for pin %d\n",
- INTEL_DSM_TYPE(settings), pin);
- return 1;
- }
-
- /*
- * Both reset and power-down need to be logical false when the sensor
- * is on (sensor should not be in reset and not be powered-down). So
- * when the sensor-on-value (which is the physical pin value) is high,
- * then the signal is active-low.
- */
- active_low = INTEL_DSM_SENSOR_ON_VAL(settings) ? true : false;
-
- i = data->map_count;
- if (i == V4L2_SENSOR_MAX_ACPI_GPIOS)
- return 1;
-
- /* res_count is already incremented */
- data->map->params[i].crs_entry_index = data->res_count - 1;
- data->map->params[i].active_low = active_low;
- data->map->mapping[i].name = name;
- data->map->mapping[i].data = &data->map->params[i];
- data->map->mapping[i].size = 1;
- data->map_count++;
-
- dev_info(data->dev, "%s crs %d %s pin %d active-%s\n", name,
- data->res_count - 1, agpio->resource_source.string_ptr,
- pin, active_low ? "low" : "high");
-
- return 1;
-}
-
-/*
- * Helper function to create an ACPI GPIO lookup table for sensor reset and
- * powerdown signals on Intel Bay Trail (BYT) and Cherry Trail (CHT) devices,
- * including setting the correct polarity for the GPIO.
- *
- * This uses the "79234640-9e10-4fea-a5c1-b5aa8b19756f" DSM method directly
- * on the sensor device's ACPI node. This is different from later Intel
- * hardware which has a separate INT3472 with this info. Since there is
- * no separate firmware-node to which we can bind to register the GPIO lookups
- * this unfortunately means that all sensor drivers which may be used on
- * BYT or CHT hw need to call this function. This also means that this function
- * may only fail when it is actually called on BYT/CHT hw. In all other cases
- * it must always succeed.
- *
- * Note this code uses the same DSM GUID as the INT3472 discrete.c code
- * and there is some overlap, but there are enough differences that it is
- * difficult to share the code.
- */
-int v4l2_get_acpi_sensor_info(struct device *dev, char **module_id_str)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- struct v4l2_acpi_gpio_parsing_data data = { };
- LIST_HEAD(resource_list);
- union acpi_object *obj;
- unsigned int i, j;
- int ret;
-
- if (module_id_str)
- *module_id_str = NULL;
-
- if (!adev)
- return 0;
-
- obj = acpi_evaluate_dsm_typed(adev->handle, &intel_sensor_module_guid,
- 0x00, 0x01, NULL, ACPI_TYPE_STRING);
- if (obj) {
- dev_info(dev, "Sensor module id: '%s'\n", obj->string.pointer);
- if (module_id_str)
- *module_id_str = kstrdup(obj->string.pointer, GFP_KERNEL);
-
- ACPI_FREE(obj);
- }
-
- if (!soc_intel_is_byt() && !soc_intel_is_cht())
- return 0;
-
- /*
- * First get the GPIO-settings count and then get count GPIO-settings
- * values. Note the order of these may differ from the order in which
- * the GPIOs are listed on the ACPI resources! So we first store them all
- * and then enumerate the ACPI resources and match them up by pin number.
- */
- obj = acpi_evaluate_dsm_typed(adev->handle,
- &intel_sensor_gpio_info_guid, 0x00, 1,
- NULL, ACPI_TYPE_INTEGER);
- if (!obj)
- return dev_err_probe(dev, -EIO, "No _DSM entry for GPIO pin count\n");
-
- data.settings_count = obj->integer.value;
- ACPI_FREE(obj);
-
- if (data.settings_count > V4L2_SENSOR_MAX_ACPI_GPIOS)
- return dev_err_probe(dev, -EIO, "Too many GPIOs %u > %u\n",
- data.settings_count, V4L2_SENSOR_MAX_ACPI_GPIOS);
-
- for (i = 0; i < data.settings_count; i++) {
- /*
- * i + 2 because the index of this _DSM function is 1-based
- * and the first function is just a count.
- */
- obj = acpi_evaluate_dsm_typed(adev->handle,
- &intel_sensor_gpio_info_guid,
- 0x00, i + 2,
- NULL, ACPI_TYPE_INTEGER);
- if (!obj)
- return dev_err_probe(dev, -EIO, "No _DSM entry for GPIO pin %u\n", i);
-
- data.settings[i] = obj->integer.value;
- ACPI_FREE(obj);
- }
-
- /* Since we match up by pin-number the pin-numbers must be unique */
- for (i = 0; i < data.settings_count; i++) {
- for (j = i + 1; j < data.settings_count; j++) {
- if (INTEL_DSM_PIN(data.settings[i]) !=
- INTEL_DSM_PIN(data.settings[j]))
- continue;
-
- return dev_err_probe(dev, -EIO, "Duplicate pin number %lu\n",
- INTEL_DSM_PIN(data.settings[i]));
- }
- }
-
- /* Use devm_kzalloc() for the mappings + params to auto-free them */
- data.map = devm_kzalloc(dev, sizeof(*data.map), GFP_KERNEL);
- if (!data.map)
- return -ENOMEM;
-
- /* Now parse the ACPI resources and build the lookup table */
- data.dev = dev;
- ret = acpi_dev_get_resources(adev, &resource_list,
- v4l2_acpi_handle_gpio_res, &data);
- if (ret < 0)
- return ret;
-
- acpi_dev_free_resource_list(&resource_list);
-
- if (data.map_count != data.settings_count ||
- data.res_count != data.settings_count)
- dev_warn(dev, "ACPI GPIO resources vs DSM GPIO-info count mismatch (dsm: %d res: %d map %d\n",
- data.settings_count, data.res_count, data.map_count);
-
- return devm_acpi_dev_add_driver_gpios(dev, data.map->mapping);
-}
-EXPORT_SYMBOL_GPL(v4l2_get_acpi_sensor_info);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
index 1fac99f4e2b0..f7b4bee9574b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -27,6 +27,7 @@
#include <linux/idr.h>
#include <media/media-device.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-subdev.h>
/* ISP2400*/
@@ -124,17 +125,23 @@
struct atomisp_input_subdev {
unsigned int type;
enum atomisp_camera_port port;
+ u32 code; /* MEDIA_BUS_FMT_* */
+ bool binning_support;
+ bool crop_support;
struct v4l2_subdev *camera;
+ /* Sensor rects for sensors which support crop */
+ struct v4l2_rect native_rect;
+ struct v4l2_rect active_rect;
+ /* Sensor pad_cfg for which == V4L2_SUBDEV_FORMAT_TRY calls */
+ struct v4l2_subdev_pad_config pad_cfg;
+
struct v4l2_subdev *motor;
- struct v4l2_frmsizeenum frame_size;
/*
* To show this resource is used by
* which stream, in ISP multiple stream mode
*/
struct atomisp_sub_device *asd;
-
- int sensor_index;
};
enum atomisp_dfs_mode {
@@ -168,10 +175,6 @@ struct atomisp_regs {
u32 csi_access_viol;
};
-#define ATOMISP_DEVICE_STREAMING_DISABLED 0
-#define ATOMISP_DEVICE_STREAMING_ENABLED 1
-#define ATOMISP_DEVICE_STREAMING_STOPPING 2
-
/*
* ci device struct
*/
@@ -180,6 +183,7 @@ struct atomisp_device {
struct v4l2_device v4l2_dev;
struct media_device media_dev;
struct atomisp_sub_device asd;
+ struct v4l2_async_notifier notifier;
struct atomisp_platform_data *pdata;
void *mmu_l1_base;
void __iomem *base;
@@ -196,6 +200,12 @@ struct atomisp_device {
* structures and css API calls. */
struct mutex mutex;
+ /*
+ * Number of lanes used by each sensor per port.
+ * Note this is indexed by mipi_port_id not atomisp_camera_port.
+ */
+ int sensor_lanes[N_MIPI_PORT_ID];
+ struct v4l2_subdev *sensor_subdevs[ATOMISP_CAMERA_NR_PORTS];
unsigned int input_cnt;
struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS];
struct v4l2_subdev *flash;
@@ -204,16 +214,11 @@ struct atomisp_device {
struct atomisp_regs saved_regs;
struct atomisp_css_env css_env;
- /* isp timeout status flag */
- bool isp_timeout;
bool isp_fatal_error;
struct work_struct assert_recovery_work;
spinlock_t lock; /* Protects asd.streaming */
- bool need_gfx_throttle;
-
- unsigned int mipi_frame_size;
const struct atomisp_dfs_config *dfs;
unsigned int hpll_freq;
unsigned int running_freq;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 384f31fc66c5..d2174156573a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -528,22 +528,6 @@ int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool settings_change)
return -EBUSY;
}
- switch (pipe->asd->streaming) {
- case ATOMISP_DEVICE_STREAMING_DISABLED:
- break;
- case ATOMISP_DEVICE_STREAMING_ENABLED:
- if (settings_change) {
- dev_err(pipe->isp->dev, "Set fmt/input IOCTL while streaming\n");
- return -EBUSY;
- }
- break;
- case ATOMISP_DEVICE_STREAMING_STOPPING:
- dev_err(pipe->isp->dev, "IOCTL issued while stopping\n");
- return -EBUSY;
- default:
- return -EINVAL;
- }
-
return 0;
}
@@ -615,20 +599,6 @@ static int atomisp_enum_input(struct file *file, void *fh,
return 0;
}
-static unsigned int
-atomisp_subdev_streaming_count(struct atomisp_sub_device *asd)
-{
- return vb2_start_streaming_called(&asd->video_out_preview.vb_queue) +
- vb2_start_streaming_called(&asd->video_out_capture.vb_queue) +
- vb2_start_streaming_called(&asd->video_out_video_capture.vb_queue) +
- vb2_start_streaming_called(&asd->video_out_vf.vb_queue);
-}
-
-unsigned int atomisp_streaming_count(struct atomisp_device *isp)
-{
- return isp->asd.streaming == ATOMISP_DEVICE_STREAMING_ENABLED;
-}
-
/*
* get input are used to get current primary/secondary camera
*/
@@ -703,7 +673,7 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
/* select operating sensor */
ret = v4l2_subdev_call(isp->inputs[input].camera, video, s_routing,
- 0, isp->inputs[input].sensor_index, 0);
+ 0, 0, 0);
if (ret && (ret != -ENOIOCTLCMD)) {
dev_err(isp->dev, "Failed to select sensor\n");
return ret;
@@ -727,20 +697,98 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
return 0;
}
+/*
+ * With crop any framesize <= sensor-size can be made; give
+ * userspace a list of sizes to choose from.
+ */
+static int atomisp_enum_framesizes_crop_inner(struct atomisp_device *isp,
+ struct v4l2_frmsizeenum *fsize,
+ const struct v4l2_rect *active,
+ const struct v4l2_rect *native,
+ int *valid_sizes)
+{
+ static const struct v4l2_frmsize_discrete frame_sizes[] = {
+ { 1600, 1200 },
+ { 1600, 1080 },
+ { 1600, 900 },
+ { 1440, 1080 },
+ { 1280, 960 },
+ { 1280, 720 },
+ { 800, 600 },
+ { 640, 480 },
+ };
+ u32 padding_w, padding_h;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(frame_sizes); i++) {
+ atomisp_get_padding(isp, frame_sizes[i].width, frame_sizes[i].height,
+ &padding_w, &padding_h);
+
+ if ((frame_sizes[i].width + padding_w) > native->width ||
+ (frame_sizes[i].height + padding_h) > native->height)
+ continue;
+
+ /*
+ * Skip sizes where width and height are less than 2/3 of the
+ * sensor size, to avoid sizes with too small a field of view.
+ */
+ if (frame_sizes[i].width < (active->width * 2 / 3) &&
+ frame_sizes[i].height < (active->height * 2 / 3))
+ continue;
+
+ if (*valid_sizes == fsize->index) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete = frame_sizes[i];
+ return 0;
+ }
+
+ (*valid_sizes)++;
+ }
+
+ return -EINVAL;
+}
+
+static int atomisp_enum_framesizes_crop(struct atomisp_device *isp,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
+ struct v4l2_rect active = input->active_rect;
+ struct v4l2_rect native = input->native_rect;
+ int ret, valid_sizes = 0;
+
+ ret = atomisp_enum_framesizes_crop_inner(isp, fsize, &active, &native, &valid_sizes);
+ if (ret == 0)
+ return 0;
+
+ if (!input->binning_support)
+ return -EINVAL;
+
+ active.width /= 2;
+ active.height /= 2;
+ native.width /= 2;
+ native.height /= 2;
+
+ return atomisp_enum_framesizes_crop_inner(isp, fsize, &active, &native, &valid_sizes);
+}
+
static int atomisp_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_frame_size_enum fse = {
.index = fsize->index,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .code = input->code,
};
int ret;
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, enum_frame_size, NULL, &fse);
+ if (input->crop_support)
+ return atomisp_enum_framesizes_crop(isp, fsize);
+
+ ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
if (ret)
return ret;
@@ -836,77 +884,14 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
return -EINVAL;
}
-static int atomisp_adjust_fmt(struct v4l2_format *f)
-{
- const struct atomisp_format_bridge *format_bridge;
- u32 padded_width;
-
- format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
- /* Currently, raw formats are broken!!! */
- if (!format_bridge || format_bridge->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
-
- format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
- if (!format_bridge)
- return -EINVAL;
- }
-
- padded_width = f->fmt.pix.width + pad_w;
-
- if (format_bridge->planar) {
- f->fmt.pix.bytesperline = padded_width;
- f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height *
- DIV_ROUND_UP(format_bridge->depth *
- padded_width, 8));
- } else {
- f->fmt.pix.bytesperline = DIV_ROUND_UP(format_bridge->depth *
- padded_width, 8);
- f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * f->fmt.pix.bytesperline);
- }
-
- if (f->fmt.pix.field == V4L2_FIELD_ANY)
- f->fmt.pix.field = V4L2_FIELD_NONE;
-
- /*
- * FIXME: do we need to setup this differently, depending on the
- * sensor or the pipeline?
- */
- f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
- f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_709;
- f->fmt.pix.xfer_func = V4L2_XFER_FUNC_709;
-
- f->fmt.pix.width -= pad_w;
- f->fmt.pix.height -= pad_h;
-
- return 0;
-}
-
/* This function looks up the closest available resolution. */
static int atomisp_try_fmt_cap(struct file *file, void *fh,
struct v4l2_format *f)
{
struct video_device *vdev = video_devdata(file);
- u32 pixfmt = f->fmt.pix.pixelformat;
- int ret;
-
- /*
- * atomisp_try_fmt() gived results with padding included, note
- * (this gets removed again by the atomisp_adjust_fmt() call below.
- */
- f->fmt.pix.width += pad_w;
- f->fmt.pix.height += pad_h;
-
- ret = atomisp_try_fmt(vdev, &f->fmt.pix, NULL);
- if (ret)
- return ret;
-
- /*
- * atomisp_try_fmt() replaces pixelformat with the sensors native
- * format, restore the actual format requested by userspace.
- */
- f->fmt.pix.pixelformat = pixfmt;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
- return atomisp_adjust_fmt(f);
+ return atomisp_try_fmt(isp, &f->fmt.pix, NULL, NULL);
}
static int atomisp_g_fmt_cap(struct file *file, void *fh,
@@ -1048,8 +1033,7 @@ static int atomisp_qbuf_wrapper(struct file *file, void *fh, struct v4l2_buffer
if (buf->index >= vdev->queue->num_buffers)
return -EINVAL;
- if (!atomisp_is_vf_pipe(pipe) &&
- (buf->reserved2 & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING)) {
+ if (buf->reserved2 & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING) {
/* this buffer will have a per-frame parameter */
pipe->frame_request_config_id[buf->index] = buf->reserved2 &
~ATOMISP_BUFFER_HAS_PER_FRAME_SETTING;
@@ -1099,48 +1083,6 @@ static int atomisp_dqbuf_wrapper(struct file *file, void *fh, struct v4l2_buffer
return 0;
}
-enum ia_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd)
-{
- /*
- * Disable vf_pp and run CSS in video mode. This allows using ISP
- * scaling but it has one frame delay due to CSS internal buffering.
- */
- if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER)
- return IA_CSS_PIPE_ID_VIDEO;
-
- /*
- * Disable vf_pp and run CSS in still capture mode. In this mode
- * CSS does not cause extra latency with buffering, but scaling
- * is not available.
- */
- if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT)
- return IA_CSS_PIPE_ID_CAPTURE;
-
- switch (asd->run_mode->val) {
- case ATOMISP_RUN_MODE_PREVIEW:
- return IA_CSS_PIPE_ID_PREVIEW;
- case ATOMISP_RUN_MODE_VIDEO:
- return IA_CSS_PIPE_ID_VIDEO;
- case ATOMISP_RUN_MODE_STILL_CAPTURE:
- default:
- return IA_CSS_PIPE_ID_CAPTURE;
- }
-}
-
-static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd)
-{
- if (asd->vfpp->val != ATOMISP_VFPP_ENABLE ||
- asd->copy_mode)
- return 1;
-
- if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
- (asd->run_mode->val == ATOMISP_RUN_MODE_STILL_CAPTURE &&
- !atomisp_is_mbuscode_raw(asd->fmt[asd->capture_pad].fmt.code)))
- return 2;
- else
- return 1;
-}
-
/* Input system HW workaround */
/* Input system address translation corrupts burst during */
/* invalidate. SW workaround for this is to set burst length */
@@ -1163,17 +1105,14 @@ int atomisp_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct atomisp_video_pipe *pipe = vq_to_pipe(vq);
struct atomisp_sub_device *asd = pipe->asd;
- struct video_device *vdev = &pipe->vdev;
struct atomisp_device *isp = asd->isp;
struct pci_dev *pdev = to_pci_dev(isp->dev);
- enum ia_css_pipe_id css_pipe_id;
- unsigned int sensor_start_stream;
unsigned long irqflags;
int ret;
- mutex_lock(&isp->mutex);
+ dev_dbg(isp->dev, "Start stream\n");
- dev_dbg(isp->dev, "Start stream on pad %d\n", atomisp_subdev_source_pad(vdev));
+ mutex_lock(&isp->mutex);
ret = atomisp_pipe_check(pipe, false);
if (ret)
@@ -1182,25 +1121,6 @@ int atomisp_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Input system HW workaround */
atomisp_dma_burst_len_cfg(asd);
- /*
- * The number of streaming video nodes is based on which
- * binary is going to be run.
- */
- sensor_start_stream = atomisp_sensor_start_stream(asd);
-
- if (atomisp_subdev_streaming_count(asd) > sensor_start_stream) {
- atomisp_qbuffers_to_css(asd);
- ret = 0;
- goto out_unlock;
- }
-
- if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
- atomisp_qbuffers_to_css(asd);
- goto start_sensor;
- }
-
- css_pipe_id = atomisp_get_css_pipe_id(asd);
-
/* Invalidate caches. FIXME: should flush only necessary buffers */
wbinvd();
@@ -1215,14 +1135,14 @@ int atomisp_start_streaming(struct vb2_queue *vq, unsigned int count)
}
asd->params.dvs_6axis = NULL;
- ret = atomisp_css_start(asd, css_pipe_id, false);
+ ret = atomisp_css_start(asd);
if (ret) {
atomisp_flush_video_pipe(pipe, VB2_BUF_STATE_QUEUED, true);
goto out_unlock;
}
spin_lock_irqsave(&isp->lock, irqflags);
- asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED;
+ asd->streaming = true;
spin_unlock_irqrestore(&isp->lock, irqflags);
atomic_set(&asd->sof_count, -1);
atomic_set(&asd->sequence, -1);
@@ -1238,13 +1158,6 @@ int atomisp_start_streaming(struct vb2_queue *vq, unsigned int count)
atomisp_qbuffers_to_css(asd);
- /* Only start sensor when the last streaming instance started */
- if (atomisp_subdev_streaming_count(asd) < sensor_start_stream) {
- ret = 0;
- goto out_unlock;
- }
-
-start_sensor:
if (isp->flash) {
asd->params.num_flash_frames = 0;
asd->params.flash_state = ATOMISP_FLASH_IDLE;
@@ -1254,19 +1167,9 @@ start_sensor:
atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
atomisp_css_valid_sof(isp));
atomisp_csi2_configure(asd);
- /*
- * set freq to max when streaming count > 1 which indicate
- * dual camera would run
- */
- if (atomisp_streaming_count(isp) > 1) {
- if (atomisp_freq_scaling(isp,
- ATOMISP_DFS_MODE_MAX, false) < 0)
- dev_dbg(isp->dev, "DFS max mode failed!\n");
- } else {
- if (atomisp_freq_scaling(isp,
- ATOMISP_DFS_MODE_AUTO, false) < 0)
- dev_dbg(isp->dev, "DFS auto mode failed!\n");
- }
+
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, false) < 0)
+ dev_dbg(isp->dev, "DFS auto mode failed!\n");
/* Enable the CSI interface on ANN B0/K0 */
if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
@@ -1279,8 +1182,9 @@ start_sensor:
ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
video, s_stream, 1);
if (ret) {
+ dev_err(isp->dev, "Starting sensor stream failed: %d\n", ret);
spin_lock_irqsave(&isp->lock, irqflags);
- asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+ asd->streaming = false;
spin_unlock_irqrestore(&isp->lock, irqflags);
ret = -EINVAL;
goto out_unlock;
@@ -1295,19 +1199,14 @@ void atomisp_stop_streaming(struct vb2_queue *vq)
{
struct atomisp_video_pipe *pipe = vq_to_pipe(vq);
struct atomisp_sub_device *asd = pipe->asd;
- struct video_device *vdev = &pipe->vdev;
struct atomisp_device *isp = asd->isp;
struct pci_dev *pdev = to_pci_dev(isp->dev);
- enum ia_css_pipe_id css_pipe_id;
- bool recreate_stream = false;
- bool first_streamoff = false;
unsigned long flags;
int ret;
- mutex_lock(&isp->mutex);
-
- dev_dbg(isp->dev, "Stop stream on pad %d\n", atomisp_subdev_source_pad(vdev));
+ dev_dbg(isp->dev, "Stop stream\n");
+ mutex_lock(&isp->mutex);
/*
* There is no guarantee that the buffers queued to / owned by the ISP
* will properly be returned to the queue when stopping. Set a flag to
@@ -1324,46 +1223,29 @@ void atomisp_stop_streaming(struct vb2_queue *vq)
if (ret == 0)
dev_warn(isp->dev, "Warning timeout waiting for CSS to return buffers\n");
- if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED)
- first_streamoff = true;
-
spin_lock_irqsave(&isp->lock, flags);
- if (atomisp_subdev_streaming_count(asd) == 1)
- asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
- else
- asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING;
+ asd->streaming = false;
spin_unlock_irqrestore(&isp->lock, flags);
- if (!first_streamoff)
- goto stopsensor;
-
atomisp_clear_css_buffer_counters(asd);
atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);
- css_pipe_id = atomisp_get_css_pipe_id(asd);
- atomisp_css_stop(asd, css_pipe_id, false);
+ atomisp_css_stop(asd, false);
atomisp_flush_video_pipe(pipe, VB2_BUF_STATE_ERROR, true);
atomisp_subdev_cleanup_pending_events(asd);
-stopsensor:
- if (atomisp_subdev_streaming_count(asd) != atomisp_sensor_start_stream(asd))
- goto out_unlock;
ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
video, s_stream, 0);
+ if (ret)
+ dev_warn(isp->dev, "Stopping sensor stream failed: %d\n", ret);
if (isp->flash) {
asd->params.num_flash_frames = 0;
asd->params.flash_state = ATOMISP_FLASH_IDLE;
}
- /* if other streams are running, isp should not be powered off */
- if (atomisp_streaming_count(isp)) {
- atomisp_css_flush(isp);
- goto out_unlock;
- }
-
/* Disable the CSI interface on ANN B0/K0 */
if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
@@ -1373,45 +1255,21 @@ stopsensor:
if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false))
dev_warn(isp->dev, "DFS failed.\n");
+
/*
- * ISP work around, need to reset isp
- * Is it correct time to reset ISP when first node does streamoff?
- */
- if (isp->isp_timeout)
- dev_err(isp->dev, "%s: Resetting with WA activated",
- __func__);
- /*
- * It is possible that the other asd stream is in the stage
- * that v4l2_setfmt is just get called on it, which will
- * create css stream on that stream. But at this point, there
- * is no way to destroy the css stream created on that stream.
- *
- * So force stream destroy here.
+	 * ISP workaround: reset the ISP to allow the next stream-on to work.
+	 * Streams have already been destroyed by atomisp_css_stop().
+	 * Disable PUNIT/ISP acknowledge/handshake - SRSE=3 and then reset.
*/
- if (isp->asd.stream_prepared) {
- atomisp_destroy_pipes_stream_force(&isp->asd);
- recreate_stream = true;
- }
-
- /* disable PUNIT/ISP acknowlede/handshake - SRSE=3 */
pci_write_config_dword(pdev, PCI_I_CONTROL,
isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
- dev_err(isp->dev, "atomisp_reset");
atomisp_reset(isp);
- if (recreate_stream) {
- int ret2;
-
- ret2 = atomisp_create_pipes_stream(&isp->asd);
- if (ret2) {
- dev_err(isp->dev, "%s error re-creating streams: %d\n", __func__, ret2);
- if (!ret)
- ret = ret2;
- }
- }
+ /* Streams were destroyed by atomisp_css_stop(), recreate them. */
+ ret = atomisp_create_pipes_stream(&isp->asd);
+ if (ret)
+ dev_warn(isp->dev, "Recreating streams failed: %d\n", ret);
- isp->isp_timeout = false;
-out_unlock:
mutex_unlock(&isp->mutex);
}
@@ -1902,9 +1760,6 @@ static int atomisp_s_parm(struct file *file, void *fh,
case CI_MODE_STILL_CAPTURE:
mode = ATOMISP_RUN_MODE_STILL_CAPTURE;
break;
- case CI_MODE_CONTINUOUS:
- mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE;
- break;
case CI_MODE_PREVIEW:
mode = ATOMISP_RUN_MODE_PREVIEW;
break;
@@ -1923,14 +1778,8 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
- struct v4l2_subdev *motor;
int err;
- if (!IS_ISP2401)
- motor = isp->inputs[asd->input_curr].motor;
- else
- motor = isp->motor;
-
switch (cmd) {
case ATOMISP_IOC_S_SENSOR_RUNMODE:
if (IS_ISP2401)
@@ -2081,31 +1930,10 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
err = atomisp_fixed_pattern_table(asd, arg);
break;
- case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
- if (motor)
- err = v4l2_subdev_call(motor, core, ioctl, cmd, arg);
- else
- err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- core, ioctl, cmd, arg);
- break;
-
case ATOMISP_IOC_S_EXPOSURE:
- case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
- case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
- case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
- case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
- case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
- case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
core, ioctl, cmd, arg);
break;
- case ATOMISP_IOC_G_UPDATE_EXPOSURE:
- if (IS_ISP2401)
- err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- core, ioctl, cmd, arg);
- else
- err = -EINVAL;
- break;
case ATOMISP_IOC_S_ISP_SHD_TAB:
err = atomisp_set_shading_table(asd, arg);
@@ -2123,12 +1951,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
err = atomisp_set_parameters(vdev, arg);
break;
- case ATOMISP_IOC_G_METADATA:
- err = atomisp_get_metadata(asd, 0, arg);
- break;
- case ATOMISP_IOC_G_METADATA_BY_TYPE:
- err = atomisp_get_metadata_by_type(asd, 0, arg);
- break;
case ATOMISP_IOC_EXT_ISP_CTRL:
err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
core, ioctl, cmd, arg);
@@ -2149,15 +1971,9 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
case ATOMISP_IOC_S_FORMATS_CONFIG:
err = atomisp_formats(asd, 1, arg);
break;
- case ATOMISP_IOC_S_EXPOSURE_WINDOW:
- err = atomisp_s_ae_window(asd, arg);
- break;
case ATOMISP_IOC_INJECT_A_FAKE_EVENT:
err = atomisp_inject_a_fake_event(asd, arg);
break;
- case ATOMISP_IOC_G_INVALID_FRAME_NUM:
- err = atomisp_get_invalid_frame_num(vdev, arg);
- break;
case ATOMISP_IOC_S_ARRAY_RESOLUTION:
err = atomisp_set_array_res(asd, arg);
break;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
index db6da77df06b..56d3df86c706 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
@@ -42,13 +42,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
int atomisp_start_streaming(struct vb2_queue *vq, unsigned int count);
void atomisp_stop_streaming(struct vb2_queue *vq);
-enum ia_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device
- *asd);
-
extern const struct v4l2_ioctl_ops atomisp_ioctl_ops;
-unsigned int atomisp_streaming_count(struct atomisp_device *isp);
-
/* compat_ioctl for 32bit userland app and 64bit kernel */
long atomisp_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index a0acfdb87177..45073e401bac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -117,35 +117,19 @@ const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
return NULL;
}
-bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd,
- unsigned int source_pad)
+bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd)
{
struct v4l2_mbus_framefmt *sink, *src;
sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
- V4L2_SUBDEV_FORMAT_ACTIVE,
- ATOMISP_SUBDEV_PAD_SINK);
+ V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK);
src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
- V4L2_SUBDEV_FORMAT_ACTIVE, source_pad);
+ V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SOURCE);
return atomisp_is_mbuscode_raw(sink->code)
&& !atomisp_is_mbuscode_raw(src->code);
}
-uint16_t atomisp_subdev_source_pad(struct video_device *vdev)
-{
- struct media_link *link;
- u16 ret = 0;
-
- list_for_each_entry(link, &vdev->entity.links, list) {
- if (link->source) {
- ret = link->source->index;
- break;
- }
- }
- return ret;
-}
-
/*
* V4L2 subdev operations
*/
@@ -355,10 +339,7 @@ static const char *atomisp_pad_str(unsigned int pad)
{
static const char *const pad_str[] = {
"ATOMISP_SUBDEV_PAD_SINK",
- "ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE",
- "ATOMISP_SUBDEV_PAD_SOURCE_VF",
- "ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW",
- "ATOMISP_SUBDEV_PAD_SOURCE_VIDEO",
+ "ATOMISP_SUBDEV_PAD_SOURCE",
};
if (pad >= ARRAY_SIZE(pad_str))
@@ -376,9 +357,10 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM];
struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
*comp[ATOMISP_SUBDEV_PADS_NUM];
- unsigned int i;
- unsigned int padding_w = pad_w;
- unsigned int padding_h = pad_h;
+
+ if ((pad == ATOMISP_SUBDEV_PAD_SINK && target != V4L2_SEL_TGT_CROP) ||
+ (pad == ATOMISP_SUBDEV_PAD_SOURCE && target != V4L2_SEL_TGT_COMPOSE))
+ return -EINVAL;
isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp);
@@ -393,27 +375,17 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
r->width = rounddown(r->width, ATOM_ISP_STEP_WIDTH);
r->height = rounddown(r->height, ATOM_ISP_STEP_HEIGHT);
- switch (pad) {
- case ATOMISP_SUBDEV_PAD_SINK: {
+ if (pad == ATOMISP_SUBDEV_PAD_SINK) {
/* Only crop target supported on sink pad. */
unsigned int dvs_w, dvs_h;
crop[pad]->width = ffmt[pad]->width;
crop[pad]->height = ffmt[pad]->height;
- /* Workaround for BYT 1080p perfectshot since the maxinum resolution of
- * front camera ov2722 is 1932x1092 and cannot use pad_w > 12*/
- if (!strncmp(isp->inputs[isp_sd->input_curr].camera->name,
- "ov2722", 6) && crop[pad]->height == 1092) {
- padding_w = 12;
- padding_h = 12;
- }
-
- if (atomisp_subdev_format_conversion(isp_sd,
- isp_sd->capture_pad)
+ if (atomisp_subdev_format_conversion(isp_sd)
&& crop[pad]->width && crop[pad]->height) {
- crop[pad]->width -= padding_w;
- crop[pad]->height -= padding_h;
+ crop[pad]->width -= isp_sd->sink_pad_padding_w;
+ crop[pad]->height -= isp_sd->sink_pad_padding_h;
}
if (isp_sd->params.video_dis_en &&
@@ -433,19 +405,15 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
crop[pad]->height = min(crop[pad]->height, r->height);
if (!(flags & V4L2_SEL_FLAG_KEEP_CONFIG)) {
- for (i = ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE;
- i < ATOMISP_SUBDEV_PADS_NUM; i++) {
- struct v4l2_rect tmp = *crop[pad];
-
- atomisp_subdev_set_selection(
- sd, sd_state, which, i,
- V4L2_SEL_TGT_COMPOSE,
- flags, &tmp);
- }
+ struct v4l2_rect tmp = *crop[pad];
+
+ atomisp_subdev_set_selection(sd, sd_state, which,
+ ATOMISP_SUBDEV_PAD_SOURCE,
+ V4L2_SEL_TGT_COMPOSE, flags, &tmp);
}
if (which == V4L2_SUBDEV_FORMAT_TRY)
- break;
+ goto get_rect;
if (isp_sd->params.video_dis_en &&
isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
@@ -468,12 +436,8 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
ATOMISP_INPUT_STREAM_GENERAL,
crop[pad]->width,
crop[pad]->height);
- break;
- }
- case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
- case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: {
+ } else if (isp_sd->run_mode->val != ATOMISP_RUN_MODE_PREVIEW) {
/* Only compose target is supported on source pads. */
-
if (isp_sd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
/* Scaling is disabled in this mode */
r->width = crop[ATOMISP_SUBDEV_PAD_SINK]->width;
@@ -492,7 +456,7 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
if (r->width == 0 || r->height == 0 ||
crop[ATOMISP_SUBDEV_PAD_SINK]->width == 0 ||
crop[ATOMISP_SUBDEV_PAD_SINK]->height == 0)
- break;
+ goto get_rect;
/*
* do cropping on sensor input if ratio of required resolution
* is different with sensor output resolution ratio:
@@ -522,18 +486,12 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
width * r->height / r->width,
ATOM_ISP_STEP_WIDTH));
-
- break;
- }
- case ATOMISP_SUBDEV_PAD_SOURCE_VF:
- case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ } else {
comp[pad]->width = r->width;
comp[pad]->height = r->height;
- break;
- default:
- return -EINVAL;
}
+get_rect:
/* Set format dimensions on non-sink pads as well. */
if (pad != ATOMISP_SUBDEV_PAD_SINK) {
ffmt[pad]->width = comp[pad]->width;
@@ -612,10 +570,7 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
break;
}
- case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
- case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
- case ATOMISP_SUBDEV_PAD_SOURCE_VF:
- case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
+ case ATOMISP_SUBDEV_PAD_SOURCE:
__ffmt->code = ffmt->code;
break;
}
@@ -755,11 +710,9 @@ static const struct v4l2_ctrl_ops ctrl_ops = {
};
static const char *const ctrl_run_mode_menu[] = {
- NULL,
- "Video",
- "Still capture",
- "Continuous capture",
- "Preview",
+ [ATOMISP_RUN_MODE_VIDEO] = "Video",
+ [ATOMISP_RUN_MODE_STILL_CAPTURE] = "Still capture",
+ [ATOMISP_RUN_MODE_PREVIEW] = "Preview",
};
static const struct v4l2_ctrl_config ctrl_run_mode = {
@@ -767,9 +720,9 @@ static const struct v4l2_ctrl_config ctrl_run_mode = {
.id = V4L2_CID_RUN_MODE,
.name = "Atomisp run mode",
.type = V4L2_CTRL_TYPE_MENU,
- .min = 1,
- .def = 1,
- .max = 4,
+ .min = ATOMISP_RUN_MODE_MIN,
+ .def = ATOMISP_RUN_MODE_PREVIEW,
+ .max = ATOMISP_RUN_MODE_MAX,
.qmenu = ctrl_run_mode_menu,
};
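A quick sketch of what the re-indexed menu buys (illustration only; it assumes the ATOMISP_RUN_MODE_* defines match the menu indices above, which this hunk does not show): the menu index now equals the run-mode value, so callers can pass the mode straight through.

	/* illustration, not part of this patch */
	v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_VIDEO);	/* selects "Video" */
	v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_PREVIEW);	/* selects "Preview", the default */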
@@ -921,23 +874,13 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
sprintf(sd->name, "ATOMISP_SUBDEV");
v4l2_set_subdevdata(sd, asd);
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->devnode = &asd->video_out.vdev;
pads[ATOMISP_SUBDEV_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW].flags = MEDIA_PAD_FL_SOURCE;
- pads[ATOMISP_SUBDEV_PAD_SOURCE_VF].flags = MEDIA_PAD_FL_SOURCE;
- pads[ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE].flags = MEDIA_PAD_FL_SOURCE;
- pads[ATOMISP_SUBDEV_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
-
- asd->fmt[ATOMISP_SUBDEV_PAD_SINK].fmt.code =
- MEDIA_BUS_FMT_SBGGR10_1X10;
- asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW].fmt.code =
- MEDIA_BUS_FMT_SBGGR10_1X10;
- asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_VF].fmt.code =
- MEDIA_BUS_FMT_SBGGR10_1X10;
- asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE].fmt.code =
- MEDIA_BUS_FMT_SBGGR10_1X10;
- asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_VIDEO].fmt.code =
- MEDIA_BUS_FMT_SBGGR10_1X10;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ asd->fmt[ATOMISP_SUBDEV_PAD_SINK].fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE].fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
me->ops = &isp_subdev_media_ops;
me->function = MEDIA_ENT_F_PROC_VIDEO_ISP;
@@ -945,43 +888,11 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
if (ret < 0)
return ret;
- ret = atomisp_init_subdev_pipe(asd, &asd->video_out_preview,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ ret = atomisp_init_subdev_pipe(asd, &asd->video_out, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (ret)
return ret;
- ret = atomisp_init_subdev_pipe(asd, &asd->video_out_vf,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
- if (ret)
- return ret;
-
- ret = atomisp_init_subdev_pipe(asd, &asd->video_out_capture,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
- if (ret)
- return ret;
-
- ret = atomisp_init_subdev_pipe(asd, &asd->video_out_video_capture,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
- if (ret)
- return ret;
-
- ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE",
- ATOMISP_RUN_MODE_STILL_CAPTURE);
- if (ret < 0)
- return ret;
-
- ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER",
- ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE);
- if (ret < 0)
- return ret;
-
- ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW",
- ATOMISP_RUN_MODE_PREVIEW);
- if (ret < 0)
- return ret;
-
- ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO",
- ATOMISP_RUN_MODE_VIDEO);
+ ret = atomisp_video_init(&asd->video_out);
if (ret < 0)
return ret;
@@ -1016,57 +927,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
return asd->ctrl_handler.error;
}
-int atomisp_create_pads_links(struct atomisp_device *isp)
-{
- int i, ret;
-
- for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
- ret = media_create_pad_link(&isp->csi2_port[i].subdev.entity,
- CSI2_PAD_SOURCE, &isp->asd.subdev.entity,
- ATOMISP_SUBDEV_PAD_SINK, 0);
- if (ret < 0)
- return ret;
- }
-
- for (i = 0; i < isp->input_cnt; i++) {
- /* Don't create links for the test-pattern-generator */
- if (isp->inputs[i].type == TEST_PATTERN)
- continue;
-
- ret = media_create_pad_link(&isp->inputs[i].camera->entity, 0,
- &isp->csi2_port[isp->inputs[i].
- port].subdev.entity,
- CSI2_PAD_SINK,
- MEDIA_LNK_FL_ENABLED |
- MEDIA_LNK_FL_IMMUTABLE);
- if (ret < 0)
- return ret;
- }
-
- ret = media_create_pad_link(&isp->asd.subdev.entity,
- ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW,
- &isp->asd.video_out_preview.vdev.entity, 0, 0);
- if (ret < 0)
- return ret;
- ret = media_create_pad_link(&isp->asd.subdev.entity,
- ATOMISP_SUBDEV_PAD_SOURCE_VF,
- &isp->asd.video_out_vf.vdev.entity, 0, 0);
- if (ret < 0)
- return ret;
- ret = media_create_pad_link(&isp->asd.subdev.entity,
- ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE,
- &isp->asd.video_out_capture.vdev.entity, 0, 0);
- if (ret < 0)
- return ret;
- ret = media_create_pad_link(&isp->asd.subdev.entity,
- ATOMISP_SUBDEV_PAD_SOURCE_VIDEO,
- &isp->asd.video_out_video_capture.vdev.entity, 0, 0);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static void atomisp_subdev_cleanup_entities(struct atomisp_sub_device *asd)
{
v4l2_ctrl_handler_free(&asd->ctrl_handler);
@@ -1092,10 +952,7 @@ void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd)
{
atomisp_subdev_cleanup_entities(asd);
v4l2_device_unregister_subdev(&asd->subdev);
- atomisp_video_unregister(&asd->video_out_preview);
- atomisp_video_unregister(&asd->video_out_vf);
- atomisp_video_unregister(&asd->video_out_capture);
- atomisp_video_unregister(&asd->video_out_video_capture);
+ atomisp_video_unregister(&asd->video_out);
}
int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd,
@@ -1104,51 +961,6 @@ int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd,
return v4l2_device_register_subdev(vdev, &asd->subdev);
}
-int atomisp_subdev_register_video_nodes(struct atomisp_sub_device *asd,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /*
- * FIXME: check if all device caps are properly initialized.
- * Should any of those use V4L2_CAP_META_CAPTURE? Probably yes.
- */
-
- asd->video_out_preview.vdev.v4l2_dev = vdev;
- asd->video_out_preview.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- ret = video_register_device(&asd->video_out_preview.vdev,
- VFL_TYPE_VIDEO, -1);
- if (ret < 0)
- goto error;
-
- asd->video_out_capture.vdev.v4l2_dev = vdev;
- asd->video_out_capture.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- ret = video_register_device(&asd->video_out_capture.vdev,
- VFL_TYPE_VIDEO, -1);
- if (ret < 0)
- goto error;
-
- asd->video_out_vf.vdev.v4l2_dev = vdev;
- asd->video_out_vf.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- ret = video_register_device(&asd->video_out_vf.vdev,
- VFL_TYPE_VIDEO, -1);
- if (ret < 0)
- goto error;
-
- asd->video_out_video_capture.vdev.v4l2_dev = vdev;
- asd->video_out_video_capture.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- ret = video_register_device(&asd->video_out_video_capture.vdev,
- VFL_TYPE_VIDEO, -1);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- atomisp_subdev_unregister_entities(asd);
- return ret;
-}
-
/*
* atomisp_subdev_init - ISP Subdevice initialization.
* @dev: Device pointer specific to the ATOM ISP.
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
index fee663bc415a..9a04511b9efd 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
@@ -32,15 +32,8 @@
#define ATOMISP_MAX_EXP_ID (250)
#define ATOMISP_SUBDEV_PAD_SINK 0
-/* capture output for still frames */
-#define ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE 1
-/* viewfinder output for downscaled capture output */
-#define ATOMISP_SUBDEV_PAD_SOURCE_VF 2
-/* preview output for display */
-#define ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW 3
-/* main output for video pipeline */
-#define ATOMISP_SUBDEV_PAD_SOURCE_VIDEO 4
-#define ATOMISP_SUBDEV_PADS_NUM 5
+#define ATOMISP_SUBDEV_PAD_SOURCE 1
+#define ATOMISP_SUBDEV_PADS_NUM 2
struct atomisp_in_fmt_conv {
u32 code;
@@ -74,8 +67,6 @@ struct atomisp_video_pipe {
/* Filled through atomisp_get_css_frame_info() on queue setup */
struct ia_css_frame_info frame_info;
- /* Store here the initial run mode */
- unsigned int default_run_mode;
/* Set from streamoff to disallow queuing further buffers in CSS */
bool stopping;
@@ -248,15 +239,12 @@ struct atomisp_sub_device {
struct v4l2_subdev subdev;
struct media_pad pads[ATOMISP_SUBDEV_PADS_NUM];
struct atomisp_pad_format fmt[ATOMISP_SUBDEV_PADS_NUM];
- u16 capture_pad; /* main capture pad; defines much of isp config */
+ /* Padding for currently set sink-pad fmt */
+ u32 sink_pad_padding_w;
+ u32 sink_pad_padding_h;
unsigned int output;
- struct atomisp_video_pipe video_out_capture; /* capture output */
- struct atomisp_video_pipe video_out_vf; /* viewfinder output */
- struct atomisp_video_pipe video_out_preview; /* preview output */
- /* video pipe main output */
- struct atomisp_video_pipe video_out_video_capture;
- /* struct isp_subdev_params params; */
+ struct atomisp_video_pipe video_out;
struct atomisp_device *isp;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *run_mode;
@@ -312,13 +300,12 @@ struct atomisp_sub_device {
* Writers of streaming must hold both isp->mutex and isp->lock.
* Readers of streaming need to hold only one of the two locks.
*/
- unsigned int streaming;
+ bool streaming;
bool stream_prepared; /* whether css stream is created */
+ bool recreate_streams_on_resume;
unsigned int latest_preview_exp_id; /* CSS ZSL/SDV raw buffer id */
- unsigned int mipi_frame_size;
-
bool copy_mode; /* CSI2+ use copy mode */
int raw_buffer_bitmap[ATOMISP_MAX_EXP_ID / 32 +
@@ -352,9 +339,7 @@ const struct atomisp_in_fmt_conv
atomisp_in_fmt);
const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_compressed(u32 code);
-bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd,
- unsigned int source_pad);
-uint16_t atomisp_subdev_source_pad(struct video_device *vdev);
+bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd);
/* Get pointer to appropriate format */
struct v4l2_mbus_framefmt
@@ -382,10 +367,7 @@ void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd);
void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd);
int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd,
struct v4l2_device *vdev);
-int atomisp_subdev_register_video_nodes(struct atomisp_sub_device *asd,
- struct v4l2_device *vdev);
int atomisp_subdev_init(struct atomisp_device *isp);
void atomisp_subdev_cleanup(struct atomisp_device *isp);
-int atomisp_create_pads_links(struct atomisp_device *isp);
#endif /* __ATOMISP_SUBDEV_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 3f315dabbeeb..c43b916a006e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -27,6 +27,7 @@
#include <linux/dmi.h>
#include <linux/interrupt.h>
#include <linux/bits.h>
+#include <media/v4l2-fwnode.h>
#include <asm/iosf_mbi.h>
@@ -124,22 +125,8 @@ static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = {
.height = ISP_FREQ_RULE_ANY,
.fps = ISP_FREQ_RULE_ANY,
.isp_freq = ISP_FREQ_400MHZ,
- .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
- },
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_400MHZ,
.run_mode = ATOMISP_RUN_MODE_PREVIEW,
},
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_457MHZ,
- .run_mode = ATOMISP_RUN_MODE_SDV,
- },
};
/* Merrifield and Moorefield DFS rules */
@@ -171,22 +158,8 @@ static const struct atomisp_freq_scaling_rule dfs_rules_merr_1179[] = {
.height = ISP_FREQ_RULE_ANY,
.fps = ISP_FREQ_RULE_ANY,
.isp_freq = ISP_FREQ_400MHZ,
- .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
- },
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_400MHZ,
.run_mode = ATOMISP_RUN_MODE_PREVIEW,
},
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_400MHZ,
- .run_mode = ATOMISP_RUN_MODE_SDV,
- },
};
static const struct atomisp_dfs_config dfs_config_merr_1179 = {
@@ -251,23 +224,9 @@ static const struct atomisp_freq_scaling_rule dfs_rules_merr_117a[] = {
.width = ISP_FREQ_RULE_ANY,
.height = ISP_FREQ_RULE_ANY,
.fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_400MHZ,
- .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
- },
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
.isp_freq = ISP_FREQ_200MHZ,
.run_mode = ATOMISP_RUN_MODE_PREVIEW,
},
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_400MHZ,
- .run_mode = ATOMISP_RUN_MODE_SDV,
- },
};
static struct atomisp_dfs_config dfs_config_merr_117a = {
@@ -298,22 +257,8 @@ static const struct atomisp_freq_scaling_rule dfs_rules_byt[] = {
.height = ISP_FREQ_RULE_ANY,
.fps = ISP_FREQ_RULE_ANY,
.isp_freq = ISP_FREQ_400MHZ,
- .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
- },
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_400MHZ,
.run_mode = ATOMISP_RUN_MODE_PREVIEW,
},
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_400MHZ,
- .run_mode = ATOMISP_RUN_MODE_SDV,
- },
};
static const struct atomisp_dfs_config dfs_config_byt = {
@@ -344,29 +289,8 @@ static const struct atomisp_freq_scaling_rule dfs_rules_cht[] = {
.height = ISP_FREQ_RULE_ANY,
.fps = ISP_FREQ_RULE_ANY,
.isp_freq = ISP_FREQ_320MHZ,
- .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
- },
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_320MHZ,
.run_mode = ATOMISP_RUN_MODE_PREVIEW,
},
- {
- .width = 1280,
- .height = 720,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_320MHZ,
- .run_mode = ATOMISP_RUN_MODE_SDV,
- },
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_356MHZ,
- .run_mode = ATOMISP_RUN_MODE_SDV,
- },
};
static const struct atomisp_freq_scaling_rule dfs_rules_cht_soc[] = {
@@ -389,22 +313,8 @@ static const struct atomisp_freq_scaling_rule dfs_rules_cht_soc[] = {
.height = ISP_FREQ_RULE_ANY,
.fps = ISP_FREQ_RULE_ANY,
.isp_freq = ISP_FREQ_320MHZ,
- .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
- },
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_320MHZ,
.run_mode = ATOMISP_RUN_MODE_PREVIEW,
},
- {
- .width = ISP_FREQ_RULE_ANY,
- .height = ISP_FREQ_RULE_ANY,
- .fps = ISP_FREQ_RULE_ANY,
- .isp_freq = ISP_FREQ_356MHZ,
- .run_mode = ATOMISP_RUN_MODE_SDV,
- },
};
static const struct atomisp_dfs_config dfs_config_cht = {
@@ -424,34 +334,22 @@ const struct atomisp_dfs_config dfs_config_cht_soc = {
.dfs_table_size = ARRAY_SIZE(dfs_rules_cht_soc),
};
-int atomisp_video_init(struct atomisp_video_pipe *video, const char *name,
- unsigned int run_mode)
+int atomisp_video_init(struct atomisp_video_pipe *video)
{
int ret;
- const char *direction;
-
- switch (video->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- direction = "output";
- video->pad.flags = MEDIA_PAD_FL_SINK;
- video->vdev.fops = &atomisp_fops;
- video->vdev.ioctl_ops = &atomisp_ioctl_ops;
- video->vdev.lock = &video->isp->mutex;
- break;
- default:
- return -EINVAL;
- }
+ video->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&video->vdev.entity, 1, &video->pad);
if (ret < 0)
return ret;
/* Initialize the video device. */
- snprintf(video->vdev.name, sizeof(video->vdev.name),
- "ATOMISP ISP %s %s", name, direction);
+ strscpy(video->vdev.name, "ATOMISP video output", sizeof(video->vdev.name));
+ video->vdev.fops = &atomisp_fops;
+ video->vdev.ioctl_ops = &atomisp_ioctl_ops;
+ video->vdev.lock = &video->isp->mutex;
video->vdev.release = video_device_release_empty;
video_set_drvdata(&video->vdev, video->isp);
- video->default_run_mode = run_mode;
return 0;
}
@@ -755,15 +653,9 @@ static int atomisp_suspend(struct device *dev)
dev_get_drvdata(dev);
unsigned long flags;
- /*
- * FIXME: Suspend is not supported by sensors. Abort if any video
- * node was opened.
- */
- if (atomisp_dev_users(isp))
- return -EBUSY;
-
+ /* FIXME: Suspend is not supported by sensors. Abort if streaming. */
spin_lock_irqsave(&isp->lock, flags);
- if (isp->asd.streaming != ATOMISP_DEVICE_STREAMING_DISABLED) {
+ if (isp->asd.streaming) {
spin_unlock_irqrestore(&isp->lock, flags);
dev_err(isp->dev, "atomisp cannot suspend at this time.\n");
return -EINVAL;
@@ -772,12 +664,25 @@ static int atomisp_suspend(struct device *dev)
pm_runtime_resume(dev);
+ isp->asd.recreate_streams_on_resume = isp->asd.stream_prepared;
+ atomisp_destroy_pipes_stream(&isp->asd);
+
return atomisp_power_off(dev);
}
static int atomisp_resume(struct device *dev)
{
- return atomisp_power_on(dev);
+ struct atomisp_device *isp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = atomisp_power_on(dev);
+ if (ret)
+ return ret;
+
+ if (isp->asd.recreate_streams_on_resume)
+ ret = atomisp_create_pipes_stream(&isp->asd);
+
+ return ret;
}
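In rough terms (a sketch of the intended ordering, not code from this patch), suspend and resume now pair up as follows:

	/*
	 * atomisp_suspend(): note whether a CSS stream existed
	 * (recreate_streams_on_resume = stream_prepared), destroy the pipes,
	 * then power off.
	 * atomisp_resume(): power on and, if the flag was set, call
	 * atomisp_create_pipes_stream() so the CSS pipes exist again after
	 * the power cycle.
	 */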
int atomisp_csi_lane_config(struct atomisp_device *isp)
@@ -785,7 +690,7 @@ int atomisp_csi_lane_config(struct atomisp_device *isp)
struct pci_dev *pdev = to_pci_dev(isp->dev);
static const struct {
u8 code;
- u8 lanes[MRFLD_PORT_NUM];
+ u8 lanes[N_MIPI_PORT_ID];
} portconfigs[] = {
/* Tangier/Merrifield available lane configurations */
{ 0x00, { 4, 1, 0 } }, /* 00000 */
@@ -809,7 +714,6 @@ int atomisp_csi_lane_config(struct atomisp_device *isp)
};
unsigned int i, j;
- u8 sensor_lanes[MRFLD_PORT_NUM] = { 0 };
u32 csi_control;
int nportconfigs;
u32 port_config_mask;
@@ -837,41 +741,13 @@ int atomisp_csi_lane_config(struct atomisp_device *isp)
nportconfigs = ARRAY_SIZE(portconfigs);
}
- for (i = 0; i < isp->input_cnt; i++) {
- struct camera_mipi_info *mipi_info;
-
- if (isp->inputs[i].type != RAW_CAMERA)
- continue;
-
- mipi_info = atomisp_to_sensor_mipi_info(isp->inputs[i].camera);
- if (!mipi_info)
- continue;
-
- switch (mipi_info->port) {
- case ATOMISP_CAMERA_PORT_PRIMARY:
- sensor_lanes[0] = mipi_info->num_lanes;
- break;
- case ATOMISP_CAMERA_PORT_SECONDARY:
- sensor_lanes[1] = mipi_info->num_lanes;
- break;
- case ATOMISP_CAMERA_PORT_TERTIARY:
- sensor_lanes[2] = mipi_info->num_lanes;
- break;
- default:
- dev_err(isp->dev,
- "%s: invalid port: %d for the %dth sensor\n",
- __func__, mipi_info->port, i);
- return -EINVAL;
- }
- }
-
for (i = 0; i < nportconfigs; i++) {
- for (j = 0; j < MRFLD_PORT_NUM; j++)
- if (sensor_lanes[j] &&
- sensor_lanes[j] != portconfigs[i].lanes[j])
+ for (j = 0; j < N_MIPI_PORT_ID; j++)
+ if (isp->sensor_lanes[j] &&
+ isp->sensor_lanes[j] != portconfigs[i].lanes[j])
break;
- if (j == MRFLD_PORT_NUM)
+ if (j == N_MIPI_PORT_ID)
break; /* Found matching setting */
}
@@ -879,7 +755,7 @@ int atomisp_csi_lane_config(struct atomisp_device *isp)
dev_err(isp->dev,
"%s: could not find the CSI port setting for %d-%d-%d\n",
__func__,
- sensor_lanes[0], sensor_lanes[1], sensor_lanes[2]);
+ isp->sensor_lanes[0], isp->sensor_lanes[1], isp->sensor_lanes[2]);
return -EINVAL;
}
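As a concrete reading of the matching loop above (lane counts assumed for illustration): a single 4-lane sensor on the primary port gives isp->sensor_lanes = { 4, 0, 0 }, and zero entries act as don't-care, so it matches the first table entry.

	/* illustration only */
	isp->sensor_lanes[0] = 4;	/* primary port, 4 data lanes */
	/* matches { 0x00, { 4, 1, 0 } } since sensor_lanes[1] and [2] are 0 */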
@@ -907,7 +783,11 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
{
const struct atomisp_platform_data *pdata;
struct intel_v4l2_subdev_table *subdevs;
- int ret, raw_index = -1, count;
+ int ret, mipi_port;
+
+ ret = atomisp_csi2_bridge_parse_firmware(isp);
+ if (ret)
+ return ret;
pdata = atomisp_get_platform_data();
if (!pdata) {
@@ -915,23 +795,12 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
return 0;
}
- /* FIXME: should return -EPROBE_DEFER if not all subdevs were probed */
- for (count = 0; count < SUBDEV_WAIT_TIMEOUT_MAX_COUNT; count++) {
- int camera_count = 0;
-
- for (subdevs = pdata->subdevs; subdevs->type; ++subdevs) {
- if (subdevs->type == RAW_CAMERA)
- camera_count++;
- }
- if (camera_count)
- break;
- msleep(SUBDEV_WAIT_TIMEOUT);
- }
- /* Wait more time to give more time for subdev init code to finish */
- msleep(5 * SUBDEV_WAIT_TIMEOUT);
-
- /* FIXME: should, instead, use I2C probe */
-
+ /*
+ * TODO: this is left here for now to allow testing atomisp-sensor
+ * drivers which are still using the atomisp_gmin_platform infra before
+ * converting them to standard v4l2 sensor drivers using runtime-pm +
+ * ACPI for pm and v4l2_async_register_subdev_sensor() registration.
+ */
for (subdevs = pdata->subdevs; subdevs->type; ++subdevs) {
ret = v4l2_device_register_subdev(&isp->v4l2_dev, subdevs->subdev);
if (ret)
@@ -939,25 +808,20 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
switch (subdevs->type) {
case RAW_CAMERA:
- dev_dbg(isp->dev, "raw_index: %d\n", raw_index);
- raw_index = isp->input_cnt;
- if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) {
- dev_warn(isp->dev,
- "too many atomisp inputs, ignored\n");
+ if (subdevs->port >= ATOMISP_CAMERA_NR_PORTS) {
+ dev_err(isp->dev, "port %d not supported\n", subdevs->port);
+ break;
+ }
+
+ if (isp->sensor_subdevs[subdevs->port]) {
+ dev_err(isp->dev, "port %d already has a sensor attached\n",
+ subdevs->port);
break;
}
- isp->inputs[isp->input_cnt].type = subdevs->type;
- isp->inputs[isp->input_cnt].port = subdevs->port;
- isp->inputs[isp->input_cnt].camera = subdevs->subdev;
- isp->inputs[isp->input_cnt].sensor_index = 0;
- /*
- * initialize the subdev frame size, then next we can
- * judge whether frame_size store effective value via
- * pixel_format.
- */
- isp->inputs[isp->input_cnt].frame_size.pixel_format = 0;
- isp->input_cnt++;
+ mipi_port = atomisp_port_to_mipi_port(isp, subdevs->port);
+ isp->sensor_lanes[mipi_port] = subdevs->lanes;
+ isp->sensor_subdevs[subdevs->port] = subdevs->subdev;
break;
case CAMERA_MOTOR:
if (isp->motor) {
@@ -979,21 +843,6 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
}
}
- /*
- * HACK: Currently VCM belongs to primary sensor only, but correct
- * approach must be to acquire from platform code which sensor
- * owns it.
- */
- if (isp->motor && raw_index >= 0)
- isp->inputs[raw_index].motor = isp->motor;
-
- /* Proceed even if no modules detected. For COS mode and no modules. */
- if (!isp->input_cnt)
- dev_warn(isp->dev, "no camera attached or fail to detect\n");
- else
- dev_info(isp->dev, "detected %d camera sensors\n",
- isp->input_cnt);
-
return atomisp_csi_lane_config(isp);
}
@@ -1067,29 +916,8 @@ static int atomisp_register_entities(struct atomisp_device *isp)
goto subdev_register_failed;
}
- for (i = 0; i < isp->input_cnt; i++) {
- if (isp->inputs[i].port >= ATOMISP_CAMERA_NR_PORTS) {
- dev_err(isp->dev, "isp->inputs port %d not supported\n",
- isp->inputs[i].port);
- ret = -EINVAL;
- goto link_failed;
- }
- }
-
- if (isp->input_cnt < ATOM_ISP_MAX_INPUTS) {
- dev_dbg(isp->dev,
- "TPG detected, camera_cnt: %d\n", isp->input_cnt);
- isp->inputs[isp->input_cnt].type = TEST_PATTERN;
- isp->inputs[isp->input_cnt].port = -1;
- isp->inputs[isp->input_cnt++].camera = &isp->tpg.sd;
- } else {
- dev_warn(isp->dev, "too many atomisp inputs, TPG ignored.\n");
- }
-
return 0;
-link_failed:
- atomisp_subdev_unregister_entities(&isp->asd);
subdev_register_failed:
atomisp_tpg_unregister_entities(&isp->tpg);
tpg_register_failed:
@@ -1103,15 +931,152 @@ v4l2_device_failed:
return ret;
}
-static int atomisp_register_device_nodes(struct atomisp_device *isp)
+static void atomisp_init_sensor(struct atomisp_input_subdev *input)
+{
+ struct v4l2_subdev_mbus_code_enum mbus_code_enum = { };
+ struct v4l2_subdev_frame_size_enum fse = { };
+ struct v4l2_subdev_state sd_state = {
+ .pads = &input->pad_cfg,
+ };
+ struct v4l2_subdev_selection sel = { };
+ int i, err;
+
+ mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, NULL, &mbus_code_enum);
+ if (!err)
+ input->code = mbus_code_enum.code;
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
+ err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ if (err)
+ return;
+
+ input->native_rect = sel.r;
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.target = V4L2_SEL_TGT_CROP_DEFAULT;
+ err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ if (err)
+ return;
+
+ input->active_rect = sel.r;
+
+ /*
+	 * Check for a framesize with half active_rect width and height;
+	 * if found, assume the sensor supports binning.
+ * Do this before changing the crop-rect since that may influence
+ * enum_frame_size results.
+ */
+ for (i = 0; ; i++) {
+ fse.index = i;
+ fse.code = input->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ err = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
+ if (err)
+ break;
+
+ if (fse.min_width <= (input->active_rect.width / 2) &&
+ fse.min_height <= (input->active_rect.height / 2)) {
+ input->binning_support = true;
+ break;
+ }
+ }
+
+ /*
+	 * The ISP also wants the non-active pixels at the border of the sensor
+	 * for padding, so set the crop rect to cover the entire sensor instead
+	 * of only the default active area.
+ *
+ * Do this for both try and active formats since the try_crop rect in
+ * pad_cfg may influence (clamp) future try_fmt calls with which == try.
+ */
+ sel.which = V4L2_SUBDEV_FORMAT_TRY;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = input->native_rect;
+ err = v4l2_subdev_call(input->camera, pad, set_selection, &sd_state, &sel);
+ if (err)
+ return;
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = input->native_rect;
+ err = v4l2_subdev_call(input->camera, pad, set_selection, NULL, &sel);
+ if (err)
+ return;
+
+ dev_info(input->camera->dev, "Supports crop native %dx%d active %dx%d binning %d\n",
+ input->native_rect.width, input->native_rect.height,
+ input->active_rect.width, input->active_rect.height,
+ input->binning_support);
+
+ input->crop_support = true;
+}
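To make the binning heuristic concrete (all sensor geometry below is assumed, not taken from this patch):

	/*
	 * Example: native rect 3280x2464, active (CROP_DEFAULT) rect 3264x2448.
	 * If enum_frame_size reports a 1632x1224 mode, then
	 * 1632 <= 3264 / 2 and 1224 <= 2448 / 2, so binning_support is set.
	 * Afterwards the crop rect is widened to the full 3280x2464 native
	 * rect so the ISP can use the border pixels for padding.
	 */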
+
+int atomisp_register_device_nodes(struct atomisp_device *isp)
{
- int err;
+ struct atomisp_input_subdev *input;
+ int i, err;
+
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ err = media_create_pad_link(&isp->csi2_port[i].subdev.entity,
+ CSI2_PAD_SOURCE, &isp->asd.subdev.entity,
+ ATOMISP_SUBDEV_PAD_SINK, 0);
+ if (err)
+ return err;
+
+ if (!isp->sensor_subdevs[i])
+ continue;
- err = atomisp_subdev_register_video_nodes(&isp->asd, &isp->v4l2_dev);
+ input = &isp->inputs[isp->input_cnt];
+
+ input->type = RAW_CAMERA;
+ input->port = i;
+ input->camera = isp->sensor_subdevs[i];
+
+ atomisp_init_sensor(input);
+
+ /*
+ * HACK: Currently VCM belongs to primary sensor only, but correct
+ * approach must be to acquire from platform code which sensor
+ * owns it.
+ */
+ if (i == ATOMISP_CAMERA_PORT_PRIMARY)
+ input->motor = isp->motor;
+
+ err = media_create_pad_link(&input->camera->entity, 0,
+ &isp->csi2_port[i].subdev.entity,
+ CSI2_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (err)
+ return err;
+
+ isp->input_cnt++;
+ }
+
+ if (!isp->input_cnt)
+ dev_warn(isp->dev, "no camera attached or fail to detect\n");
+ else
+ dev_info(isp->dev, "detected %d camera sensors\n", isp->input_cnt);
+
+ if (isp->input_cnt < ATOM_ISP_MAX_INPUTS) {
+ dev_dbg(isp->dev, "TPG detected, camera_cnt: %d\n", isp->input_cnt);
+ isp->inputs[isp->input_cnt].type = TEST_PATTERN;
+ isp->inputs[isp->input_cnt].port = -1;
+ isp->inputs[isp->input_cnt++].camera = &isp->tpg.sd;
+ } else {
+ dev_warn(isp->dev, "too many atomisp inputs, TPG ignored.\n");
+ }
+
+ isp->asd.video_out.vdev.v4l2_dev = &isp->v4l2_dev;
+ isp->asd.video_out.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ err = video_register_device(&isp->asd.video_out.vdev, VFL_TYPE_VIDEO, -1);
if (err)
return err;
- err = atomisp_create_pads_links(isp);
+ err = media_create_pad_link(&isp->asd.subdev.entity, ATOMISP_SUBDEV_PAD_SOURCE,
+ &isp->asd.video_out.vdev.entity, 0, 0);
if (err)
return err;
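Roughly, the links created above give the following media graph per populated port (sketch, single sensor assumed):

	/*
	 *  sensor --(ENABLED|IMMUTABLE)--> csi2_port[i] sink
	 *  csi2_port[i] source --(flags 0)--> ATOMISP_SUBDEV sink pad
	 *  ATOMISP_SUBDEV source pad --(flags 0)--> video_out vdev
	 */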
@@ -1543,9 +1508,11 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
isp->firmware = NULL;
isp->css_env.isp_css_fw.data = NULL;
- err = atomisp_register_device_nodes(isp);
- if (err)
+ err = v4l2_async_nf_register(&isp->v4l2_dev, &isp->notifier);
+ if (err) {
+		dev_err(isp->dev, "failed to register async notifier: %d\n", err);
goto css_init_fail;
+ }
atomisp_drvfs_init(isp);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
index ccf1c0ac17b2..fad9573374b3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
@@ -26,10 +26,10 @@ struct v4l2_device;
struct atomisp_device;
struct firmware;
-int atomisp_video_init(struct atomisp_video_pipe *video, const char *name,
- unsigned int run_mode);
+int atomisp_video_init(struct atomisp_video_pipe *video);
void atomisp_video_unregister(struct atomisp_video_pipe *video);
const struct firmware *atomisp_load_firmware(struct atomisp_device *isp);
int atomisp_csi_lane_config(struct atomisp_device *isp);
+int atomisp_register_device_nodes(struct atomisp_device *isp);
#endif /* __ATOMISP_V4L2_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
index 700070c58eda..90c17884968b 100644
--- a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
@@ -138,4 +138,6 @@ bool ia_css_frame_is_same_type(
int ia_css_dma_configure_from_info(struct dma_port_config *config,
const struct ia_css_frame_info *info);
+unsigned int ia_css_frame_pad_width(unsigned int width, enum ia_css_frame_format format);
+
#endif /* __IA_CSS_FRAME_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
index 83bb42e05421..2d7fddb114f6 100644
--- a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
@@ -269,6 +269,34 @@ int ia_css_frame_init_planes(struct ia_css_frame *frame)
return 0;
}
+unsigned int ia_css_frame_pad_width(unsigned int width, enum ia_css_frame_format format)
+{
+ switch (format) {
+ /*
+ * Frames with a U and V plane of 8 bits per pixel need to have
+	 * all planes aligned; this means double the alignment for the
+ * Y plane if the horizontal decimation is 2.
+ */
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ return CEIL_MUL(width, 2 * HIVE_ISP_DDR_WORD_BYTES);
+
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ return CEIL_MUL(width, NV12_TILEY_TILE_WIDTH);
+
+ case IA_CSS_FRAME_FORMAT_RAW:
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ return CEIL_MUL(width, 2 * ISP_VEC_NELEMS);
+
+ default:
+ return CEIL_MUL(width, HIVE_ISP_DDR_WORD_BYTES);
+ }
+}
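A worked example of the new helper (the alignment constants are assumed values, not visible in this hunk):

	/* assuming HIVE_ISP_DDR_WORD_BYTES == 32 and ISP_VEC_NELEMS == 64 */
	ia_css_frame_pad_width(1936, IA_CSS_FRAME_FORMAT_YUV420);	/* CEIL_MUL(1936, 64)  -> 1984 */
	ia_css_frame_pad_width(1936, IA_CSS_FRAME_FORMAT_RAW);		/* CEIL_MUL(1936, 128) -> 2048 */
	/* any other (non-tiled) format: CEIL_MUL(1936, 32) -> 1952 */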
+
void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
unsigned int width,
unsigned int min_padded_width)
@@ -285,25 +313,8 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
align = max(min_padded_width, width);
info->res.width = width;
- /* frames with a U and V plane of 8 bits per pixel need to have
- all planes aligned, this means double the alignment for the
- Y plane if the horizontal decimation is 2. */
- if (info->format == IA_CSS_FRAME_FORMAT_YUV420 ||
- info->format == IA_CSS_FRAME_FORMAT_YV12 ||
- info->format == IA_CSS_FRAME_FORMAT_NV12 ||
- info->format == IA_CSS_FRAME_FORMAT_NV21 ||
- info->format == IA_CSS_FRAME_FORMAT_BINARY_8 ||
- info->format == IA_CSS_FRAME_FORMAT_YUV_LINE)
- info->padded_width =
- CEIL_MUL(align, 2 * HIVE_ISP_DDR_WORD_BYTES);
- else if (info->format == IA_CSS_FRAME_FORMAT_NV12_TILEY)
- info->padded_width = CEIL_MUL(align, NV12_TILEY_TILE_WIDTH);
- else if (info->format == IA_CSS_FRAME_FORMAT_RAW ||
- info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED)
- info->padded_width = CEIL_MUL(align, 2 * ISP_VEC_NELEMS);
- else {
- info->padded_width = CEIL_MUL(align, HIVE_ISP_DDR_WORD_BYTES);
- }
+ info->padded_width = ia_css_frame_pad_width(align, info->format);
+
IA_CSS_LEAVE_PRIVATE("");
}
@@ -601,9 +612,6 @@ static void frame_init_qplane6_planes(struct ia_css_frame *frame)
static int frame_allocate_buffer_data(struct ia_css_frame *frame)
{
-#ifdef ISP2401
- IA_CSS_ENTER_LEAVE_PRIVATE("frame->data_bytes=%d\n", frame->data_bytes);
-#endif
frame->data = hmm_alloc(frame->data_bytes);
if (frame->data == mmgr_NULL)
return -ENOMEM;
@@ -635,15 +643,11 @@ static int frame_allocate_with_data(struct ia_css_frame **frame,
if (err) {
kvfree(me);
-#ifndef ISP2401
- return err;
-#else
- me = NULL;
-#endif
+ *frame = NULL;
+ } else {
+ *frame = me;
}
- *frame = me;
-
return err;
}
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index 93789500416f..4b3fa6d93fe0 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -1529,15 +1529,14 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
mipi_init();
-#ifndef ISP2401
/*
* In case this has been programmed already, update internal
* data structure ...
* DEPRECATED
*/
- my_css.page_table_base_index = mmu_get_page_table_base_index(MMU0_ID);
+ if (!IS_ISP2401)
+ my_css.page_table_base_index = mmu_get_page_table_base_index(MMU0_ID);
-#endif
my_css.irq_type = irq_type;
my_css_save.irq_type = irq_type;
@@ -1596,10 +1595,8 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
* sh_css_init_buffer_queues();
*/
-#if defined(ISP2401)
- gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 1);
-#endif
-
+ if (IS_ISP2401)
+ gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 1);
if (!IS_ISP2401)
dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
@@ -2128,13 +2125,8 @@ ia_css_pipe_destroy(struct ia_css_pipe *pipe)
err);
}
}
-#ifndef ISP2401
ia_css_frame_free_multiple(NUM_VIDEO_TNR_FRAMES,
pipe->pipe_settings.video.tnr_frames);
-#else
- ia_css_frame_free_multiple(NUM_VIDEO_TNR_FRAMES,
- pipe->pipe_settings.video.tnr_frames);
-#endif
ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES,
pipe->pipe_settings.video.delay_frames);
break;
@@ -2238,11 +2230,10 @@ int ia_css_irq_translate(
case virq_isys_csi:
infos |= IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR;
break;
-#if !defined(ISP2401)
case virq_ifmt0_id:
- infos |= IA_CSS_IRQ_INFO_IF_ERROR;
+ if (!IS_ISP2401)
+ infos |= IA_CSS_IRQ_INFO_IF_ERROR;
break;
-#endif
case virq_dma:
infos |= IA_CSS_IRQ_INFO_DMA_ERROR;
break;
@@ -2277,27 +2268,34 @@ int ia_css_irq_enable(
IA_CSS_ENTER("info=%d, enable=%d", info, enable);
switch (info) {
-#if !defined(ISP2401)
case IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF:
+ if (IS_ISP2401)
+ /* Just ignore those unused IRQs without printing errors */
+ return 0;
+
irq = virq_isys_sof;
break;
case IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF:
+ if (IS_ISP2401)
+ /* Just ignore those unused IRQs without printing errors */
+ return 0;
+
irq = virq_isys_eof;
break;
case IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR:
+ if (IS_ISP2401)
+ /* Just ignore those unused IRQs without printing errors */
+ return 0;
+
irq = virq_isys_csi;
break;
case IA_CSS_IRQ_INFO_IF_ERROR:
+ if (IS_ISP2401)
+ /* Just ignore those unused IRQs without printing errors */
+ return 0;
+
irq = virq_ifmt0_id;
break;
-#else
- case IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF:
- case IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF:
- case IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR:
- case IA_CSS_IRQ_INFO_IF_ERROR:
- /* Just ignore those unused IRQs without printing errors */
- return 0;
-#endif
case IA_CSS_IRQ_INFO_DMA_ERROR:
irq = virq_dma;
break;
@@ -2413,14 +2411,14 @@ alloc_continuous_frames(struct ia_css_pipe *pipe, bool init_time)
return -EINVAL;
}
-#if defined(ISP2401)
- /* For CSI2+, the continuous frame will hold the full input frame */
- ref_info.res.width = pipe->stream->config.input_config.input_res.width;
- ref_info.res.height = pipe->stream->config.input_config.input_res.height;
+ if (IS_ISP2401) {
+ /* For CSI2+, the continuous frame will hold the full input frame */
+ ref_info.res.width = pipe->stream->config.input_config.input_res.width;
+ ref_info.res.height = pipe->stream->config.input_config.input_res.height;
- /* Ensure padded width is aligned for 2401 */
- ref_info.padded_width = CEIL_MUL(ref_info.res.width, 2 * ISP_VEC_NELEMS);
-#endif
+ /* Ensure padded width is aligned for 2401 */
+ ref_info.padded_width = CEIL_MUL(ref_info.res.width, 2 * ISP_VEC_NELEMS);
+ }
if (pipe->stream->config.pack_raw_pixels) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
@@ -2499,11 +2497,9 @@ load_preview_binaries(struct ia_css_pipe *pipe)
int err = 0;
bool need_vf_pp = false;
bool need_isp_copy_binary = false;
-#ifdef ISP2401
bool sensor = false;
-#else
bool continuous;
-#endif
+
/* preview only have 1 output pin now */
struct ia_css_frame_info *pipe_out_info = &pipe->output_info[0];
struct ia_css_preview_settings *mycs = &pipe->pipe_settings.preview;
@@ -2514,11 +2510,9 @@ load_preview_binaries(struct ia_css_pipe *pipe)
assert(pipe->mode == IA_CSS_PIPE_ID_PREVIEW);
online = pipe->stream->config.online;
-#ifdef ISP2401
+
sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
-#else
continuous = pipe->stream->config.continuous;
-#endif
if (mycs->preview_binary.info)
return 0;
@@ -2627,24 +2621,22 @@ load_preview_binaries(struct ia_css_pipe *pipe)
return err;
}
-#ifdef ISP2401
- /*
- * When the input system is 2401, only the Direct Sensor Mode
- * Offline Preview uses the ISP copy binary.
- */
- need_isp_copy_binary = !online && sensor;
-#else
- /*
- * About pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY:
- * This is typical the case with SkyCam (which has no input system) but it also applies to all cases
- * where the driver chooses for memory based input frames. In these cases, a copy binary (which typical
- * copies sensor data to DDR) does not have much use.
- */
- if (!IS_ISP2401)
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, only the Direct Sensor Mode
+ * Offline Preview uses the ISP copy binary.
+ */
+ need_isp_copy_binary = !online && sensor;
+ } else {
+ /*
+ * About pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY:
+		 * This is typically the case with SkyCam (which has no input system), but it
+		 * also applies to all cases where the driver chooses memory-based input frames.
+		 * In these cases, a copy binary (which typically copies sensor data to DDR)
+		 * does not have much use.
+ */
need_isp_copy_binary = !online && !continuous;
- else
- need_isp_copy_binary = !online && !continuous && !(pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY);
-#endif
+ }
/* Copy */
if (need_isp_copy_binary) {
@@ -3125,11 +3117,10 @@ init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
in_frame->frame_info.format = format;
-#ifdef ISP2401
- if (format == IA_CSS_FRAME_FORMAT_RAW)
+ if (IS_ISP2401 && format == IA_CSS_FRAME_FORMAT_RAW) {
in_frame->frame_info.format = (pipe->stream->config.pack_raw_pixels) ?
IA_CSS_FRAME_FORMAT_RAW_PACKED : IA_CSS_FRAME_FORMAT_RAW;
-#endif
+ }
in_frame->frame_info.res.width = pipe->stream->config.input_config.input_res.width;
in_frame->frame_info.res.height = pipe->stream->config.input_config.input_res.height;
@@ -3211,18 +3202,18 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
me->dvs_frame_delay = pipe->dvs_frame_delay;
-#ifdef ISP2401
- /*
- * When the input system is 2401, always enable 'in_frameinfo_memory'
- * except for the following: online or continuous
- */
- need_in_frameinfo_memory = !(pipe->stream->config.online ||
- pipe->stream->config.continuous);
-#else
- /* Construct in_frame info (only in case we have dynamic input */
- need_in_frameinfo_memory = pipe->stream->config.mode ==
- IA_CSS_INPUT_MODE_MEMORY;
-#endif
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following: online or continuous
+ */
+ need_in_frameinfo_memory = !(pipe->stream->config.online ||
+ pipe->stream->config.continuous);
+ } else {
+		/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode ==
+ IA_CSS_INPUT_MODE_MEMORY;
+ }
/* Construct in_frame info (only in case we have dynamic input */
if (need_in_frameinfo_memory) {
@@ -3268,15 +3259,14 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
goto ERR;
in_frame = me->stages->args.out_frame[0];
} else if (pipe->stream->config.continuous) {
-#ifdef ISP2401
- /*
- * When continuous is enabled, configure in_frame with the
- * last pipe, which is the copy pipe.
- */
- in_frame = pipe->stream->last_pipe->continuous_frames[0];
-#else
- in_frame = pipe->continuous_frames[0];
-#endif
+ if (IS_ISP2401)
+ /*
+ * When continuous is enabled, configure in_frame with the
+ * last pipe, which is the copy pipe.
+ */
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ else
+ in_frame = pipe->continuous_frames[0];
}
ia_css_pipe_util_set_output_frames(out_frames, 0,
@@ -3373,12 +3363,10 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe)
struct ia_css_frame *out_frame;
struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
bool need_in_frameinfo_memory = false;
-#ifdef ISP2401
bool sensor = false;
bool buffered_sensor = false;
bool online = false;
bool continuous = false;
-#endif
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
@@ -3391,25 +3379,26 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe)
me = &pipe->pipeline;
ia_css_pipeline_clean(me);
-#ifdef ISP2401
- /*
- * When the input system is 2401, always enable 'in_frameinfo_memory'
- * except for the following:
- * - Direct Sensor Mode Online Preview
- * - Buffered Sensor Mode Online Preview
- * - Direct Sensor Mode Continuous Preview
- * - Buffered Sensor Mode Continuous Preview
- */
- sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
- buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
- online = pipe->stream->config.online;
- continuous = pipe->stream->config.continuous;
- need_in_frameinfo_memory =
- !((sensor && (online || continuous)) || (buffered_sensor && (online || continuous)));
-#else
- /* Construct in_frame info (only in case we have dynamic input */
- need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
-#endif
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Preview
+ * - Buffered Sensor Mode Online Preview
+ * - Direct Sensor Mode Continuous Preview
+ * - Buffered Sensor Mode Continuous Preview
+ */
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+ buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor &&
+ (online || continuous)));
+ } else {
+		/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+ }
if (need_in_frameinfo_memory) {
err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
IA_CSS_FRAME_FORMAT_RAW);
@@ -3420,7 +3409,6 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe)
} else {
in_frame = NULL;
}
-
err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
if (err)
goto ERR;
@@ -3441,17 +3429,16 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe)
goto ERR;
in_frame = me->stages->args.out_frame[0];
} else if (pipe->stream->config.continuous) {
-#ifdef ISP2401
- /*
- * When continuous is enabled, configure in_frame with the
- * last pipe, which is the copy pipe.
- */
- if (continuous || !online)
- in_frame = pipe->stream->last_pipe->continuous_frames[0];
-
-#else
- in_frame = pipe->continuous_frames[0];
-#endif
+ if (IS_ISP2401) {
+ /*
+ * When continuous is enabled, configure in_frame with the
+ * last pipe, which is the copy pipe.
+ */
+ if (continuous || !online)
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ } else {
+ in_frame = pipe->continuous_frames[0];
+ }
}
if (vf_pp_binary) {
@@ -3925,19 +3912,19 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
if (pipe && pipe->stop_requested) {
-#if !defined(ISP2401)
- /*
- * free mipi frames only for old input
- * system for 2401 it is done in
- * ia_css_stream_destroy call
- */
- return_err = free_mipi_frames(pipe);
- if (return_err) {
- IA_CSS_LOG("free_mipi_frames() failed");
- IA_CSS_LEAVE_ERR(return_err);
- return return_err;
+ if (!IS_ISP2401) {
+ /*
+				 * Free MIPI frames only for the old input
+				 * system; for 2401 this is done in the
+				 * ia_css_stream_destroy() call.
+ */
+ return_err = free_mipi_frames(pipe);
+ if (return_err) {
+ IA_CSS_LOG("free_mipi_frames() failed");
+ IA_CSS_LEAVE_ERR(return_err);
+ return return_err;
+ }
}
-#endif
pipe->stop_requested = false;
}
fallthrough;
@@ -3959,12 +3946,11 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
pipe->num_invalid_frames--;
if (frame->frame_info.format == IA_CSS_FRAME_FORMAT_BINARY_8) {
-#ifdef ISP2401
- frame->planes.binary.size = frame->data_bytes;
-#else
- frame->planes.binary.size =
- sh_css_sp_get_binary_copy_size();
-#endif
+ if (IS_ISP2401)
+ frame->planes.binary.size = frame->data_bytes;
+ else
+ frame->planes.binary.size =
+ sh_css_sp_get_binary_copy_size();
}
if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
IA_CSS_LOG("pfp: dequeued OF %d with config id %d thread %d",
@@ -4880,22 +4866,20 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
pipe->num_invalid_frames, pipe->dvs_frame_delay);
/* pqiao TODO: temp hack for PO, should be removed after offline YUVPP is enabled */
-#if !defined(ISP2401)
- /* Copy */
- if (!online && !continuous) {
- /*
- * TODO: what exactly needs doing, prepend the copy binary to
- * video base this only on !online?
- */
- err = load_copy_binary(pipe,
- &mycs->copy_binary,
- &mycs->video_binary);
- if (err)
- return err;
+ if (!IS_ISP2401) {
+ /* Copy */
+ if (!online && !continuous) {
+ /*
+			 * TODO: what exactly needs doing here? Prepend the copy binary
+			 * to video, and base this only on !online?
+ */
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ &mycs->video_binary);
+ if (err)
+ return err;
+ }
}
-#else
- (void)continuous;
-#endif
if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && need_vf_pp) {
struct ia_css_binary_descr vf_pp_descr;
@@ -5227,11 +5211,8 @@ static int load_primary_binaries(
bool need_pp = false;
bool need_isp_copy_binary = false;
bool need_ldc = false;
-#ifdef ISP2401
bool sensor = false;
-#else
bool memory, continuous;
-#endif
struct ia_css_frame_info prim_in_info,
prim_out_info,
capt_pp_out_info, vf_info,
@@ -5251,12 +5232,9 @@ static int load_primary_binaries(
pipe->mode == IA_CSS_PIPE_ID_COPY);
online = pipe->stream->config.online;
-#ifdef ISP2401
sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
-#else
memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
continuous = pipe->stream->config.continuous;
-#endif
mycs = &pipe->pipe_settings.capture;
pipe_out_info = &pipe->output_info[0];
@@ -5462,15 +5440,14 @@ static int load_primary_binaries(
if (err)
return err;
-#ifdef ISP2401
- /*
- * When the input system is 2401, only the Direct Sensor Mode
- * Offline Capture uses the ISP copy binary.
- */
- need_isp_copy_binary = !online && sensor;
-#else
- need_isp_copy_binary = !online && !continuous && !memory;
-#endif
+ if (IS_ISP2401)
+ /*
+ * When the input system is 2401, only the Direct Sensor Mode
+ * Offline Capture uses the ISP copy binary.
+ */
+ need_isp_copy_binary = !online && sensor;
+ else
+		need_isp_copy_binary = !online && !continuous && !memory;
/* ISP Copy */
if (need_isp_copy_binary) {
@@ -5681,10 +5658,10 @@ static int load_advanced_binaries(struct ia_css_pipe *pipe)
}
/* Copy */
-#ifdef ISP2401
- /* For CSI2+, only the direct sensor mode/online requires ISP copy */
- need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
-#endif
+ if (IS_ISP2401)
+ /* For CSI2+, only the direct sensor mode/online requires ISP copy */
+ need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+
if (need_isp_copy)
load_copy_binary(pipe,
&pipe->pipe_settings.capture.copy_binary,
@@ -5829,10 +5806,10 @@ static int load_low_light_binaries(struct ia_css_pipe *pipe)
}
/* Copy */
-#ifdef ISP2401
- /* For CSI2+, only the direct sensor mode/online requires ISP copy */
- need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
-#endif
+ if (IS_ISP2401)
+ /* For CSI2+, only the direct sensor mode/online requires ISP copy */
+ need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+
if (need_isp_copy)
err = load_copy_binary(pipe,
&pipe->pipe_settings.capture.copy_binary,
@@ -5902,10 +5879,9 @@ static int load_capture_binaries(struct ia_css_pipe *pipe)
switch (pipe->config.default_capture_config.mode) {
case IA_CSS_CAPTURE_MODE_RAW:
err = load_copy_binaries(pipe);
-#if defined(ISP2401)
- if (!err)
+ if (!err && IS_ISP2401)
pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
-#endif
+
break;
case IA_CSS_CAPTURE_MODE_BAYER:
err = load_bayer_isp_binaries(pipe);
@@ -6409,7 +6385,6 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
else
next_binary = NULL;
-#if defined(ISP2401)
/*
* NOTES
* - Why does the "yuvpp" pipe needs "isp_copy_binary" (i.e. ISP Copy) when
@@ -6427,11 +6402,11 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
* pp_defs.h" for the list of input-frame formats that are supported by the
* "yuv_scale_binary".
*/
- need_isp_copy_binary =
- (pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV422_8);
-#else /* !ISP2401 */
- need_isp_copy_binary = true;
-#endif /* ISP2401 */
+ if (IS_ISP2401)
+ need_isp_copy_binary =
+ (pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV422_8);
+ else
+ need_isp_copy_binary = true;
if (need_isp_copy_binary) {
err = load_copy_binary(pipe,
@@ -6678,12 +6653,10 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
struct ia_css_frame *vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
struct ia_css_pipeline_stage_desc stage_desc;
bool need_in_frameinfo_memory = false;
-#ifdef ISP2401
bool sensor = false;
bool buffered_sensor = false;
bool online = false;
bool continuous = false;
-#endif
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
@@ -6700,24 +6673,24 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
num_stage = pipe->pipe_settings.yuvpp.num_yuv_scaler;
num_output_stage = pipe->pipe_settings.yuvpp.num_output;
-#ifdef ISP2401
- /*
- * When the input system is 2401, always enable 'in_frameinfo_memory'
- * except for the following:
- * - Direct Sensor Mode Online Capture
- * - Direct Sensor Mode Continuous Capture
- * - Buffered Sensor Mode Continuous Capture
- */
- sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
- buffered_sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
- online = pipe->stream->config.online;
- continuous = pipe->stream->config.continuous;
- need_in_frameinfo_memory =
- !((sensor && (online || continuous)) || (buffered_sensor && continuous));
-#else
- /* Construct in_frame info (only in case we have dynamic input */
- need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
-#endif
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Capture
+ * - Direct Sensor Mode Continuous Capture
+ * - Buffered Sensor Mode Continuous Capture
+ */
+ sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+ buffered_sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor && continuous));
+ } else {
+		/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+ }
/*
* the input frame can come from:
*
@@ -6808,11 +6781,10 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
if (pipe->pipe_settings.yuvpp.copy_binary.info) {
struct ia_css_frame *in_frame_local = NULL;
-#ifdef ISP2401
- /* After isp copy is enabled in_frame needs to be passed. */
- if (!online)
+ if (IS_ISP2401 && !online) {
+		/* After ISP copy is enabled, in_frame needs to be passed. */
in_frame_local = in_frame;
-#endif
+ }
if (need_scaler) {
ia_css_pipe_util_set_output_frames(bin_out_frame,
@@ -7031,12 +7003,10 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
struct ia_css_frame *vf_frame;
struct ia_css_pipeline_stage_desc stage_desc;
bool need_in_frameinfo_memory = false;
-#ifdef ISP2401
bool sensor = false;
bool buffered_sensor = false;
bool online = false;
bool continuous = false;
-#endif
unsigned int i, num_yuv_scaler, num_primary_stage;
bool need_yuv_pp = false;
bool *is_output_stage = NULL;
@@ -7054,25 +7024,27 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
ia_css_pipeline_clean(me);
ia_css_pipe_util_create_output_frames(out_frames);
-#ifdef ISP2401
- /*
- * When the input system is 2401, always enable 'in_frameinfo_memory'
- * except for the following:
- * - Direct Sensor Mode Online Capture
- * - Direct Sensor Mode Online Capture
- * - Direct Sensor Mode Continuous Capture
- * - Buffered Sensor Mode Continuous Capture
- */
- sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
- buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
- online = pipe->stream->config.online;
- continuous = pipe->stream->config.continuous;
- need_in_frameinfo_memory =
- !((sensor && (online || continuous)) || (buffered_sensor && (online || continuous)));
-#else
- /* Construct in_frame info (only in case we have dynamic input */
- need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
-#endif
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Capture
+	 * - Buffered Sensor Mode Online Capture
+ * - Direct Sensor Mode Continuous Capture
+ * - Buffered Sensor Mode Continuous Capture
+ */
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+ buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor &&
+ (online || continuous)));
+ } else {
+		/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+ }
+
if (need_in_frameinfo_memory) {
err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
IA_CSS_FRAME_FORMAT_RAW);
@@ -7135,27 +7107,27 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
if (pipe->pipe_settings.capture.copy_binary.info) {
if (raw) {
ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
-#if defined(ISP2401)
- if (!continuous) {
- ia_css_pipe_get_generic_stage_desc(&stage_desc,
- copy_binary,
- out_frames,
- in_frame,
- NULL);
+ if (IS_ISP2401) {
+ if (!continuous) {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ in_frame,
+ NULL);
+ } else {
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ in_frame,
+ NULL);
+ }
} else {
- in_frame = pipe->stream->last_pipe->continuous_frames[0];
ia_css_pipe_get_generic_stage_desc(&stage_desc,
copy_binary,
out_frames,
- in_frame,
- NULL);
+ NULL, NULL);
}
-#else
- ia_css_pipe_get_generic_stage_desc(&stage_desc,
- copy_binary,
- out_frames,
- NULL, NULL);
-#endif
} else {
ia_css_pipe_util_set_output_frames(out_frames, 0,
in_frame);
@@ -7185,11 +7157,7 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
local_in_frame = in_frame;
else
local_in_frame = NULL;
-#ifndef ISP2401
- if (!need_pp && (i == num_primary_stage - 1))
-#else
- if (!need_pp && (i == num_primary_stage - 1) && !need_ldc)
-#endif
+ if (!need_pp && (i == num_primary_stage - 1) && (!IS_ISP2401 || !need_ldc))
local_out_frame = out_frame;
else
local_out_frame = NULL;
@@ -7400,23 +7368,14 @@ static int capture_start(struct ia_css_pipe *pipe)
return err;
}
}
-
-#if !defined(ISP2401)
/* old isys: need to send_mipi_frames() in all pipe modes */
- err = send_mipi_frames(pipe);
- if (err) {
- IA_CSS_LEAVE_ERR_PRIVATE(err);
- return err;
- }
-#else
- if (pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
+	if (!IS_ISP2401 || pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
err = send_mipi_frames(pipe);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
}
-#endif
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
copy_ovrd = 1 << thread_id;
@@ -8123,24 +8082,22 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
return err;
}
-#if !defined(ISP2401)
- /* We don't support metadata for JPEG stream, since they both use str2mem */
- if (stream_config->input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8 &&
- stream_config->metadata_config.resolution.height > 0) {
- err = -EINVAL;
- IA_CSS_LEAVE_ERR(err);
- return err;
- }
-#endif
-
-#ifdef ISP2401
- if (stream_config->online && stream_config->pack_raw_pixels) {
- IA_CSS_LOG("online and pack raw is invalid on input system 2401");
- err = -EINVAL;
- IA_CSS_LEAVE_ERR(err);
- return err;
+ if (!IS_ISP2401) {
+		/* We don't support metadata for JPEG streams, since both use str2mem */
+ if (stream_config->input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8 &&
+ stream_config->metadata_config.resolution.height > 0) {
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ } else {
+ if (stream_config->online && stream_config->pack_raw_pixels) {
+ IA_CSS_LOG("online and pack raw is invalid on input system 2401");
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
}
-#endif
ia_css_debug_pipe_graph_dump_stream_config(stream_config);
@@ -8223,19 +8180,17 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* take over stream config */
curr_stream->config = *stream_config;
-#if defined(ISP2401)
- if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR &&
- stream_config->online)
- curr_stream->config.online = false;
-#endif
+ if (IS_ISP2401) {
+ if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR &&
+ stream_config->online)
+ curr_stream->config.online = false;
-#ifdef ISP2401
- if (curr_stream->config.online) {
- curr_stream->config.source.port.num_lanes =
- stream_config->source.port.num_lanes;
- curr_stream->config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ if (curr_stream->config.online) {
+ curr_stream->config.source.port.num_lanes =
+ stream_config->source.port.num_lanes;
+ curr_stream->config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ }
}
-#endif
/* in case driver doesn't configure init number of raw buffers, configure it here */
if (curr_stream->config.target_num_cont_raw_buf == 0)
curr_stream->config.target_num_cont_raw_buf = NUM_CONTINUOUS_FRAMES;
@@ -9162,11 +9117,10 @@ void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
-#if defined(ISP2401)
- need_input_queue = true;
-#else
- need_input_queue = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
-#endif
+ if (IS_ISP2401)
+ need_input_queue = true;
+ else
+ need_input_queue = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
/* map required buffer queues to resources */
/* TODO: to be improved */
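
The sh_css.c hunks above all apply the same mechanical transformation: compile-time #ifdef ISP2401 / #else blocks become runtime checks on IS_ISP2401, so a single binary serves both the 2400 and 2401 input systems. A minimal sketch of the before/after shape, using the copy-binary decision quoted near the top of this diff:

	/* before: selected at build time */
	#ifdef ISP2401
		need_isp_copy_binary = !online && sensor;
	#else
		need_isp_copy_binary = !online && !continuous;
	#endif

	/* after: selected at run time, one binary for both input systems */
	if (IS_ISP2401)
		need_isp_copy_binary = !online && sensor;
	else
		need_isp_copy_binary = !online && !continuous;
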
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
index e7ef578db8ab..197ab2085e8d 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_firmware.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
@@ -56,11 +56,8 @@ static struct firmware_header *firmware_header;
* which will be replaced with the actual RELEASE_VERSION
* during package generation. Please do not modify
*/
-#ifdef ISP2401
-static const char *release_version = STR(irci_stable_candrpv_0415_20150521_0458);
-#else
-static const char *release_version = STR(irci_stable_candrpv_0415_20150423_1753);
-#endif
+static const char *release_version_2401 = STR(irci_stable_candrpv_0415_20150521_0458);
+static const char *release_version_2400 = STR(irci_stable_candrpv_0415_20150423_1753);
#define MAX_FW_REL_VER_NAME 300
static char FW_rel_ver_name[MAX_FW_REL_VER_NAME] = "---";
@@ -191,8 +188,14 @@ sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi,
bool
sh_css_check_firmware_version(struct device *dev, const char *fw_data)
{
+ const char *release_version;
struct sh_css_fw_bi_file_h *file_header;
+ if (IS_ISP2401)
+ release_version = release_version_2401;
+ else
+ release_version = release_version_2400;
+
firmware_header = (struct firmware_header *)fw_data;
file_header = &firmware_header->file_header;
@@ -225,15 +228,28 @@ sh_css_load_firmware(struct device *dev, const char *fw_data,
unsigned int fw_size)
{
unsigned int i;
+ const char *release_version;
struct ia_css_fw_info *binaries;
struct sh_css_fw_bi_file_h *file_header;
int ret;
+ /* some sanity checks */
+ if (!fw_data || fw_size < sizeof(struct sh_css_fw_bi_file_h))
+ return -EINVAL;
+
firmware_header = (struct firmware_header *)fw_data;
file_header = &firmware_header->file_header;
+
+ if (file_header->h_size != sizeof(struct sh_css_fw_bi_file_h))
+ return -EINVAL;
+
binaries = &firmware_header->binary_header;
strscpy(FW_rel_ver_name, file_header->version,
min(sizeof(FW_rel_ver_name), sizeof(file_header->version)));
+ if (IS_ISP2401)
+ release_version = release_version_2401;
+ else
+ release_version = release_version_2400;
ret = sh_css_check_firmware_version(dev, fw_data);
if (ret) {
IA_CSS_ERROR("CSS code version (%s) and firmware version (%s) mismatch!",
@@ -243,13 +259,6 @@ sh_css_load_firmware(struct device *dev, const char *fw_data,
IA_CSS_LOG("successfully load firmware version %s", release_version);
}
- /* some sanity checks */
- if (!fw_data || fw_size < sizeof(struct sh_css_fw_bi_file_h))
- return -EINVAL;
-
- if (file_header->h_size != sizeof(struct sh_css_fw_bi_file_h))
- return -EINVAL;
-
sh_css_num_binaries = file_header->binary_nr;
/* Only allocate memory for ISP blob info */
if (sh_css_num_binaries > NUM_OF_SPS) {
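
In sh_css_load_firmware() the sanity checks move above the first use of the header, so a NULL or truncated firmware image is rejected before any of its fields are read. The resulting order, abridged from the hunk above:

	/* validate the blob before trusting its header */
	if (!fw_data || fw_size < sizeof(struct sh_css_fw_bi_file_h))
		return -EINVAL;

	firmware_header = (struct firmware_header *)fw_data;
	file_header = &firmware_header->file_header;

	if (file_header->h_size != sizeof(struct sh_css_fw_bi_file_h))
		return -EINVAL;
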
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
index bc6e8598a776..b20acaab0595 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
@@ -67,13 +67,12 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
unsigned int mem_words = 0;
unsigned int width_padded = width;
-#if defined(ISP2401)
/* The changes will be reverted as soon as RAW
* Buffers are deployed by the 2401 Input System
* in the non-continuous use scenario.
*/
- width_padded += (2 * ISP_VEC_NELEMS);
-#endif
+ if (IS_ISP2401)
+ width_padded += (2 * ISP_VEC_NELEMS);
IA_CSS_ENTER("padded_width=%d, height=%d, format=%d, hasSOLandEOL=%d, embedded_data_size_words=%d\n",
width_padded, height, format, hasSOLandEOL, embedded_data_size_words);
@@ -235,7 +234,6 @@ bool mipi_is_free(void)
return true;
}
-#if defined(ISP2401)
/*
* @brief Calculate the required MIPI buffer sizes.
* Based on the stream configuration, calculate the
@@ -342,7 +340,6 @@ static int calculate_mipi_buff_size(struct ia_css_stream_config *stream_cfg,
IA_CSS_LEAVE_ERR(err);
return err;
}
-#endif
int
allocate_mipi_frames(struct ia_css_pipe *pipe,
@@ -363,15 +360,13 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
return -EINVAL;
}
-#ifdef ISP2401
- if (pipe->stream->config.online) {
+ if (IS_ISP2401 && pipe->stream->config.online) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) exit: no buffers needed for 2401 pipe mode.\n",
pipe);
return 0;
}
-#endif
if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) exit: no buffers needed for pipe mode.\n",
@@ -386,9 +381,10 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
return -EINVAL;
}
-#ifdef ISP2401
- err = calculate_mipi_buff_size(&pipe->stream->config,
- &my_css.mipi_frame_size[port]);
+ if (IS_ISP2401)
+ err = calculate_mipi_buff_size(&pipe->stream->config,
+ &my_css.mipi_frame_size[port]);
+
/*
* 2401 system allows multiple streams to use same physical port. This is not
* true for 2400 system. Currently 2401 uses MIPI buffers as a temporary solution.
@@ -396,20 +392,14 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
* In that case only 2400 related code should remain.
*/
if (ref_count_mipi_allocation[port] != 0) {
- ref_count_mipi_allocation[port]++;
+ if (IS_ISP2401)
+ ref_count_mipi_allocation[port]++;
+
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) leave: nothing to do, already allocated for this port (port=%d).\n",
pipe, port);
return 0;
}
-#else
- if (ref_count_mipi_allocation[port] != 0) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "allocate_mipi_frames(%p) exit: already allocated for this port (port=%d).\n",
- pipe, port);
- return 0;
- }
-#endif
ref_count_mipi_allocation[port]++;
@@ -503,14 +493,14 @@ free_mipi_frames(struct ia_css_pipe *pipe)
}
if (ref_count_mipi_allocation[port] > 0) {
-#if !defined(ISP2401)
- assert(ref_count_mipi_allocation[port] == 1);
- if (ref_count_mipi_allocation[port] != 1) {
- IA_CSS_ERROR("free_mipi_frames(%p) exit: wrong ref_count (ref_count=%d).",
- pipe, ref_count_mipi_allocation[port]);
- return err;
+ if (!IS_ISP2401) {
+ assert(ref_count_mipi_allocation[port] == 1);
+ if (ref_count_mipi_allocation[port] != 1) {
+ IA_CSS_ERROR("free_mipi_frames(%p) exit: wrong ref_count (ref_count=%d).",
+ pipe, ref_count_mipi_allocation[port]);
+ return err;
+ }
}
-#endif
ref_count_mipi_allocation[port]--;
@@ -534,18 +524,6 @@ free_mipi_frames(struct ia_css_pipe *pipe)
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"free_mipi_frames(%p) exit (deallocated).\n", pipe);
}
-#if defined(ISP2401)
- else {
- /* 2401 system allows multiple streams to use same physical port. This is not
- * true for 2400 system. Currently 2401 uses MIPI buffers as a temporary solution.
- * TODO AM: Once that is changed (removed) this code should be removed as well.
- * In that case only 2400 related code should remain.
- */
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "free_mipi_frames(%p) leave: nothing to do, other streams still use this port (port=%d).\n",
- pipe, port);
- }
-#endif
}
} else { /* pipe ==NULL */
/* AM TEMP: free-ing all mipi buffers just like a legacy code. */
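
The allocate/free changes in sh_css_mipi.c keep one reference count per MIPI port: on the 2401 input system several streams may share a physical port, so an already-allocated port just gains another reference, while on 2400 a port has at most one user. Roughly (a sketch, not the complete functions):

	/* allocate side */
	if (ref_count_mipi_allocation[port] != 0) {
		if (IS_ISP2401)
			ref_count_mipi_allocation[port]++;	/* extra stream on a shared port */
		return 0;					/* buffers already allocated */
	}
	ref_count_mipi_allocation[port]++;			/* first user allocates below */

	/* free side: on 2400 the count can only ever be 1 here */
	if (!IS_ISP2401)
		assert(ref_count_mipi_allocation[port] == 1);
	ref_count_mipi_allocation[port]--;
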
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.c b/drivers/staging/media/atomisp/pci/sh_css_sp.c
index 0dd58a7fe2cc..297e1b981720 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_sp.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.c
@@ -952,12 +952,10 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
return 0;
}
-#if defined(ISP2401)
- (void)continuous;
- sh_css_sp_stage.deinterleaved = 0;
-#else
- sh_css_sp_stage.deinterleaved = ((stage == 0) && continuous);
-#endif
+ if (IS_ISP2401)
+ sh_css_sp_stage.deinterleaved = 0;
+ else
+ sh_css_sp_stage.deinterleaved = ((stage == 0) && continuous);
initialize_stage_frames(&sh_css_sp_stage.frames);
/*
diff --git a/drivers/staging/media/av7110/av7110_av.c b/drivers/staging/media/av7110/av7110_av.c
index a5c5bebad306..00dd6a7fea64 100644
--- a/drivers/staging/media/av7110/av7110_av.c
+++ b/drivers/staging/media/av7110/av7110_av.c
@@ -78,7 +78,7 @@ static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, s
int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)
{
- struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) p2t->priv;
+ struct dvb_demux_feed *dvbdmxfeed = p2t->priv;
if (!(dvbdmxfeed->ts_type & TS_PACKET))
return 0;
@@ -837,7 +837,7 @@ static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, s
int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t len)
{
struct dvb_demux *demux = feed->demux;
- struct av7110 *av7110 = (struct av7110 *) demux->priv;
+ struct av7110 *av7110 = demux->priv;
dprintk(2, "av7110:%p, \n", av7110);
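
Both av7110 hunks drop casts that C already performs implicitly: assigning a void * (here the demux/pes2ts priv pointer) to any object pointer type needs no cast, for example:

	void *priv = demux->priv;
	struct av7110 *av7110 = priv;	/* implicit conversion from void *, no cast needed */
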
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 2d712eda2c5d..064dc562bc96 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -7,7 +7,7 @@
#include <linux/module.h>
#include "imx-media.h"
-#define IMX_BUS_FMTS(fmt...) (const u32[]) {fmt, 0}
+#define IMX_BUS_FMTS(fmt...) ((const u32[]) {fmt, 0})
/*
* List of supported pixel formats for the subdevs.
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index c07994ea6e96..ab565b4e29ec 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
@@ -564,6 +565,49 @@ static int csi2_registered(struct v4l2_subdev *sd)
V4L2_FIELD_NONE, NULL);
}
+/* --------------- CORE OPS --------------- */
+
+static int csi2_log_status(struct v4l2_subdev *sd)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+
+ v4l2_info(sd, "-----MIPI CSI status-----\n");
+ v4l2_info(sd, "VERSION: 0x%x\n",
+ readl(csi2->base + CSI2_VERSION));
+ v4l2_info(sd, "N_LANES: 0x%x\n",
+ readl(csi2->base + CSI2_N_LANES));
+ v4l2_info(sd, "PHY_SHUTDOWNZ: 0x%x\n",
+ readl(csi2->base + CSI2_PHY_SHUTDOWNZ));
+ v4l2_info(sd, "DPHY_RSTZ: 0x%x\n",
+ readl(csi2->base + CSI2_DPHY_RSTZ));
+ v4l2_info(sd, "RESETN: 0x%x\n",
+ readl(csi2->base + CSI2_RESETN));
+ v4l2_info(sd, "PHY_STATE: 0x%x\n",
+ readl(csi2->base + CSI2_PHY_STATE));
+ v4l2_info(sd, "DATA_IDS_1: 0x%x\n",
+ readl(csi2->base + CSI2_DATA_IDS_1));
+ v4l2_info(sd, "DATA_IDS_2: 0x%x\n",
+ readl(csi2->base + CSI2_DATA_IDS_2));
+ v4l2_info(sd, "ERR1: 0x%x\n",
+ readl(csi2->base + CSI2_ERR1));
+ v4l2_info(sd, "ERR2: 0x%x\n",
+ readl(csi2->base + CSI2_ERR2));
+ v4l2_info(sd, "MSK1: 0x%x\n",
+ readl(csi2->base + CSI2_MSK1));
+ v4l2_info(sd, "MSK2: 0x%x\n",
+ readl(csi2->base + CSI2_MSK2));
+ v4l2_info(sd, "PHY_TST_CTRL0: 0x%x\n",
+ readl(csi2->base + CSI2_PHY_TST_CTRL0));
+ v4l2_info(sd, "PHY_TST_CTRL1: 0x%x\n",
+ readl(csi2->base + CSI2_PHY_TST_CTRL1));
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops csi2_core_ops = {
+ .log_status = csi2_log_status,
+};
+
static const struct media_entity_operations csi2_entity_ops = {
.link_setup = csi2_link_setup,
.link_validate = v4l2_subdev_link_validate,
@@ -581,6 +625,7 @@ static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
};
static const struct v4l2_subdev_ops csi2_subdev_ops = {
+ .core = &csi2_core_ops,
.video = &csi2_video_ops,
.pad = &csi2_pad_ops,
};
diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
index 99b333b68198..c44145284aa1 100644
--- a/drivers/staging/media/max96712/max96712.c
+++ b/drivers/staging/media/max96712/max96712.c
@@ -30,6 +30,7 @@ struct max96712_priv {
struct regmap *regmap;
struct gpio_desc *gpiod_pwdn;
+ bool cphy;
struct v4l2_mbus_config_mipi_csi2 mipi;
struct v4l2_subdev sd;
@@ -127,10 +128,18 @@ static void max96712_mipi_configure(struct max96712_priv *priv)
/* Select 2x4 mode. */
max96712_write(priv, 0x8a0, 0x04);
- /* Configure a 4-lane DPHY using PHY0 and PHY1. */
/* TODO: Add support for 2-lane and 1-lane configurations. */
- /* TODO: Add support CPHY mode. */
- max96712_write(priv, 0x94a, 0xc0);
+ if (priv->cphy) {
+ /* Configure a 3-lane C-PHY using PHY0 and PHY1. */
+ max96712_write(priv, 0x94a, 0xa0);
+
+ /* Configure C-PHY timings. */
+ max96712_write(priv, 0x8ad, 0x3f);
+ max96712_write(priv, 0x8ae, 0x7d);
+ } else {
+ /* Configure a 4-lane D-PHY using PHY0 and PHY1. */
+ max96712_write(priv, 0x94a, 0xc0);
+ }
/* Configure lane mapping for PHY0 and PHY1. */
/* TODO: Add support for lane swapping. */
@@ -332,8 +341,9 @@ static int max96712_parse_dt(struct max96712_priv *priv)
{
struct fwnode_handle *ep;
struct v4l2_fwnode_endpoint v4l2_ep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY
+ .bus_type = V4L2_MBUS_UNKNOWN,
};
+ unsigned int supported_lanes;
int ret;
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(&priv->client->dev), 4,
@@ -350,8 +360,24 @@ static int max96712_parse_dt(struct max96712_priv *priv)
return -EINVAL;
}
- if (v4l2_ep.bus.mipi_csi2.num_data_lanes != 4) {
- dev_err(&priv->client->dev, "Only 4 data lanes supported\n");
+ switch (v4l2_ep.bus_type) {
+ case V4L2_MBUS_CSI2_DPHY:
+ supported_lanes = 4;
+ priv->cphy = false;
+ break;
+ case V4L2_MBUS_CSI2_CPHY:
+ supported_lanes = 3;
+ priv->cphy = true;
+ break;
+ default:
+ dev_err(&priv->client->dev, "Unsupported bus-type %u\n",
+ v4l2_ep.bus_type);
+ return -EINVAL;
+ }
+
+ if (v4l2_ep.bus.mipi_csi2.num_data_lanes != supported_lanes) {
+ dev_err(&priv->client->dev, "Only %u data lanes supported\n",
+ supported_lanes);
return -EINVAL;
}
@@ -427,7 +453,7 @@ static struct i2c_driver max96712_i2c_driver = {
.name = "max96712",
.of_match_table = of_match_ptr(max96712_of_table),
},
- .probe_new = max96712_probe,
+ .probe = max96712_probe,
.remove = max96712_remove,
};
diff --git a/drivers/staging/media/tegra-video/Kconfig b/drivers/staging/media/tegra-video/Kconfig
index df1b2cff2417..c53441822fdf 100644
--- a/drivers/staging/media/tegra-video/Kconfig
+++ b/drivers/staging/media/tegra-video/Kconfig
@@ -15,5 +15,6 @@ config VIDEO_TEGRA
config VIDEO_TEGRA_TPG
bool "NVIDIA Tegra VI driver TPG mode"
depends on VIDEO_TEGRA
+ depends on ARCH_TEGRA_210_SOC
help
Say yes here to enable Tegra internal TPG mode
diff --git a/drivers/staging/media/tegra-video/Makefile b/drivers/staging/media/tegra-video/Makefile
index dfa2ef8f99ef..6c7552e05109 100644
--- a/drivers/staging/media/tegra-video/Makefile
+++ b/drivers/staging/media/tegra-video/Makefile
@@ -2,7 +2,9 @@
tegra-video-objs := \
video.o \
vi.o \
+ vip.o \
csi.o
+tegra-video-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20.o
tegra-video-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
obj-$(CONFIG_VIDEO_TEGRA) += tegra-video.o
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
index 36ca639622c9..052172017b3b 100644
--- a/drivers/staging/media/tegra-video/csi.c
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -328,12 +328,42 @@ static int tegra_csi_enable_stream(struct v4l2_subdev *subdev)
}
csi_chan->pg_mode = chan->pg_mode;
+
+ /*
+	 * The Tegra CSI receiver can detect the first LP to HS transition,
+	 * so start the CSI stream-on prior to the sensor stream-on, and do
+	 * the reverse for stream-off.
+ */
ret = csi->ops->csi_start_streaming(csi_chan);
if (ret < 0)
goto finish_calibration;
+ if (csi_chan->mipi) {
+ struct v4l2_subdev *src_subdev;
+ /*
+		 * The TRM incorrectly documents waiting for the done status from
+		 * the calibration logic right after CSI interface power-on. By
+		 * design, calibration results are latched and applied to the pads
+		 * only when the link is in the LP11 state, which happens during
+		 * sensor stream-on. CSI subdev stream-on triggers the start of
+		 * MIPI pad calibration; wait here for the calibration to finish,
+		 * after the sensor subdev stream-on.
+ */
+ src_subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(src_subdev, video, s_stream, true);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto disable_csi_stream;
+
+ err = tegra_mipi_finish_calibration(csi_chan->mipi);
+ if (err < 0)
+ dev_warn(csi->dev, "MIPI calibration failed: %d\n", err);
+ }
+
return 0;
+disable_csi_stream:
+ csi->ops->csi_stop_streaming(csi_chan);
finish_calibration:
if (csi_chan->mipi)
tegra_mipi_finish_calibration(csi_chan->mipi);
@@ -352,10 +382,24 @@ rpm_put:
static int tegra_csi_disable_stream(struct v4l2_subdev *subdev)
{
+ struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
struct tegra_csi *csi = csi_chan->csi;
int err;
+ /*
+	 * Stream off the subdevices in the reverse order of stream-on.
+	 * In TPG mode the remote source subdev is the same as the CSI subdev.
+ */
+ if (csi_chan->mipi) {
+ struct v4l2_subdev *src_subdev;
+
+ src_subdev = tegra_channel_get_remote_source_subdev(chan);
+ err = v4l2_subdev_call(src_subdev, video, s_stream, false);
+ if (err < 0 && err != -ENOIOCTLCMD)
+ dev_err_probe(csi->dev, err, "source subdev stream off failed\n");
+ }
+
csi->ops->csi_stop_streaming(csi_chan);
if (csi_chan->mipi) {
@@ -786,6 +830,10 @@ static int tegra_csi_remove(struct platform_device *pdev)
return 0;
}
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_csi_soc tegra210_csi_soc;
+#endif
+
static const struct of_device_id tegra_csi_of_id_table[] = {
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
{ .compatible = "nvidia,tegra210-csi", .data = &tegra210_csi_soc },
diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h
index 6960ea2e3d36..3e6e5ee1bb1e 100644
--- a/drivers/staging/media/tegra-video/csi.h
+++ b/drivers/staging/media/tegra-video/csi.h
@@ -151,10 +151,6 @@ struct tegra_csi {
struct list_head csi_chans;
};
-#if defined(CONFIG_ARCH_TEGRA_210_SOC)
-extern const struct tegra_csi_soc tegra210_csi_soc;
-#endif
-
void tegra_csi_error_recover(struct v4l2_subdev *subdev);
void tegra_csi_calc_settle_time(struct tegra_csi_channel *csi_chan,
u8 csi_port_num,
diff --git a/drivers/staging/media/tegra-video/tegra20.c b/drivers/staging/media/tegra-video/tegra20.c
new file mode 100644
index 000000000000..c25286772603
--- /dev/null
+++ b/drivers/staging/media/tegra-video/tegra20.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Tegra20-specific VI implementation
+ *
+ * Copyright (C) 2023 SKIDATA GmbH
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+/*
+ * This source file contains the Tegra20 supported video formats,
+ * VI and VIP SoC-specific data, operations and register accessors.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/v4l2-mediabus.h>
+
+#include "vip.h"
+#include "vi.h"
+
+#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT msecs_to_jiffies(200)
+
+/* These are just good-sense numbers. The actual min/max is not documented. */
+#define TEGRA20_MIN_WIDTH 32U
+#define TEGRA20_MIN_HEIGHT 32U
+#define TEGRA20_MAX_WIDTH 2048U
+#define TEGRA20_MAX_HEIGHT 2048U
+
+/* --------------------------------------------------------------------------
+ * Registers
+ */
+
+#define TEGRA_VI_CONT_SYNCPT_OUT_1 0x0060
+#define VI_CONT_SYNCPT_OUT_1_CONTINUOUS_SYNCPT BIT(8)
+#define VI_CONT_SYNCPT_OUT_1_SYNCPT_IDX_SFT 0
+
+#define TEGRA_VI_VI_INPUT_CONTROL 0x0088
+#define VI_INPUT_FIELD_DETECT BIT(27)
+#define VI_INPUT_BT656 BIT(25)
+#define VI_INPUT_YUV_INPUT_FORMAT_SFT 8 /* bits [9:8] */
+#define VI_INPUT_YUV_INPUT_FORMAT_UYVY (0 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
+#define VI_INPUT_YUV_INPUT_FORMAT_VYUY (1 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
+#define VI_INPUT_YUV_INPUT_FORMAT_YUYV (2 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
+#define VI_INPUT_YUV_INPUT_FORMAT_YVYU (3 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
+#define VI_INPUT_INPUT_FORMAT_SFT 2 /* bits [5:2] */
+#define VI_INPUT_INPUT_FORMAT_YUV422 (0 << VI_INPUT_INPUT_FORMAT_SFT)
+#define VI_INPUT_VIP_INPUT_ENABLE BIT(1)
+
+#define TEGRA_VI_VI_CORE_CONTROL 0x008c
+#define VI_VI_CORE_CONTROL_PLANAR_CONV_IN_SEL_EXT BIT(31)
+#define VI_VI_CORE_CONTROL_CSC_INPUT_SEL_EXT BIT(30)
+#define VI_VI_CORE_CONTROL_INPUT_TO_ALT_MUX_SFT 27
+#define VI_VI_CORE_CONTROL_INPUT_TO_CORE_EXT_SFT 24
+#define VI_VI_CORE_CONTROL_OUTPUT_TO_ISP_EXT_SFT 21
+#define VI_VI_CORE_CONTROL_ISP_HOST_STALL_OFF BIT(20)
+#define VI_VI_CORE_CONTROL_V_DOWNSCALING BIT(19)
+#define VI_VI_CORE_CONTROL_V_AVERAGING BIT(18)
+#define VI_VI_CORE_CONTROL_H_DOWNSCALING BIT(17)
+#define VI_VI_CORE_CONTROL_H_AVERAGING BIT(16)
+#define VI_VI_CORE_CONTROL_CSC_INPUT_SEL BIT(11)
+#define VI_VI_CORE_CONTROL_PLANAR_CONV_INPUT_SEL BIT(10)
+#define VI_VI_CORE_CONTROL_INPUT_TO_CORE_SFT 8
+#define VI_VI_CORE_CONTROL_ISP_DOWNSAMPLE_SFT 5
+#define VI_VI_CORE_CONTROL_OUTPUT_TO_EPP_SFT 2
+#define VI_VI_CORE_CONTROL_OUTPUT_TO_ISP_SFT 0
+
+#define TEGRA_VI_VI_FIRST_OUTPUT_CONTROL 0x0090
+#define VI_OUTPUT_FORMAT_EXT BIT(22)
+#define VI_OUTPUT_V_DIRECTION BIT(20)
+#define VI_OUTPUT_H_DIRECTION BIT(19)
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT 17
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY (0 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_VYUY (1 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_YUYV (2 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_YVYU (3 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_OUTPUT_BYTE_SWAP BIT(16)
+#define VI_OUTPUT_LAST_PIXEL_DUPLICATION BIT(8)
+#define VI_OUTPUT_OUTPUT_FORMAT_SFT 0
+#define VI_OUTPUT_OUTPUT_FORMAT_YUV422POST (3 << VI_OUTPUT_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_OUTPUT_FORMAT_YUV420PLANAR (6 << VI_OUTPUT_OUTPUT_FORMAT_SFT)
+
+#define TEGRA_VI_VIP_H_ACTIVE 0x00a4
+#define VI_VIP_H_ACTIVE_PERIOD_SFT 16 /* active pixels/line, must be even */
+#define VI_VIP_H_ACTIVE_START_SFT 0
+
+#define TEGRA_VI_VIP_V_ACTIVE 0x00a8
+#define VI_VIP_V_ACTIVE_PERIOD_SFT 16 /* active lines */
+#define VI_VIP_V_ACTIVE_START_SFT 0
+
+#define TEGRA_VI_VB0_START_ADDRESS_FIRST 0x00c4
+#define TEGRA_VI_VB0_BASE_ADDRESS_FIRST 0x00c8
+#define TEGRA_VI_VB0_START_ADDRESS_U 0x00cc
+#define TEGRA_VI_VB0_BASE_ADDRESS_U 0x00d0
+#define TEGRA_VI_VB0_START_ADDRESS_V 0x00d4
+#define TEGRA_VI_VB0_BASE_ADDRESS_V 0x00d8
+
+#define TEGRA_VI_FIRST_OUTPUT_FRAME_SIZE 0x00e0
+#define VI_FIRST_OUTPUT_FRAME_HEIGHT_SFT 16
+#define VI_FIRST_OUTPUT_FRAME_WIDTH_SFT 0
+
+#define TEGRA_VI_VB0_COUNT_FIRST 0x00e4
+
+#define TEGRA_VI_VB0_SIZE_FIRST 0x00e8
+#define VI_VB0_SIZE_FIRST_V_SFT 16
+#define VI_VB0_SIZE_FIRST_H_SFT 0
+
+#define TEGRA_VI_VB0_BUFFER_STRIDE_FIRST 0x00ec
+#define VI_VB0_BUFFER_STRIDE_FIRST_CHROMA_SFT 30
+#define VI_VB0_BUFFER_STRIDE_FIRST_LUMA_SFT 0
+
+#define TEGRA_VI_H_LPF_CONTROL 0x0108
+#define VI_H_LPF_CONTROL_CHROMA_SFT 16
+#define VI_H_LPF_CONTROL_LUMA_SFT 0
+
+#define TEGRA_VI_H_DOWNSCALE_CONTROL 0x010c
+#define TEGRA_VI_V_DOWNSCALE_CONTROL 0x0110
+
+#define TEGRA_VI_VIP_INPUT_STATUS 0x0144
+
+#define TEGRA_VI_VI_DATA_INPUT_CONTROL 0x0168
+#define VI_DATA_INPUT_SFT 0 /* [11:0] = mask pin inputs to VI core */
+
+#define TEGRA_VI_PIN_INPUT_ENABLE 0x016c
+#define VI_PIN_INPUT_VSYNC BIT(14)
+#define VI_PIN_INPUT_HSYNC BIT(13)
+#define VI_PIN_INPUT_VD_SFT 0 /* [11:0] = data bin N input enable */
+
+#define TEGRA_VI_PIN_INVERSION 0x0174
+#define VI_PIN_INVERSION_VSYNC_ACTIVE_HIGH BIT(1)
+#define VI_PIN_INVERSION_HSYNC_ACTIVE_HIGH BIT(0)
+
+#define TEGRA_VI_CAMERA_CONTROL 0x01a0
+#define VI_CAMERA_CONTROL_STOP_CAPTURE BIT(2)
+#define VI_CAMERA_CONTROL_TEST_MODE BIT(1)
+#define VI_CAMERA_CONTROL_VIP_ENABLE BIT(0)
+
+#define TEGRA_VI_VI_ENABLE 0x01a4
+#define VI_VI_ENABLE_SW_FLOW_CONTROL_OUT1 BIT(1)
+#define VI_VI_ENABLE_FIRST_OUTPUT_TO_MEM_DISABLE BIT(0)
+
+#define TEGRA_VI_VI_RAISE 0x01ac
+#define VI_VI_RAISE_ON_EDGE BIT(0)
+
+/* --------------------------------------------------------------------------
+ * VI
+ */
+
+static void tegra20_vi_write(struct tegra_vi_channel *chan, unsigned int addr, u32 val)
+{
+ writel(val, chan->vi->iomem + addr);
+}
+
+/*
+ * Get the main input format (YUV/RGB...) and the YUV variant as values to
+ * be written into registers for the current VI input mbus code.
+ */
+static void tegra20_vi_get_input_formats(struct tegra_vi_channel *chan,
+ unsigned int *main_input_format,
+ unsigned int *yuv_input_format)
+{
+ unsigned int input_mbus_code = chan->fmtinfo->code;
+
+ (*main_input_format) = VI_INPUT_INPUT_FORMAT_YUV422;
+
+ switch (input_mbus_code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ (*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_UYVY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ (*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_VYUY;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ (*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_YUYV;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ (*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_YVYU;
+ break;
+ }
+}
+
+/*
+ * Get the main output format (YUV/RGB...) and the YUV variant as values to
+ * be written into registers for the current VI output pixel format.
+ */
+static void tegra20_vi_get_output_formats(struct tegra_vi_channel *chan,
+ unsigned int *main_output_format,
+ unsigned int *yuv_output_format)
+{
+ u32 output_fourcc = chan->format.pixelformat;
+
+ /* Default to YUV422 non-planar (U8Y8V8Y8) after downscaling */
+ (*main_output_format) = VI_OUTPUT_OUTPUT_FORMAT_YUV422POST;
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY;
+
+ switch (output_fourcc) {
+ case V4L2_PIX_FMT_UYVY:
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_VYUY;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_YUYV;
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_YVYU;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ (*main_output_format) = VI_OUTPUT_OUTPUT_FORMAT_YUV420PLANAR;
+ break;
+ }
+}
+
+/*
+ * Make the VI accessible (needed on Tegra20).
+ *
+ * This function writes an unknown bit into an unknown register. The code
+ * comes from a downstream 3.1 kernel that has a working VIP driver for
+ * Tegra20, and removing it makes the VI completely inaccessible. It should
+ * be rewritten and possibly moved elsewhere, but the appropriate location
+ * and implementation is unknown due to a total lack of documentation.
+ */
+static int tegra20_vi_enable(struct tegra_vi *vi, bool on)
+{
+ /* from arch/arm/mach-tegra/iomap.h */
+ const phys_addr_t TEGRA_APB_MISC_BASE = 0x70000000;
+ const unsigned long reg_offset = 0x42c;
+ void __iomem *apb_misc;
+ u32 val;
+
+ apb_misc = ioremap(TEGRA_APB_MISC_BASE, PAGE_SIZE);
+ if (!apb_misc)
+ apb_misc = ERR_PTR(-ENOENT);
+ if (IS_ERR(apb_misc))
+ return dev_err_probe(vi->dev, PTR_ERR(apb_misc), "cannot access APB_MISC");
+
+ val = readl(apb_misc + reg_offset);
+ val &= ~BIT(0);
+ val |= on ? BIT(0) : 0;
+ writel(val, apb_misc + reg_offset);
+ iounmap(apb_misc);
+
+ return 0;
+}
+
+static int tegra20_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
+{
+ struct tegra_vi *vi = chan->vi;
+ struct host1x_syncpt *out_sp;
+
+ out_sp = host1x_syncpt_request(&vi->client, HOST1X_SYNCPT_CLIENT_MANAGED);
+ if (!out_sp)
+ return dev_err_probe(vi->dev, -ENOMEM, "failed to request syncpoint\n");
+
+ chan->mw_ack_sp[0] = out_sp;
+
+ return 0;
+}
+
+static void tegra20_channel_host1x_syncpt_free(struct tegra_vi_channel *chan)
+{
+ host1x_syncpt_put(chan->mw_ack_sp[0]);
+}
+
+static void tegra20_fmt_align(struct v4l2_pix_format *pix, unsigned int bpp)
+{
+ pix->width = clamp(pix->width, TEGRA20_MIN_WIDTH, TEGRA20_MAX_WIDTH);
+ pix->height = clamp(pix->height, TEGRA20_MIN_HEIGHT, TEGRA20_MAX_HEIGHT);
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ pix->bytesperline = roundup(pix->width, 2) * 2;
+ pix->sizeimage = roundup(pix->width, 2) * 2 * pix->height;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ pix->bytesperline = roundup(pix->width, 8);
+ pix->sizeimage = roundup(pix->width, 8) * pix->height * 3 / 2;
+ break;
+ }
+}
+
+/*
+ * Compute buffer offsets once per stream so that
+ * tegra20_channel_vi_buffer_setup() only has to do very simple maths for
+ * each buffer.
+ */
+static void tegra20_channel_queue_setup(struct tegra_vi_channel *chan)
+{
+ unsigned int stride = chan->format.bytesperline;
+ unsigned int height = chan->format.height;
+
+ chan->start_offset = 0;
+
+ switch (chan->format.pixelformat) {
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ if (chan->vflip)
+ chan->start_offset += stride * (height - 1);
+ if (chan->hflip)
+ chan->start_offset += stride - 1;
+ break;
+
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ chan->addr_offset_u = stride * height;
+ chan->addr_offset_v = chan->addr_offset_u + stride * height / 4;
+
+ /* For YVU420, we swap the locations of the U and V planes. */
+ if (chan->format.pixelformat == V4L2_PIX_FMT_YVU420) {
+ unsigned long temp;
+
+ temp = chan->addr_offset_u;
+ chan->addr_offset_u = chan->addr_offset_v;
+ chan->addr_offset_v = temp;
+ }
+
+ chan->start_offset_u = chan->addr_offset_u;
+ chan->start_offset_v = chan->addr_offset_v;
+
+ if (chan->vflip) {
+ chan->start_offset += stride * (height - 1);
+ chan->start_offset_u += (stride / 2) * ((height / 2) - 1);
+ chan->start_offset_v += (stride / 2) * ((height / 2) - 1);
+ }
+ if (chan->hflip) {
+ chan->start_offset += stride - 1;
+ chan->start_offset_u += (stride / 2) - 1;
+ chan->start_offset_v += (stride / 2) - 1;
+ }
+ break;
+ }
+}
+
+static void release_buffer(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb = &buf->buf;
+
+ vb->sequence = chan->sequence++;
+ vb->field = V4L2_FIELD_NONE;
+ vb->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vb->vb2_buf, state);
+}
+
+static void tegra20_channel_vi_buffer_setup(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ dma_addr_t base = buf->addr;
+
+ switch (chan->fmtinfo->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ tegra20_vi_write(chan, TEGRA_VI_VB0_BASE_ADDRESS_U, base + chan->addr_offset_u);
+ tegra20_vi_write(chan, TEGRA_VI_VB0_START_ADDRESS_U, base + chan->start_offset_u);
+ tegra20_vi_write(chan, TEGRA_VI_VB0_BASE_ADDRESS_V, base + chan->addr_offset_v);
+ tegra20_vi_write(chan, TEGRA_VI_VB0_START_ADDRESS_V, base + chan->start_offset_v);
+ fallthrough;
+
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ tegra20_vi_write(chan, TEGRA_VI_VB0_BASE_ADDRESS_FIRST, base);
+ tegra20_vi_write(chan, TEGRA_VI_VB0_START_ADDRESS_FIRST, base + chan->start_offset);
+ break;
+ }
+}
+
+static int tegra20_channel_capture_frame(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ int err;
+
+ chan->next_out_sp_idx++;
+
+ tegra20_channel_vi_buffer_setup(chan, buf);
+
+ tegra20_vi_write(chan, TEGRA_VI_CAMERA_CONTROL, VI_CAMERA_CONTROL_VIP_ENABLE);
+
+ /* Wait for syncpt counter to reach frame start event threshold */
+ err = host1x_syncpt_wait(chan->mw_ack_sp[0], chan->next_out_sp_idx,
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, NULL);
+ if (err) {
+ host1x_syncpt_incr(chan->mw_ack_sp[0]);
+ dev_err_ratelimited(&chan->video.dev, "frame start syncpt timeout: %d\n", err);
+ release_buffer(chan, buf, VB2_BUF_STATE_ERROR);
+ return err;
+ }
+
+ tegra20_vi_write(chan, TEGRA_VI_CAMERA_CONTROL,
+ VI_CAMERA_CONTROL_STOP_CAPTURE | VI_CAMERA_CONTROL_VIP_ENABLE);
+
+ release_buffer(chan, buf, VB2_BUF_STATE_DONE);
+
+ return 0;
+}
+
+static int tegra20_chan_capture_kthread_start(void *data)
+{
+ struct tegra_vi_channel *chan = data;
+ struct tegra_channel_buffer *buf;
+ unsigned int retries = 0;
+ int err = 0;
+
+ while (1) {
+ /*
+		 * The source is not streaming if the error is non-zero, so do
+		 * not dequeue buffers on error; let the thread sleep until the
+		 * kthread stop signal is received.
+ */
+ wait_event_interruptible(chan->start_wait,
+ kthread_should_stop() ||
+ (!list_empty(&chan->capture) && !err));
+
+ if (kthread_should_stop())
+ break;
+
+ /* dequeue the buffer and start capture */
+ spin_lock(&chan->start_lock);
+ if (list_empty(&chan->capture)) {
+ spin_unlock(&chan->start_lock);
+ continue;
+ }
+
+ buf = list_first_entry(&chan->capture, struct tegra_channel_buffer, queue);
+ list_del_init(&buf->queue);
+ spin_unlock(&chan->start_lock);
+
+ err = tegra20_channel_capture_frame(chan, buf);
+ if (!err) {
+ retries = 0;
+ continue;
+ }
+
+ if (retries++ > chan->syncpt_timeout_retry)
+ vb2_queue_error(&chan->queue);
+ else
+ err = 0;
+ }
+
+ return 0;
+}
+
+static void tegra20_camera_capture_setup(struct tegra_vi_channel *chan)
+{
+ u32 output_fourcc = chan->format.pixelformat;
+ int width = chan->format.width;
+ int height = chan->format.height;
+ int stride_l = chan->format.bytesperline;
+ int stride_c = (output_fourcc == V4L2_PIX_FMT_YUV420 ||
+ output_fourcc == V4L2_PIX_FMT_YVU420) ? 1 : 0;
+ int main_output_format;
+ int yuv_output_format;
+
+ tegra20_vi_get_output_formats(chan, &main_output_format, &yuv_output_format);
+
+ /*
+ * Set up low pass filter. Use 0x240 for chromaticity and 0x240
+ * for luminance, which is the default and means not to touch
+ * anything.
+ */
+ tegra20_vi_write(chan, TEGRA_VI_H_LPF_CONTROL,
+ 0x0240 << VI_H_LPF_CONTROL_LUMA_SFT |
+ 0x0240 << VI_H_LPF_CONTROL_CHROMA_SFT);
+
+ /* Set up raise-on-edge, so we get an interrupt on end of frame. */
+ tegra20_vi_write(chan, TEGRA_VI_VI_RAISE, VI_VI_RAISE_ON_EDGE);
+
+ tegra20_vi_write(chan, TEGRA_VI_VI_FIRST_OUTPUT_CONTROL,
+ (chan->vflip ? VI_OUTPUT_V_DIRECTION : 0) |
+ (chan->hflip ? VI_OUTPUT_H_DIRECTION : 0) |
+ yuv_output_format << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT |
+ main_output_format << VI_OUTPUT_OUTPUT_FORMAT_SFT);
+
+ /* Set up frame size */
+ tegra20_vi_write(chan, TEGRA_VI_FIRST_OUTPUT_FRAME_SIZE,
+ height << VI_FIRST_OUTPUT_FRAME_HEIGHT_SFT |
+ width << VI_FIRST_OUTPUT_FRAME_WIDTH_SFT);
+
+ /* First output memory enabled */
+ tegra20_vi_write(chan, TEGRA_VI_VI_ENABLE, 0);
+
+ /* Set the number of frames in the buffer */
+ tegra20_vi_write(chan, TEGRA_VI_VB0_COUNT_FIRST, 1);
+
+ /* Set up buffer frame size */
+ tegra20_vi_write(chan, TEGRA_VI_VB0_SIZE_FIRST,
+ height << VI_VB0_SIZE_FIRST_V_SFT |
+ width << VI_VB0_SIZE_FIRST_H_SFT);
+
+ tegra20_vi_write(chan, TEGRA_VI_VB0_BUFFER_STRIDE_FIRST,
+ stride_l << VI_VB0_BUFFER_STRIDE_FIRST_LUMA_SFT |
+ stride_c << VI_VB0_BUFFER_STRIDE_FIRST_CHROMA_SFT);
+
+ tegra20_vi_write(chan, TEGRA_VI_VI_ENABLE, 0);
+}
+
+static int tegra20_vi_start_streaming(struct vb2_queue *vq, u32 count)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+ struct media_pipeline *pipe = &chan->video.pipe;
+ int err;
+
+ chan->next_out_sp_idx = host1x_syncpt_read(chan->mw_ack_sp[0]);
+
+ err = video_device_pipeline_start(&chan->video, pipe);
+ if (err)
+ goto error_pipeline_start;
+
+ tegra20_camera_capture_setup(chan);
+
+ err = tegra_channel_set_stream(chan, true);
+ if (err)
+ goto error_set_stream;
+
+ chan->sequence = 0;
+
+ chan->kthread_start_capture = kthread_run(tegra20_chan_capture_kthread_start,
+ chan, "%s:0", chan->video.name);
+ if (IS_ERR(chan->kthread_start_capture)) {
+ err = PTR_ERR(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ dev_err_probe(&chan->video.dev, err, "failed to run capture kthread\n");
+ goto error_kthread_start;
+ }
+
+ return 0;
+
+error_kthread_start:
+ tegra_channel_set_stream(chan, false);
+error_set_stream:
+ video_device_pipeline_stop(&chan->video);
+error_pipeline_start:
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED);
+
+ return err;
+}
+
+static void tegra20_vi_stop_streaming(struct vb2_queue *vq)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ if (chan->kthread_start_capture) {
+ kthread_stop(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ }
+
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR);
+ tegra_channel_set_stream(chan, false);
+ video_device_pipeline_stop(&chan->video);
+}
+
+static const struct tegra_vi_ops tegra20_vi_ops = {
+ .vi_enable = tegra20_vi_enable,
+ .channel_host1x_syncpt_init = tegra20_channel_host1x_syncpt_init,
+ .channel_host1x_syncpt_free = tegra20_channel_host1x_syncpt_free,
+ .vi_fmt_align = tegra20_fmt_align,
+ .channel_queue_setup = tegra20_channel_queue_setup,
+ .vi_start_streaming = tegra20_vi_start_streaming,
+ .vi_stop_streaming = tegra20_vi_stop_streaming,
+};
+
+#define TEGRA20_VIDEO_FMT(MBUS_CODE, BPP, FOURCC) \
+{ \
+ .code = MEDIA_BUS_FMT_##MBUS_CODE, \
+ .bpp = BPP, \
+ .fourcc = V4L2_PIX_FMT_##FOURCC, \
+}
+
+static const struct tegra_video_format tegra20_video_formats[] = {
+ TEGRA20_VIDEO_FMT(UYVY8_2X8, 2, UYVY),
+ TEGRA20_VIDEO_FMT(VYUY8_2X8, 2, VYUY),
+ TEGRA20_VIDEO_FMT(YUYV8_2X8, 2, YUYV),
+ TEGRA20_VIDEO_FMT(YVYU8_2X8, 2, YVYU),
+ TEGRA20_VIDEO_FMT(UYVY8_2X8, 1, YUV420),
+ TEGRA20_VIDEO_FMT(UYVY8_2X8, 1, YVU420),
+};
+
+const struct tegra_vi_soc tegra20_vi_soc = {
+ .video_formats = tegra20_video_formats,
+ .nformats = ARRAY_SIZE(tegra20_video_formats),
+ .default_video_format = &tegra20_video_formats[0],
+ .ops = &tegra20_vi_ops,
+ .vi_max_channels = 1, /* parallel input (VIP) */
+ .vi_max_clk_hz = 150000000,
+ .has_h_v_flip = true,
+};
+
+/* --------------------------------------------------------------------------
+ * VIP
+ */
+
+/*
+ * VIP-specific configuration for stream start.
+ *
+ * Whatever is common among VIP and CSI is done by the VI component (see
+ * tegra20_vi_start_streaming()). Here we do what is VIP-specific.
+ */
+static int tegra20_vip_start_streaming(struct tegra_vip_channel *vip_chan)
+{
+ struct tegra_vi_channel *vi_chan = v4l2_get_subdev_hostdata(&vip_chan->subdev);
+ int width = vi_chan->format.width;
+ int height = vi_chan->format.height;
+
+ unsigned int main_input_format;
+ unsigned int yuv_input_format;
+
+ tegra20_vi_get_input_formats(vi_chan, &main_input_format, &yuv_input_format);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_VI_CORE_CONTROL, 0);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_VI_INPUT_CONTROL,
+ VI_INPUT_VIP_INPUT_ENABLE | main_input_format | yuv_input_format);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_V_DOWNSCALE_CONTROL, 0);
+ tegra20_vi_write(vi_chan, TEGRA_VI_H_DOWNSCALE_CONTROL, 0);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_VIP_V_ACTIVE, height << VI_VIP_V_ACTIVE_PERIOD_SFT);
+ tegra20_vi_write(vi_chan, TEGRA_VI_VIP_H_ACTIVE,
+ roundup(width, 2) << VI_VIP_H_ACTIVE_PERIOD_SFT);
+
+ /*
+ * For VIP, D9..D2 are mapped to the video decoder's P7..P0.
+ * Disable/mask out the other Dn wires. When not in BT656
+ * mode we also need the V/H sync.
+ */
+ tegra20_vi_write(vi_chan, TEGRA_VI_PIN_INPUT_ENABLE,
+ GENMASK(9, 2) << VI_PIN_INPUT_VD_SFT |
+ VI_PIN_INPUT_HSYNC | VI_PIN_INPUT_VSYNC);
+ tegra20_vi_write(vi_chan, TEGRA_VI_VI_DATA_INPUT_CONTROL,
+ GENMASK(9, 2) << VI_DATA_INPUT_SFT);
+ tegra20_vi_write(vi_chan, TEGRA_VI_PIN_INVERSION, 0);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_CONT_SYNCPT_OUT_1,
+ VI_CONT_SYNCPT_OUT_1_CONTINUOUS_SYNCPT |
+ host1x_syncpt_id(vi_chan->mw_ack_sp[0])
+ << VI_CONT_SYNCPT_OUT_1_SYNCPT_IDX_SFT);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_CAMERA_CONTROL, VI_CAMERA_CONTROL_STOP_CAPTURE);
+
+ return 0;
+}
+
+static const struct tegra_vip_ops tegra20_vip_ops = {
+ .vip_start_streaming = tegra20_vip_start_streaming,
+};
+
+const struct tegra_vip_soc tegra20_vip_soc = {
+ .ops = &tegra20_vip_ops,
+};
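tegra20_vip_start_streaming() recovers the VI channel through v4l2_get_subdev_hostdata(), so the VI side must have stored that pointer when it bound the VIP subdev. A minimal sketch of the pairing, assuming an illustrative callback name on the VI side:

	static void tegra_vi_attach_vip(struct tegra_vi_channel *vi_chan,
					struct v4l2_subdev *vip_subdev)
	{
		/* Lets the VIP s_stream path map its subdev back to the VI channel. */
		v4l2_set_subdev_hostdata(vip_subdev, vi_chan);
	}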
diff --git a/drivers/staging/media/tegra-video/tegra210.c b/drivers/staging/media/tegra-video/tegra210.c
index d58370a84737..da99f19a39e7 100644
--- a/drivers/staging/media/tegra-video/tegra210.c
+++ b/drivers/staging/media/tegra-video/tegra210.c
@@ -17,6 +17,13 @@
#include "csi.h"
#include "vi.h"
+#define TEGRA210_MIN_WIDTH 32U
+#define TEGRA210_MAX_WIDTH 32768U
+#define TEGRA210_MIN_HEIGHT 32U
+#define TEGRA210_MAX_HEIGHT 32768U
+
+#define SURFACE_ALIGN_BYTES 64
+
#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT msecs_to_jiffies(200)
/* Tegra210 VI registers */
@@ -172,6 +179,84 @@ static u32 vi_csi_read(struct tegra_vi_channel *chan, u8 portno,
/*
* Tegra210 VI channel capture operations
*/
+
+static int tegra210_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
+{
+ struct tegra_vi *vi = chan->vi;
+ unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
+ struct host1x_syncpt *fs_sp;
+ struct host1x_syncpt *mw_sp;
+ int ret, i;
+
+ for (i = 0; i < chan->numgangports; i++) {
+ fs_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!fs_sp) {
+ dev_err(vi->dev, "failed to request frame start syncpoint\n");
+ ret = -ENOMEM;
+ goto free_syncpts;
+ }
+
+ mw_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!mw_sp) {
+ dev_err(vi->dev, "failed to request memory ack syncpoint\n");
+ host1x_syncpt_put(fs_sp);
+ ret = -ENOMEM;
+ goto free_syncpts;
+ }
+
+ chan->frame_start_sp[i] = fs_sp;
+ chan->mw_ack_sp[i] = mw_sp;
+ spin_lock_init(&chan->sp_incr_lock[i]);
+ }
+
+ return 0;
+
+free_syncpts:
+ for (i = 0; i < chan->numgangports; i++) {
+ host1x_syncpt_put(chan->mw_ack_sp[i]);
+ host1x_syncpt_put(chan->frame_start_sp[i]);
+ }
+ return ret;
+}
+
+static void tegra210_channel_host1x_syncpt_free(struct tegra_vi_channel *chan)
+{
+ int i;
+
+ for (i = 0; i < chan->numgangports; i++) {
+ host1x_syncpt_put(chan->mw_ack_sp[i]);
+ host1x_syncpt_put(chan->frame_start_sp[i]);
+ }
+}
+
+static void tegra210_fmt_align(struct v4l2_pix_format *pix, unsigned int bpp)
+{
+ unsigned int min_bpl;
+ unsigned int max_bpl;
+ unsigned int bpl;
+
+ /*
+ * The transfer alignment requirements are expressed in bytes.
+ * Clamp the requested width and height to the limits.
+ */
+ pix->width = clamp(pix->width, TEGRA210_MIN_WIDTH, TEGRA210_MAX_WIDTH);
+ pix->height = clamp(pix->height, TEGRA210_MIN_HEIGHT, TEGRA210_MAX_HEIGHT);
+
+ /* Clamp the requested bytes per line value. If the maximum bytes per
+ * line value is zero, the module doesn't support user configurable
+ * line sizes. Override the requested value with the minimum in that
+ * case.
+ */
+ min_bpl = pix->width * bpp;
+ max_bpl = rounddown(TEGRA210_MAX_WIDTH, SURFACE_ALIGN_BYTES);
+ bpl = roundup(pix->bytesperline, SURFACE_ALIGN_BYTES);
+
+ pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
+ pix->sizeimage = pix->bytesperline * pix->height;
+ if (pix->pixelformat == V4L2_PIX_FMT_NV16)
+ pix->sizeimage *= 2;
+}
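The clamping above reduces to a few lines of integer arithmetic; a minimal worked example with assumed numbers (a 640x480 request at 2 bytes per pixel, asking for bytesperline = 1300):

	unsigned int min_bpl = 640 * 2;                             /* 1280 */
	unsigned int max_bpl = rounddown(TEGRA210_MAX_WIDTH,
					 SURFACE_ALIGN_BYTES);      /* 32768 */
	unsigned int bpl = roundup(1300, SURFACE_ALIGN_BYTES);      /* 1344 */
	unsigned int bytesperline = clamp(bpl, min_bpl, max_bpl);   /* 1344 */
	unsigned int sizeimage = bytesperline * 480;                /* 645120 */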
+
static int tegra_channel_capture_setup(struct tegra_vi_channel *chan,
u8 portno)
{
@@ -718,6 +803,9 @@ static const struct tegra_video_format tegra210_video_formats[] = {
/* Tegra210 VI operations */
static const struct tegra_vi_ops tegra210_vi_ops = {
+ .channel_host1x_syncpt_init = tegra210_channel_host1x_syncpt_init,
+ .channel_host1x_syncpt_free = tegra210_channel_host1x_syncpt_free,
+ .vi_fmt_align = tegra210_fmt_align,
.vi_start_streaming = tegra210_vi_start_streaming,
.vi_stop_streaming = tegra210_vi_stop_streaming,
};
@@ -730,8 +818,10 @@ const struct tegra_vi_soc tegra210_vi_soc = {
.hw_revision = 3,
.vi_max_channels = 6,
#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
+ .default_video_format = &tegra210_video_formats[0],
.vi_max_clk_hz = 499200000,
#else
+ .default_video_format = &tegra210_video_formats[4],
.vi_max_clk_hz = 998400000,
#endif
};
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 2f1aff7e8717..79284c3b6cae 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -30,15 +30,19 @@
#include "vi.h"
#include "video.h"
-#define MAX_CID_CONTROLS 1
-
-static const struct tegra_video_format tegra_default_format = {
- .img_dt = TEGRA_IMAGE_DT_RAW10,
- .bit_width = 10,
- .code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .bpp = 2,
- .img_fmt = TEGRA_IMAGE_FORMAT_DEF,
- .fourcc = V4L2_PIX_FMT_SRGGB10,
+#define MAX_CID_CONTROLS 3
+
+/**
+ * struct tegra_vi_graph_entity - Entity in the video graph
+ *
+ * @asd: subdev asynchronous registration information
+ * @entity: media entity from the corresponding V4L2 subdev
+ * @subdev: V4L2 subdev
+ */
+struct tegra_vi_graph_entity {
+ struct v4l2_async_subdev asd;
+ struct media_entity *entity;
+ struct v4l2_subdev *subdev;
};
static inline struct tegra_vi *
@@ -98,6 +102,7 @@ tegra_get_format_by_fourcc(struct tegra_vi *vi, u32 fourcc)
/*
* videobuf2 queue operations
*/
+
static int tegra_channel_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers,
unsigned int *nplanes,
@@ -113,6 +118,9 @@ static int tegra_channel_queue_setup(struct vb2_queue *vq,
sizes[0] = chan->format.sizeimage;
alloc_devs[0] = chan->vi->dev;
+ if (chan->vi->ops->channel_queue_setup)
+ chan->vi->ops->channel_queue_setup(chan);
+
return 0;
}
@@ -164,6 +172,9 @@ tegra_channel_get_remote_csi_subdev(struct tegra_vi_channel *chan)
return media_entity_to_v4l2_subdev(pad->entity);
}
+/*
+ * Walk up the chain until the initial source (e.g. image sensor)
+ */
struct v4l2_subdev *
tegra_channel_get_remote_source_subdev(struct tegra_vi_channel *chan)
{
@@ -190,49 +201,15 @@ tegra_channel_get_remote_source_subdev(struct tegra_vi_channel *chan)
static int tegra_channel_enable_stream(struct tegra_vi_channel *chan)
{
- struct v4l2_subdev *csi_subdev, *src_subdev;
- struct tegra_csi_channel *csi_chan;
- int ret, err;
+ struct v4l2_subdev *subdev;
+ int ret;
- /*
- * Tegra CSI receiver can detect the first LP to HS transition.
- * So, start the CSI stream-on prior to sensor stream-on and
- * vice-versa for stream-off.
- */
- csi_subdev = tegra_channel_get_remote_csi_subdev(chan);
- ret = v4l2_subdev_call(csi_subdev, video, s_stream, true);
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
+ ret = v4l2_subdev_call(subdev, video, s_stream, true);
if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
- if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
- return 0;
-
- csi_chan = v4l2_get_subdevdata(csi_subdev);
- /*
- * TRM has incorrectly documented to wait for done status from
- * calibration logic after CSI interface power on.
- * As per the design, calibration results are latched and applied
- * to the pads only when the link is in LP11 state which will happen
- * during the sensor stream-on.
- * CSI subdev stream-on triggers start of MIPI pads calibration.
- * Wait for calibration to finish here after sensor subdev stream-on.
- */
- src_subdev = tegra_channel_get_remote_source_subdev(chan);
- ret = v4l2_subdev_call(src_subdev, video, s_stream, true);
- err = tegra_mipi_finish_calibration(csi_chan->mipi);
-
- if (ret < 0 && ret != -ENOIOCTLCMD)
- goto err_disable_csi_stream;
-
- if (err < 0)
- dev_warn(csi_chan->csi->dev,
- "MIPI calibration failed: %d\n", err);
-
return 0;
-
-err_disable_csi_stream:
- v4l2_subdev_call(csi_subdev, video, s_stream, false);
- return ret;
}
static int tegra_channel_disable_stream(struct tegra_vi_channel *chan)
@@ -240,18 +217,6 @@ static int tegra_channel_disable_stream(struct tegra_vi_channel *chan)
struct v4l2_subdev *subdev;
int ret;
- /*
- * Stream-off subdevices in reverse order to stream-on.
- * Remote source subdev in TPG mode is same as CSI subdev.
- */
- subdev = tegra_channel_get_remote_source_subdev(chan);
- ret = v4l2_subdev_call(subdev, video, s_stream, false);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
-
- if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
- return 0;
-
subdev = tegra_channel_get_remote_csi_subdev(chan);
ret = v4l2_subdev_call(subdev, video, s_stream, false);
if (ret < 0 && ret != -ENOIOCTLCMD)
@@ -457,36 +422,6 @@ static int tegra_channel_get_format(struct file *file, void *fh,
return 0;
}
-static void tegra_channel_fmt_align(struct tegra_vi_channel *chan,
- struct v4l2_pix_format *pix,
- unsigned int bpp)
-{
- unsigned int min_bpl;
- unsigned int max_bpl;
- unsigned int bpl;
-
- /*
- * The transfer alignment requirements are expressed in bytes.
- * Clamp the requested width and height to the limits.
- */
- pix->width = clamp(pix->width, TEGRA_MIN_WIDTH, TEGRA_MAX_WIDTH);
- pix->height = clamp(pix->height, TEGRA_MIN_HEIGHT, TEGRA_MAX_HEIGHT);
-
- /* Clamp the requested bytes per line value. If the maximum bytes per
- * line value is zero, the module doesn't support user configurable
- * line sizes. Override the requested value with the minimum in that
- * case.
- */
- min_bpl = pix->width * bpp;
- max_bpl = rounddown(TEGRA_MAX_WIDTH, SURFACE_ALIGN_BYTES);
- bpl = roundup(pix->bytesperline, SURFACE_ALIGN_BYTES);
-
- pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
- pix->sizeimage = pix->bytesperline * pix->height;
- if (pix->pixelformat == V4L2_PIX_FMT_NV16)
- pix->sizeimage *= 2;
-}
-
static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
struct v4l2_pix_format *pix)
{
@@ -563,7 +498,7 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
return ret;
v4l2_fill_pix_format(pix, &fmt.format);
- tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
+ chan->vi->ops->vi_fmt_align(pix, fmtinfo->bpp);
__v4l2_subdev_state_free(sd_state);
@@ -616,7 +551,7 @@ static int tegra_channel_set_format(struct file *file, void *fh,
return ret;
v4l2_fill_pix_format(pix, &fmt.format);
- tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
+ chan->vi->ops->vi_fmt_align(pix, fmtinfo->bpp);
chan->format = *pix;
chan->fmtinfo = fmtinfo;
@@ -652,7 +587,7 @@ static int tegra_channel_set_subdev_active_fmt(struct tegra_vi_channel *chan)
chan->format.bytesperline = chan->format.width * chan->fmtinfo->bpp;
chan->format.sizeimage = chan->format.bytesperline *
chan->format.height;
- tegra_channel_fmt_align(chan, &chan->format, chan->fmtinfo->bpp);
+ chan->vi->ops->vi_fmt_align(&chan->format, chan->fmtinfo->bpp);
tegra_channel_update_gangports(chan);
return 0;
@@ -821,7 +756,7 @@ static int tegra_channel_s_dv_timings(struct file *file, void *fh,
chan->format.height = bt->height;
chan->format.bytesperline = bt->width * chan->fmtinfo->bpp;
chan->format.sizeimage = chan->format.bytesperline * bt->height;
- tegra_channel_fmt_align(chan, &chan->format, chan->fmtinfo->bpp);
+ chan->vi->ops->vi_fmt_align(&chan->format, chan->fmtinfo->bpp);
tegra_channel_update_gangports(chan);
return 0;
@@ -977,6 +912,12 @@ static int vi_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY:
chan->syncpt_timeout_retry = ctrl->val;
break;
+ case V4L2_CID_HFLIP:
+ chan->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ chan->vflip = ctrl->val;
+ break;
default:
return -EINVAL;
}
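With the two new cases above and the control registration added further down, flipping becomes reachable through the standard V4L2 control interface; a minimal userspace sketch (device node assumed, error handling omitted):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDWR);   /* assumed device node */
		struct v4l2_control ctrl = {
			.id = V4L2_CID_HFLIP,           /* or V4L2_CID_VFLIP */
			.value = 1,
		};

		return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	}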
@@ -1048,6 +989,12 @@ static int tegra_channel_setup_ctrl_handler(struct tegra_vi_channel *chan)
v4l2_ctrl_handler_free(&chan->ctrl_handler);
return ret;
}
+
+ if (chan->vi->soc->has_h_v_flip) {
+ v4l2_ctrl_new_std(&chan->ctrl_handler, &vi_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&chan->ctrl_handler, &vi_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ }
+
#endif
/* setup the controls */
@@ -1119,7 +1066,7 @@ static int vi_fmts_bitmap_init(struct tegra_vi_channel *chan)
* there are no matched formats.
*/
if (!match_code) {
- match_code = tegra_default_format.code;
+ match_code = chan->vi->soc->default_video_format->code;
index = tegra_get_format_idx_by_code(chan->vi, match_code, 0);
if (WARN_ON(index < 0))
return -EINVAL;
@@ -1133,21 +1080,11 @@ static int vi_fmts_bitmap_init(struct tegra_vi_channel *chan)
return 0;
}
-static void tegra_channel_host1x_syncpts_free(struct tegra_vi_channel *chan)
-{
- int i;
-
- for (i = 0; i < chan->numgangports; i++) {
- host1x_syncpt_put(chan->mw_ack_sp[i]);
- host1x_syncpt_put(chan->frame_start_sp[i]);
- }
-}
-
static void tegra_channel_cleanup(struct tegra_vi_channel *chan)
{
v4l2_ctrl_handler_free(&chan->ctrl_handler);
media_entity_cleanup(&chan->video.entity);
- tegra_channel_host1x_syncpts_free(chan);
+ chan->vi->ops->channel_host1x_syncpt_free(chan);
mutex_destroy(&chan->video_lock);
}
@@ -1165,42 +1102,6 @@ void tegra_channels_cleanup(struct tegra_vi *vi)
}
}
-static int tegra_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
-{
- struct tegra_vi *vi = chan->vi;
- unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
- struct host1x_syncpt *fs_sp;
- struct host1x_syncpt *mw_sp;
- int ret, i;
-
- for (i = 0; i < chan->numgangports; i++) {
- fs_sp = host1x_syncpt_request(&vi->client, flags);
- if (!fs_sp) {
- dev_err(vi->dev, "failed to request frame start syncpoint\n");
- ret = -ENOMEM;
- goto free_syncpts;
- }
-
- mw_sp = host1x_syncpt_request(&vi->client, flags);
- if (!mw_sp) {
- dev_err(vi->dev, "failed to request memory ack syncpoint\n");
- host1x_syncpt_put(fs_sp);
- ret = -ENOMEM;
- goto free_syncpts;
- }
-
- chan->frame_start_sp[i] = fs_sp;
- chan->mw_ack_sp[i] = mw_sp;
- spin_lock_init(&chan->sp_incr_lock[i]);
- }
-
- return 0;
-
-free_syncpts:
- tegra_channel_host1x_syncpts_free(chan);
- return ret;
-}
-
static int tegra_channel_init(struct tegra_vi_channel *chan)
{
struct tegra_vi *vi = chan->vi;
@@ -1216,7 +1117,7 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
init_waitqueue_head(&chan->done_wait);
/* initialize the video format */
- chan->fmtinfo = &tegra_default_format;
+ chan->fmtinfo = chan->vi->soc->default_video_format;
chan->format.pixelformat = chan->fmtinfo->fourcc;
chan->format.colorspace = V4L2_COLORSPACE_SRGB;
chan->format.field = V4L2_FIELD_NONE;
@@ -1224,9 +1125,9 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
chan->format.height = TEGRA_DEF_HEIGHT;
chan->format.bytesperline = TEGRA_DEF_WIDTH * chan->fmtinfo->bpp;
chan->format.sizeimage = chan->format.bytesperline * TEGRA_DEF_HEIGHT;
- tegra_channel_fmt_align(chan, &chan->format, chan->fmtinfo->bpp);
+ vi->ops->vi_fmt_align(&chan->format, chan->fmtinfo->bpp);
- ret = tegra_channel_host1x_syncpt_init(chan);
+ ret = vi->ops->channel_host1x_syncpt_init(chan);
if (ret)
return ret;
@@ -1289,7 +1190,7 @@ free_v4l2_ctrl_hdl:
cleanup_media:
media_entity_cleanup(&chan->video.entity);
free_syncpts:
- tegra_channel_host1x_syncpts_free(chan);
+ vi->ops->channel_host1x_syncpt_free(chan);
return ret;
}
@@ -1351,7 +1252,7 @@ static int tegra_vi_channels_alloc(struct tegra_vi *vi)
struct device_node *node = vi->dev->of_node;
struct device_node *ep = NULL;
struct device_node *ports;
- struct device_node *port;
+ struct device_node *port = NULL;
unsigned int port_num;
struct device_node *parent;
struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
@@ -1360,7 +1261,7 @@ static int tegra_vi_channels_alloc(struct tegra_vi *vi)
ports = of_get_child_by_name(node, "ports");
if (!ports)
- return -ENODEV;
+ return dev_err_probe(vi->dev, -ENODEV, "%pOF: missing 'ports' node\n", node);
for_each_child_of_node(ports, port) {
if (!of_node_name_eq(port, "port"))
@@ -1374,7 +1275,6 @@ static int tegra_vi_channels_alloc(struct tegra_vi *vi)
dev_err(vi->dev, "invalid port num %d for %pOF\n",
port_num, port);
ret = -EINVAL;
- of_node_put(port);
goto cleanup;
}
@@ -1397,13 +1297,12 @@ static int tegra_vi_channels_alloc(struct tegra_vi *vi)
lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
ret = tegra_vi_channel_alloc(vi, port_num, port, lanes);
- if (ret < 0) {
- of_node_put(port);
+ if (ret < 0)
goto cleanup;
- }
}
cleanup:
+ of_node_put(port);
of_node_put(ports);
return ret;
}
@@ -1858,10 +1757,10 @@ static int tegra_vi_graph_init(struct tegra_vi *vi)
* Walk the links to parse the full graph. Each channel will have
* one endpoint of the composite node. Start by parsing the
* composite node and parse the remote entities in turn.
- * Each channel will register v4l2 async notifier to make the graph
- * independent between the channels so we can the current channel
+ * Each channel will register a v4l2 async notifier to make the graph
+ * independent between the channels so we can skip the current channel
* in case of something wrong during graph parsing and continue with
- * next channels.
+ * the next channels.
*/
list_for_each_entry(chan, &vi->vi_chans, list) {
struct fwnode_handle *ep, *remote;
@@ -1920,11 +1819,8 @@ static int tegra_vi_init(struct host1x_client *client)
ret = tegra_vi_tpg_channels_alloc(vi);
else
ret = tegra_vi_channels_alloc(vi);
- if (ret < 0) {
- dev_err(vi->dev,
- "failed to allocate vi channels: %d\n", ret);
+ if (ret < 0)
goto free_chans;
- }
ret = tegra_vi_channels_init(vi);
if (ret < 0)
@@ -2026,6 +1922,9 @@ static int tegra_vi_probe(struct platform_device *pdev)
vi->client.ops = &vi_client_ops;
vi->client.dev = &pdev->dev;
+ if (vi->ops->vi_enable)
+ vi->ops->vi_enable(vi, true);
+
ret = host1x_client_register(&vi->client);
if (ret < 0) {
dev_err(&pdev->dev,
@@ -2036,6 +1935,8 @@ static int tegra_vi_probe(struct platform_device *pdev)
return 0;
rpm_disable:
+ if (vi->ops->vi_enable)
+ vi->ops->vi_enable(vi, false);
pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -2046,12 +1947,17 @@ static int tegra_vi_remove(struct platform_device *pdev)
host1x_client_unregister(&vi->client);
+ if (vi->ops->vi_enable)
+ vi->ops->vi_enable(vi, false);
pm_runtime_disable(&pdev->dev);
return 0;
}
static const struct of_device_id tegra_vi_of_id_table[] = {
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ { .compatible = "nvidia,tegra20-vi", .data = &tegra20_vi_soc },
+#endif
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
{ .compatible = "nvidia,tegra210-vi", .data = &tegra210_vi_soc },
#endif
diff --git a/drivers/staging/media/tegra-video/vi.h b/drivers/staging/media/tegra-video/vi.h
index a68e2c02c7b0..1e6a5caa7082 100644
--- a/drivers/staging/media/tegra-video/vi.h
+++ b/drivers/staging/media/tegra-video/vi.h
@@ -25,17 +25,11 @@
#define V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY (V4L2_CTRL_CLASS_CAMERA | 0x1001)
-#define TEGRA_MIN_WIDTH 32U
-#define TEGRA_MAX_WIDTH 32768U
-#define TEGRA_MIN_HEIGHT 32U
-#define TEGRA_MAX_HEIGHT 32768U
-
#define TEGRA_DEF_WIDTH 1920
#define TEGRA_DEF_HEIGHT 1080
#define TEGRA_IMAGE_FORMAT_DEF 32
#define MAX_FORMAT_NUM 64
-#define SURFACE_ALIGN_BYTES 64
enum tegra_vi_pg_mode {
TEGRA_VI_PG_DISABLED = 0,
@@ -43,8 +37,17 @@ enum tegra_vi_pg_mode {
TEGRA_VI_PG_PATCH,
};
+struct tegra_vi;
+struct tegra_vi_channel;
+
/**
* struct tegra_vi_ops - Tegra VI operations
+ * @vi_enable: soc-specific operations needed to enable/disable the VI peripheral
+ * @channel_host1x_syncpt_init: initialize synchronization points
+ * @channel_host1x_syncpt_free: free all synchronization points
+ * @vi_fmt_align: modify `pix` to fit the hardware alignment
+ * requirements and fill image geometry
+ * @channel_queue_setup: additional operations at the end of vb2_ops::queue_setup
* @vi_start_streaming: starts media pipeline, subdevice streaming, sets up
* VI for capture and runs capture start and capture finish
* kthreads for capturing frames to buffer and returns them back.
@@ -52,6 +55,11 @@ enum tegra_vi_pg_mode {
* back any queued buffers.
*/
struct tegra_vi_ops {
+ int (*vi_enable)(struct tegra_vi *vi, bool on);
+ int (*channel_host1x_syncpt_init)(struct tegra_vi_channel *chan);
+ void (*channel_host1x_syncpt_free)(struct tegra_vi_channel *chan);
+ void (*vi_fmt_align)(struct v4l2_pix_format *pix, unsigned int bpp);
+ void (*channel_queue_setup)(struct tegra_vi_channel *chan);
int (*vi_start_streaming)(struct vb2_queue *vq, u32 count);
void (*vi_stop_streaming)(struct vb2_queue *vq);
};
@@ -61,18 +69,22 @@ struct tegra_vi_ops {
*
* @video_formats: supported video formats
* @nformats: total video formats
+ * @default_video_format: default video format (pointer to a @video_formats item)
* @ops: vi operations
* @hw_revision: VI hw_revision
* @vi_max_channels: supported max streaming channels
* @vi_max_clk_hz: VI clock max frequency
+ * @has_h_v_flip: the chip can do H and V flip, and the driver implements it
*/
struct tegra_vi_soc {
const struct tegra_video_format *video_formats;
const unsigned int nformats;
+ const struct tegra_video_format *default_video_format;
const struct tegra_vi_ops *ops;
u32 hw_revision;
unsigned int vi_max_channels;
unsigned int vi_max_clk_hz;
+ bool has_h_v_flip:1;
};
/**
@@ -99,19 +111,6 @@ struct tegra_vi {
};
/**
- * struct tegra_vi_graph_entity - Entity in the video graph
- *
- * @asd: subdev asynchronous registration information
- * @entity: media entity from the corresponding V4L2 subdev
- * @subdev: V4L2 subdev
- */
-struct tegra_vi_graph_entity {
- struct v4l2_async_subdev asd;
- struct media_entity *entity;
- struct v4l2_subdev *subdev;
-};
-
-/**
* struct tegra_vi_channel - Tegra video channel
*
* @list: list head for this entry
@@ -122,11 +121,13 @@ struct tegra_vi_graph_entity {
* @vi: Tegra video input device structure
* @frame_start_sp: host1x syncpoint pointer to synchronize programmed capture
* start condition with hardware frame start events through host1x
- * syncpoint counters.
+ * syncpoint counters. (Tegra210)
* @mw_ack_sp: host1x syncpoint pointer to synchronize programmed memory write
* ack trigger condition with hardware memory write done at end of
- * frame through host1x syncpoint counters.
+ * frame through host1x syncpoint counters (on Tegra20, used for the
+ * OUT_1 syncpt)
* @sp_incr_lock: protects cpu syncpoint increment.
+ * @next_out_sp_idx: next expected value for mw_ack_sp[0], i.e. OUT_1 (Tegra20)
*
* @kthread_start_capture: kthread to start capture of single frame when
* vb buffer is available. This thread programs VI CSI hardware
@@ -151,6 +152,12 @@ struct tegra_vi_graph_entity {
* @queue: vb2 buffers queue
* @sequence: V4L2 buffers sequence number
*
+ * @addr_offset_u: U plane base address, relative to buffer base address (only for planar)
+ * @addr_offset_v: V plane base address, relative to buffer base address (only for planar)
+ * @start_offset: 1st Y byte to write, relative to buffer base address (for H/V flip)
+ * @start_offset_u: 1st U byte to write, relative to buffer base address (for H/V flip)
+ * @start_offset_v: 1st V byte to write, relative to buffer base address (for H/V flip)
+ *
* @capture: list of queued buffers for capture
* @start_lock: protects the capture queued list
* @done: list of capture done queued buffers
@@ -167,6 +174,9 @@ struct tegra_vi_graph_entity {
* @tpg_fmts_bitmap: a bitmap for supported TPG formats
* @pg_mode: test pattern generator mode (disabled/direct/patch)
* @notifier: V4L2 asynchronous subdevs notifier
+ *
+ * @hflip: Horizontal flip is enabled
+ * @vflip: Vertical flip is enabled
*/
struct tegra_vi_channel {
struct list_head list;
@@ -180,6 +190,7 @@ struct tegra_vi_channel {
struct host1x_syncpt *mw_ack_sp[GANG_PORTS_MAX];
/* protects the cpu syncpoint increment */
spinlock_t sp_incr_lock[GANG_PORTS_MAX];
+ u32 next_out_sp_idx;
struct task_struct *kthread_start_capture;
wait_queue_head_t start_wait;
@@ -191,6 +202,12 @@ struct tegra_vi_channel {
struct vb2_queue queue;
u32 sequence;
+ unsigned int addr_offset_u;
+ unsigned int addr_offset_v;
+ unsigned int start_offset;
+ unsigned int start_offset_u;
+ unsigned int start_offset_v;
+
struct list_head capture;
/* protects the capture queued list */
spinlock_t start_lock;
@@ -210,6 +227,9 @@ struct tegra_vi_channel {
enum tegra_vi_pg_mode pg_mode;
struct v4l2_async_notifier notifier;
+
+ bool hflip:1;
+ bool vflip:1;
};
/**
@@ -260,11 +280,11 @@ enum tegra_image_dt {
/**
* struct tegra_video_format - Tegra video format description
*
- * @img_dt: image data type
- * @bit_width: format width in bits per component
+ * @img_dt: MIPI CSI-2 data type (for CSI-2 only)
+ * @bit_width: format width in bits per component (for CSI/Tegra210 only)
* @code: media bus format code
* @bpp: bytes per pixel (when stored in memory)
- * @img_fmt: image format
+ * @img_fmt: image format (for CSI/Tegra210 only)
* @fourcc: V4L2 pixel format FCC identifier
*/
struct tegra_video_format {
@@ -276,6 +296,9 @@ struct tegra_video_format {
u32 fourcc;
};
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+extern const struct tegra_vi_soc tegra20_vi_soc;
+#endif
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
extern const struct tegra_vi_soc tegra210_vi_soc;
#endif
diff --git a/drivers/staging/media/tegra-video/video.c b/drivers/staging/media/tegra-video/video.c
index d966b319553f..074ad0dc56ca 100644
--- a/drivers/staging/media/tegra-video/video.c
+++ b/drivers/staging/media/tegra-video/video.c
@@ -123,6 +123,10 @@ static int host1x_video_remove(struct host1x_device *dev)
}
static const struct of_device_id host1x_video_subdevs[] = {
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ { .compatible = "nvidia,tegra20-vip", },
+ { .compatible = "nvidia,tegra20-vi", },
+#endif
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
{ .compatible = "nvidia,tegra210-csi", },
{ .compatible = "nvidia,tegra210-vi", },
@@ -141,6 +145,7 @@ static struct host1x_driver host1x_video_driver = {
static struct platform_driver * const drivers[] = {
&tegra_csi_driver,
+ &tegra_vip_driver,
&tegra_vi_driver,
};
diff --git a/drivers/staging/media/tegra-video/video.h b/drivers/staging/media/tegra-video/video.h
index fadaf2189dc9..7275affa6558 100644
--- a/drivers/staging/media/tegra-video/video.h
+++ b/drivers/staging/media/tegra-video/video.h
@@ -12,7 +12,6 @@
#include <media/v4l2-device.h>
#include "vi.h"
-#include "csi.h"
struct tegra_video_device {
struct v4l2_device v4l2_dev;
@@ -25,5 +24,6 @@ int tegra_v4l2_nodes_setup_tpg(struct tegra_video_device *vid);
void tegra_v4l2_nodes_cleanup_tpg(struct tegra_video_device *vid);
extern struct platform_driver tegra_vi_driver;
+extern struct platform_driver tegra_vip_driver;
extern struct platform_driver tegra_csi_driver;
#endif
diff --git a/drivers/staging/media/tegra-video/vip.c b/drivers/staging/media/tegra-video/vip.c
new file mode 100644
index 000000000000..a1ab886acc18
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vip.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Parallel video capture module (VIP) for the Tegra VI.
+ *
+ * This file implements the VIP-specific infrastructure.
+ *
+ * Copyright (C) 2023 SKIDATA GmbH
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-fwnode.h>
+
+#include "vip.h"
+
+static inline struct tegra_vip *host1x_client_to_vip(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_vip, client);
+}
+
+static inline struct tegra_vip_channel *subdev_to_vip_channel(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct tegra_vip_channel, subdev);
+}
+
+static inline struct tegra_vip *vip_channel_to_vip(struct tegra_vip_channel *chan)
+{
+ return container_of(chan, struct tegra_vip, chan);
+}
+
+/* Find the previous subdev in the pipeline (i.e. the one connected to our sink pad) */
+static struct v4l2_subdev *tegra_vip_channel_get_prev_subdev(struct tegra_vip_channel *chan)
+{
+ struct media_pad *remote_pad;
+
+ remote_pad = media_pad_remote_pad_first(&chan->pads[TEGRA_VIP_PAD_SINK]);
+ if (!remote_pad)
+ return NULL;
+
+ return media_entity_to_v4l2_subdev(remote_pad->entity);
+}
+
+static int tegra_vip_enable_stream(struct v4l2_subdev *subdev)
+{
+ struct tegra_vip_channel *vip_chan = subdev_to_vip_channel(subdev);
+ struct tegra_vip *vip = vip_channel_to_vip(vip_chan);
+ struct v4l2_subdev *prev_subdev = tegra_vip_channel_get_prev_subdev(vip_chan);
+ int err;
+
+ err = pm_runtime_resume_and_get(vip->dev);
+ if (err)
+ return dev_err_probe(vip->dev, err, "failed to get runtime PM\n");
+
+ err = vip->soc->ops->vip_start_streaming(vip_chan);
+ if (err < 0)
+ goto err_start_streaming;
+
+ err = v4l2_subdev_call(prev_subdev, video, s_stream, true);
+ if (err < 0 && err != -ENOIOCTLCMD)
+ goto err_prev_subdev_start_stream;
+
+ return 0;
+
+err_prev_subdev_start_stream:
+err_start_streaming:
+ pm_runtime_put(vip->dev);
+ return err;
+}
+
+static int tegra_vip_disable_stream(struct v4l2_subdev *subdev)
+{
+ struct tegra_vip_channel *vip_chan = subdev_to_vip_channel(subdev);
+ struct tegra_vip *vip = vip_channel_to_vip(vip_chan);
+ struct v4l2_subdev *prev_subdev = tegra_vip_channel_get_prev_subdev(vip_chan);
+
+ v4l2_subdev_call(prev_subdev, video, s_stream, false);
+
+ pm_runtime_put(vip->dev);
+
+ return 0;
+}
+
+static int tegra_vip_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ int err;
+
+ if (enable)
+ err = tegra_vip_enable_stream(subdev);
+ else
+ err = tegra_vip_disable_stream(subdev);
+
+ return err;
+}
+
+static const struct v4l2_subdev_video_ops tegra_vip_video_ops = {
+ .s_stream = tegra_vip_s_stream,
+};
+
+static const struct v4l2_subdev_ops tegra_vip_ops = {
+ .video = &tegra_vip_video_ops,
+};
+
+static int tegra_vip_channel_of_parse(struct tegra_vip *vip)
+{
+ struct device *dev = vip->dev;
+ struct device_node *np = dev->of_node;
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_PARALLEL
+ };
+ struct fwnode_handle *fwh;
+ struct device_node *ep;
+ unsigned int num_pads;
+ int err;
+
+ dev_dbg(dev, "Parsing %pOF", np);
+
+ ep = of_graph_get_endpoint_by_regs(np, 0, 0);
+ if (!ep) {
+ err = -EINVAL;
+ dev_err_probe(dev, err, "%pOF: error getting endpoint node\n", np);
+ goto err_node_put;
+ }
+
+ fwh = of_fwnode_handle(ep);
+ err = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
+ of_node_put(ep);
+ if (err) {
+ dev_err_probe(dev, err, "%pOF: failed to parse v4l2 endpoint\n", np);
+ goto err_node_put;
+ }
+
+ num_pads = of_graph_get_endpoint_count(np);
+ if (num_pads != TEGRA_VIP_PADS_NUM) {
+ err = -EINVAL;
+ dev_err_probe(dev, err, "%pOF: need 2 pads, got %d\n", np, num_pads);
+ goto err_node_put;
+ }
+
+ vip->chan.of_node = of_node_get(np);
+ vip->chan.pads[TEGRA_VIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ vip->chan.pads[TEGRA_VIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ return 0;
+
+err_node_put:
+ of_node_put(np);
+ return err;
+}
+
+static int tegra_vip_channel_init(struct tegra_vip *vip)
+{
+ struct v4l2_subdev *subdev;
+ int err;
+
+ subdev = &vip->chan.subdev;
+ v4l2_subdev_init(subdev, &tegra_vip_ops);
+ subdev->dev = vip->dev;
+ snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s",
+ kbasename(vip->chan.of_node->full_name));
+
+ v4l2_set_subdevdata(subdev, &vip->chan);
+ subdev->fwnode = of_fwnode_handle(vip->chan.of_node);
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ err = media_entity_pads_init(&subdev->entity, TEGRA_VIP_PADS_NUM, vip->chan.pads);
+ if (err)
+ return dev_err_probe(vip->dev, err, "failed to initialize media entity\n");
+
+ err = v4l2_async_register_subdev(subdev);
+ if (err) {
+ dev_err_probe(vip->dev, err, "failed to register subdev\n");
+ goto err_register_subdev;
+ }
+
+ return 0;
+
+err_register_subdev:
+ media_entity_cleanup(&subdev->entity);
+ return err;
+}
+
+static int tegra_vip_init(struct host1x_client *client)
+{
+ struct tegra_vip *vip = host1x_client_to_vip(client);
+ int err;
+
+ err = tegra_vip_channel_of_parse(vip);
+ if (err)
+ return err;
+
+ err = tegra_vip_channel_init(vip);
+ if (err)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ of_node_put(vip->chan.of_node);
+ return err;
+}
+
+static int tegra_vip_exit(struct host1x_client *client)
+{
+ struct tegra_vip *vip = host1x_client_to_vip(client);
+ struct v4l2_subdev *subdev = &vip->chan.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ of_node_put(vip->chan.of_node);
+
+ return 0;
+}
+
+static const struct host1x_client_ops vip_client_ops = {
+ .init = tegra_vip_init,
+ .exit = tegra_vip_exit,
+};
+
+static int tegra_vip_probe(struct platform_device *pdev)
+{
+ struct tegra_vip *vip;
+ int err;
+
+ dev_dbg(&pdev->dev, "Probing VIP \"%s\" from %pOF\n", pdev->name, pdev->dev.of_node);
+
+ vip = devm_kzalloc(&pdev->dev, sizeof(*vip), GFP_KERNEL);
+ if (!vip)
+ return -ENOMEM;
+
+ vip->soc = of_device_get_match_data(&pdev->dev);
+
+ vip->dev = &pdev->dev;
+ platform_set_drvdata(pdev, vip);
+
+ /* initialize host1x interface */
+ INIT_LIST_HEAD(&vip->client.list);
+ vip->client.ops = &vip_client_ops;
+ vip->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&vip->client);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to register host1x client\n");
+
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+}
+
+static int tegra_vip_remove(struct platform_device *pdev)
+{
+ struct tegra_vip *vip = platform_get_drvdata(pdev);
+
+ host1x_client_unregister(&vip->client);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+extern const struct tegra_vip_soc tegra20_vip_soc;
+#endif
+
+static const struct of_device_id tegra_vip_of_id_table[] = {
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ { .compatible = "nvidia,tegra20-vip", .data = &tegra20_vip_soc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_vip_of_id_table);
+
+struct platform_driver tegra_vip_driver = {
+ .driver = {
+ .name = "tegra-vip",
+ .of_match_table = tegra_vip_of_id_table,
+ },
+ .probe = tegra_vip_probe,
+ .remove = tegra_vip_remove,
+};
diff --git a/drivers/staging/media/tegra-video/vip.h b/drivers/staging/media/tegra-video/vip.h
new file mode 100644
index 000000000000..32ceaaccbba2
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vip.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 SKIDATA GmbH
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+#ifndef __TEGRA_VIP_H__
+#define __TEGRA_VIP_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+enum {
+ TEGRA_VIP_PAD_SINK,
+ TEGRA_VIP_PAD_SOURCE,
+ TEGRA_VIP_PADS_NUM,
+};
+
+struct tegra_vip;
+
+/**
+ * struct tegra_vip_channel - Tegra VIP (parallel video capture) channel
+ *
+ * @subdev: V4L2 subdevice associated with this channel
+ * @pads: media pads for the subdevice entity
+ * @of_node: vip device tree node
+ */
+struct tegra_vip_channel {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[TEGRA_VIP_PADS_NUM];
+ struct device_node *of_node;
+};
+
+/**
+ * struct tegra_vip_ops - Tegra VIP operations
+ *
+ * @vip_start_streaming: programs vip hardware to enable streaming.
+ */
+struct tegra_vip_ops {
+ int (*vip_start_streaming)(struct tegra_vip_channel *vip_chan);
+};
+
+/**
+ * struct tegra_vip_soc - NVIDIA Tegra VIP SoC structure
+ *
+ * @ops: vip hardware operations
+ */
+struct tegra_vip_soc {
+ const struct tegra_vip_ops *ops;
+};
+
+/**
+ * struct tegra_vip - NVIDIA Tegra VIP device structure
+ *
+ * @dev: device struct
+ * @client: host1x_client struct
+ * @soc: pointer to SoC data structure
+ * @chan: the VIP channel
+ */
+struct tegra_vip {
+ struct device *dev;
+ struct host1x_client client;
+ const struct tegra_vip_soc *soc;
+ struct tegra_vip_channel chan;
+};
+
+#endif
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
index 4e85e681922f..ce869280a056 100644
--- a/drivers/staging/most/i2c/i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -362,7 +362,7 @@ static struct i2c_driver i2c_driver = {
.driver = {
.name = "hdm_i2c",
},
- .probe_new = i2c_probe,
+ .probe = i2c_probe,
.remove = i2c_remove,
.id_table = i2c_id,
};
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 2fba52e0bd7b..d5271eac14f6 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -779,7 +779,7 @@ static struct i2c_driver dcon_driver = {
},
.class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
.id_table = dcon_idtable,
- .probe_new = dcon_probe,
+ .probe = dcon_probe,
.remove = dcon_remove,
.detect = dcon_detect,
.address_list = normal_i2c,
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index f08fdf06d566..220e157d4a5e 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1019,7 +1019,6 @@ static int setup_gpio(struct pi433_device *device)
}
/* configure the pin */
- gpiod_unexport(device->gpiod[i]);
retval = gpiod_direction_input(device->gpiod[i]);
if (retval)
return retval;
diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192e/rtl8192e/Kconfig
index eae8167f79dd..f8f774a16295 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig
@@ -3,6 +3,7 @@ config RTL8192E
tristate "RealTek RTL8192E Wireless LAN NIC driver"
depends on PCI && WLAN && RTLLIB
depends on m
+ select CFG80211
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index 385cca79f484..c229fd244a48 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
@@ -85,7 +85,6 @@ enum rtl819x_loopback {
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-
enum version_8190_loopback {
VERSION_8190_BD = 0x3,
VERSION_8190_BE
@@ -139,8 +138,6 @@ struct tx_fwinfo_8190pci {
u32 Retry_Limit:4;
u32 Reserved2:1;
u32 PacketID:13;
-
-
};
struct phy_ofdm_rx_status_rxsc_sgien_exintfflag {
@@ -172,7 +169,6 @@ struct phy_sts_cck_819xpci {
u8 cck_agc_rpt;
};
-
#define PHY_RSSI_SLID_WIN_MAX 100
#define PHY_Beacon_RSSI_SLID_WIN_MAX 10
@@ -214,7 +210,6 @@ struct tx_desc {
u32 Reserved7;
};
-
struct tx_desc_cmd {
u16 PktSize;
u8 Reserved1;
@@ -256,10 +251,8 @@ struct rx_desc {
u32 Reserved3;
u32 BufferAddress;
-
};
-
struct rx_fwinfo {
u16 Reserved1:12;
u16 PartAggr:1;
@@ -278,7 +271,6 @@ struct rx_fwinfo {
u8 Reserved4:1;
u32 TSFL;
-
};
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index bb4539e337c8..7061f1cf4d3a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -73,11 +73,11 @@ bool rtl92e_config_rf(struct net_device *dev)
break;
case RF90_PATH_B:
u4RegValue = rtl92e_get_bb_reg(dev, pPhyReg->rfintfs,
- bRFSI_RFENV<<16);
+ bRFSI_RFENV << 16);
break;
}
- rtl92e_set_bb_reg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
rtl92e_set_bb_reg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
@@ -117,7 +117,7 @@ bool rtl92e_config_rf(struct net_device *dev)
break;
case RF90_PATH_B:
rtl92e_set_bb_reg(dev, pPhyReg->rfintfs,
- bRFSI_RFENV<<16, u4RegValue);
+ bRFSI_RFENV << 16, u4RegValue);
break;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index aed53fedeb61..e5925899402c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -36,8 +36,8 @@ void rtl92e_start_beacon(struct net_device *dev)
rtl92e_writeb(dev, BCN_ERR_THRESH, 100);
- BcnTimeCfg |= BcnCW<<BCN_TCFG_CW_SHIFT;
- BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
+ BcnTimeCfg |= BcnCW << BCN_TCFG_CW_SHIFT;
+ BcnTimeCfg |= BcnIFS << BCN_TCFG_IFS;
rtl92e_writew(dev, BCN_TCFG, BcnTimeCfg);
rtl92e_irq_enable(dev);
}
@@ -46,23 +46,21 @@ static void _rtl92e_update_msr(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 msr;
- enum led_ctl_mode LedAction = LED_CTL_NO_LINK;
msr = rtl92e_readb(dev, MSR);
msr &= ~MSR_LINK_MASK;
switch (priv->rtllib->iw_mode) {
case IW_MODE_INFRA:
- if (priv->rtllib->state == RTLLIB_LINKED)
+ if (priv->rtllib->link_state == MAC80211_LINKED)
msr |= MSR_LINK_MANAGED;
- LedAction = LED_CTL_LINK;
break;
case IW_MODE_ADHOC:
- if (priv->rtllib->state == RTLLIB_LINKED)
+ if (priv->rtllib->link_state == MAC80211_LINKED)
msr |= MSR_LINK_ADHOC;
break;
case IW_MODE_MASTER:
- if (priv->rtllib->state == RTLLIB_LINKED)
+ if (priv->rtllib->link_state == MAC80211_LINKED)
msr |= MSR_LINK_MASTER;
break;
default:
@@ -70,8 +68,6 @@ static void _rtl92e_update_msr(struct net_device *dev)
}
rtl92e_writeb(dev, MSR, msr);
- if (priv->rtllib->LedControlHandler)
- priv->rtllib->LedControlHandler(dev, LedAction);
}
void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
@@ -111,7 +107,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
}
rtl92e_writeb(dev, MSR, btMsr);
-
}
break;
@@ -130,7 +125,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
rtl92e_writel(dev, RCR, RegRCR);
priv->receive_config = RegRCR;
-
}
break;
@@ -168,7 +162,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
&priv->rtllib->current_network.qos_data.parameters;
u1bAIFS = qop->aifs[pAcParam] *
- ((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
+ ((mode & (WIRELESS_MODE_G | WIRELESS_MODE_N_24G)) ? 9 : 20) + aSifsTime;
rtl92e_dm_init_edca_turbo(dev);
@@ -256,7 +250,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
case HW_VAR_SIFS:
rtl92e_writeb(dev, SIFS, val[0]);
- rtl92e_writeb(dev, SIFS+1, val[0]);
+ rtl92e_writeb(dev, SIFS + 1, val[0]);
break;
case HW_VAR_RF_TIMING:
@@ -270,7 +264,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
default:
break;
}
-
}
static void _rtl92e_read_eeprom_info(struct net_device *dev)
@@ -299,12 +292,12 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
(EEPROM_Customer_ID >> 1)) >> 8;
priv->eeprom_customer_id = usValue & 0xff;
usValue = rtl92e_eeprom_read(dev,
- EEPROM_ICVersion_ChannelPlan>>1);
- priv->eeprom_chnl_plan = usValue&0xff;
- IC_Version = (usValue & 0xff00)>>8;
+ EEPROM_ICVersion_ChannelPlan >> 1);
+ priv->eeprom_chnl_plan = usValue & 0xff;
+ IC_Version = (usValue & 0xff00) >> 8;
ICVer8192 = IC_Version & 0xf;
- ICVer8256 = (IC_Version & 0xf0)>>4;
+ ICVer8256 = (IC_Version & 0xf0) >> 4;
if (ICVer8192 == 0x2) {
if (ICVer8256 == 0x5)
priv->card_8192_version = VERSION_8190_BE;
@@ -354,7 +347,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
if (!priv->autoload_fail_flag)
priv->eeprom_thermal_meter = ((rtl92e_eeprom_read(dev,
- (EEPROM_ThermalMeter>>1))) &
+ (EEPROM_ThermalMeter >> 1))) &
0xff00) >> 8;
else
priv->eeprom_thermal_meter = EEPROM_Default_ThermalMeter;
@@ -596,7 +589,7 @@ start:
RSVD_FW_QUEUE_PAGE_MGNT_SHIFT);
rtl92e_writel(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW |
NUM_OF_PAGE_IN_FW_QUEUE_BCN <<
- RSVD_FW_QUEUE_PAGE_BCN_SHIFT|
+ RSVD_FW_QUEUE_PAGE_BCN_SHIFT |
NUM_OF_PAGE_IN_FW_QUEUE_PUB <<
RSVD_FW_QUEUE_PAGE_PUB_SHIFT);
@@ -605,7 +598,7 @@ start:
ulRegRead = (0xFFF00000 & rtl92e_readl(dev, RRSR)) |
RATE_ALL_OFDM_AG | RATE_ALL_CCK;
rtl92e_writel(dev, RRSR, ulRegRead);
- rtl92e_writel(dev, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
+ rtl92e_writel(dev, RATR0 + 4 * 7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
rtl92e_writeb(dev, ACK_TIMEOUT, 0x30);
@@ -718,7 +711,6 @@ end:
static void _rtl92e_net_update(struct net_device *dev)
{
-
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_network *net;
u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
@@ -738,8 +730,8 @@ static void _rtl92e_net_update(struct net_device *dev)
rtl92e_writew(dev, BCN_DRV_EARLY_INT, 10);
rtl92e_writeb(dev, BCN_ERR_THRESH, 100);
- BcnTimeCfg |= (BcnCW<<BCN_TCFG_CW_SHIFT);
- BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
+ BcnTimeCfg |= (BcnCW << BCN_TCFG_CW_SHIFT);
+ BcnTimeCfg |= BcnIFS << BCN_TCFG_IFS;
rtl92e_writew(dev, BCN_TCFG, BcnTimeCfg);
}
@@ -753,7 +745,7 @@ void rtl92e_link_change(struct net_device *dev)
if (!priv->up)
return;
- if (ieee->state == RTLLIB_LINKED) {
+ if (ieee->link_state == MAC80211_LINKED) {
_rtl92e_net_update(dev);
rtl92e_update_ratr_table(dev);
if ((ieee->pairwise_key_type == KEY_TYPE_WEP40) ||
@@ -768,13 +760,14 @@ void rtl92e_link_change(struct net_device *dev)
u32 reg;
reg = rtl92e_readl(dev, RCR);
- if (priv->rtllib->state == RTLLIB_LINKED) {
+ if (priv->rtllib->link_state == MAC80211_LINKED) {
if (ieee->intel_promiscuous_md_info.promiscuous_on)
;
else
priv->receive_config = reg |= RCR_CBSSID;
- } else
+ } else {
priv->receive_config = reg &= ~RCR_CBSSID;
+ }
rtl92e_writel(dev, RCR, reg);
}
@@ -883,7 +876,7 @@ static u8 _rtl92e_rate_mgn_to_hw(u8 rate)
case MGN_MCS15:
ret = DESC90_RATEMCS15;
break;
- case (0x80|0x20):
+ case (0x80 | 0x20):
ret = DESC90_RATEMCS32;
break;
default:
@@ -973,7 +966,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo->RtsEnable = (cb_desc->bRTSEnable) ? 1 : 0;
pTxFwInfo->CtsEnable = (cb_desc->bCTSEnable) ? 1 : 0;
pTxFwInfo->RtsSTBC = (cb_desc->bRTSSTBC) ? 1 : 0;
- pTxFwInfo->RtsHT = (cb_desc->rts_rate&0x80) ? 1 : 0;
+ pTxFwInfo->RtsHT = (cb_desc->rts_rate & 0x80) ? 1 : 0;
pTxFwInfo->RtsRate = _rtl92e_rate_mgn_to_hw(cb_desc->rts_rate);
pTxFwInfo->RtsBandwidth = 0;
pTxFwInfo->RtsSubcarrier = cb_desc->RTSSC;
@@ -1008,8 +1001,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pdesc->PktSize = skb->len - sizeof(struct tx_fwinfo_8190pci);
pdesc->SecCAMID = 0;
- pdesc->RATid = cb_desc->RATRIndex;
-
+ pdesc->RATid = cb_desc->ratr_index;
pdesc->NoEnc = 1;
pdesc->SecType = 0x0;
@@ -1217,7 +1209,6 @@ static long _rtl92e_signal_scale_mapping(struct r8192_priv *priv, long currsig)
return retsig;
}
-
#define rx_hal_is_cck_rate(_pdrvinfo)\
((_pdrvinfo->RxRate == DESC90_RATE1M ||\
_pdrvinfo->RxRate == DESC90_RATE2M ||\
@@ -1265,7 +1256,6 @@ static void _rtl92e_query_rxphystatus(
check_reg824 = 1;
}
-
prxpkt = (u8 *)pdrvinfo;
prxpkt += sizeof(struct rx_fwinfo);
@@ -1345,7 +1335,7 @@ static void _rtl92e_query_rxphystatus(
else if (pcck_buf->sq_rpt < 20)
sq = 100;
else
- sq = ((64-sq) * 100) / 44;
+ sq = ((64 - sq) * 100) / 44;
}
pstats->SignalQuality = sq;
precord_stats->SignalQuality = sq;
@@ -1372,7 +1362,6 @@ static void _rtl92e_query_rxphystatus(
}
}
-
rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106;
pwdb_all = rtl92e_rx_db_to_percent(rx_pwr_all);
@@ -1452,7 +1441,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
slide_rssi_index = 0;
- tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
+ tmp_val = priv->stats.slide_rssi_total / slide_rssi_statistics;
priv->stats.signal_strength = rtl92e_translate_to_dbm(priv, tmp_val);
curr_st->rssi = priv->stats.signal_strength;
if (!prev_st->bPacketMatchBSSID) {
@@ -1482,14 +1471,13 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
} else {
priv->stats.rx_rssi_percentage[rfpath] =
((priv->stats.rx_rssi_percentage[rfpath] *
- (RX_SMOOTH-1)) +
+ (RX_SMOOTH - 1)) +
(prev_st->RxMIMOSignalStrength[rfpath])) /
(RX_SMOOTH);
}
}
}
-
if (prev_st->bPacketBeacon) {
if (slide_beacon_adc_pwdb_statistics++ >=
PHY_Beacon_RSSI_SLID_WIN_MAX) {
@@ -1517,14 +1505,14 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (prev_st->RxPWDBAll > (u32)priv->undecorated_smoothed_pwdb) {
priv->undecorated_smoothed_pwdb =
(((priv->undecorated_smoothed_pwdb) *
- (RX_SMOOTH-1)) +
+ (RX_SMOOTH - 1)) +
(prev_st->RxPWDBAll)) / (RX_SMOOTH);
priv->undecorated_smoothed_pwdb =
priv->undecorated_smoothed_pwdb + 1;
} else {
priv->undecorated_smoothed_pwdb =
(((priv->undecorated_smoothed_pwdb) *
- (RX_SMOOTH-1)) +
+ (RX_SMOOTH - 1)) +
(prev_st->RxPWDBAll)) / (RX_SMOOTH);
}
rtl92e_update_rx_statistics(priv, prev_st);
@@ -1753,7 +1741,7 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
(pDrvInfo->FirstAGGR == 1);
stats->TimeStampLow = pDrvInfo->TSFL;
- stats->TimeStampHigh = rtl92e_readl(dev, TSFR+4);
+ stats->TimeStampHigh = rtl92e_readl(dev, TSFR + 4);
rtl92e_update_rx_pkt_timestamp(dev, stats);
@@ -1766,7 +1754,7 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
skb_trim(skb, skb->len - S_CRC_LEN);
- stats->packetlength = stats->Length-4;
+ stats->packetlength = stats->Length - 4;
stats->fraglength = stats->packetlength;
stats->fragoffset = 0;
stats->ntotalfrag = 1;
@@ -1806,7 +1794,6 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset)
rtl92e_writel(dev, WFCRC1, 0xffffffff);
rtl92e_writel(dev, WFCRC2, 0xffffffff);
-
rtl92e_writeb(dev, PMR, 0x5);
rtl92e_writeb(dev, MAC_BLK_CTRL, 0xa);
}
@@ -1830,18 +1817,14 @@ void rtl92e_update_ratr_table(struct net_device *dev)
rtl92e_config_rate(dev, &rate_config);
ratr_value = rate_config | *pMcsRate << 12;
switch (ieee->mode) {
- case IEEE_A:
- ratr_value &= 0x00000FF0;
- break;
- case IEEE_B:
+ case WIRELESS_MODE_B:
ratr_value &= 0x0000000F;
break;
- case IEEE_G:
- case IEEE_G|IEEE_B:
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_G | WIRELESS_MODE_B:
ratr_value &= 0x00000FF7;
break;
- case IEEE_N_24G:
- case IEEE_N_5G:
+ case WIRELESS_MODE_N_24G:
if (ieee->ht_info->peer_mimo_ps == 0)
ratr_value &= 0x0007F007;
else
@@ -1857,7 +1840,7 @@ void rtl92e_update_ratr_table(struct net_device *dev)
else if (!ieee->ht_info->cur_tx_bw40mhz &&
ieee->ht_info->bCurShortGI20MHz)
ratr_value |= 0x80000000;
- rtl92e_writel(dev, RATR0+rate_index*4, ratr_value);
+ rtl92e_writel(dev, RATR0 + rate_index * 4, ratr_value);
rtl92e_writeb(dev, UFWP, 1);
}
@@ -1881,7 +1864,7 @@ rtl92e_init_variables(struct net_device *dev)
RCR_AMF | RCR_ADF |
RCR_AICV |
RCR_AB | RCR_AM | RCR_APM |
- RCR_AAP | ((u32)7<<RCR_MXDMA_OFFSET) |
+ RCR_AAP | ((u32)7 << RCR_MXDMA_OFFSET) |
((u32)7 << RCR_FIFO_OFFSET) | RCR_ONLYERLPKT;
priv->irq_mask[0] = (u32)(IMR_ROK | IMR_VODOK | IMR_VIDOK |
@@ -1901,7 +1884,6 @@ void rtl92e_enable_irq(struct net_device *dev)
priv->irq_enabled = 1;
rtl92e_writel(dev, INTA_MASK, priv->irq_mask[0]);
-
}
void rtl92e_disable_irq(struct net_device *dev)
@@ -1933,7 +1915,6 @@ void rtl92e_enable_tx(struct net_device *dev)
rtl92e_writel(dev, TX_DESC_BASE[i], priv->tx_ring[i].dma);
}
-
void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta, u32 *p_intb)
{
*p_inta = rtl92e_readl(dev, ISR);
@@ -1976,7 +1957,7 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
}
- SlotIndex = (priv->silent_reset_rx_slot_index++)%SilentResetRxSoltNum;
+ SlotIndex = (priv->silent_reset_rx_slot_index++) % SilentResetRxSoltNum;
if (priv->rx_ctr == RegRxCounter) {
priv->silent_reset_rx_stuck_event[SlotIndex] = 1;
@@ -1990,8 +1971,6 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
TotalRxStuckCount +=
priv->silent_reset_rx_stuck_event[i];
}
-
-
} else {
priv->silent_reset_rx_stuck_event[SlotIndex] = 0;
}
@@ -2021,7 +2000,7 @@ bool rtl92e_get_nmode_support_by_sec(struct net_device *dev)
struct rtllib_device *ieee = priv->rtllib;
if (ieee->rtllib_ap_sec_type &&
- (ieee->rtllib_ap_sec_type(priv->rtllib)&(SEC_ALG_WEP |
+ (ieee->rtllib_ap_sec_type(priv->rtllib) & (SEC_ALG_WEP |
SEC_ALG_TKIP))) {
return false;
} else {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index f4d4b01630a2..09f8c76b7e65 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
@@ -159,7 +159,6 @@ enum _RTL8192PCI_HW {
WFCRC2 = 0x2f8,
BW_OPMODE = 0x300,
-#define BW_OPMODE_5G BIT1
#define BW_OPMODE_20MHZ BIT2
IC_VERRSION = 0x301,
MSR = 0x303,
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 4b0ebe96302e..875540a2079d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -32,7 +32,6 @@ static u32 _rtl92e_calculate_bit_shift(u32 dwBitMask)
void rtl92e_set_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
u32 dwData)
{
-
u32 OriginalValue, BitShift, NewValue;
if (dwBitMask != bMaskDWord) {
@@ -40,8 +39,9 @@ void rtl92e_set_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
BitShift = _rtl92e_calculate_bit_shift(dwBitMask);
NewValue = (OriginalValue & ~dwBitMask) | (dwData << BitShift);
rtl92e_writel(dev, dwRegAddr, NewValue);
- } else
+ } else {
rtl92e_writel(dev, dwRegAddr, dwData);
+ }
}
u32 rtl92e_get_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask)
@@ -99,7 +99,6 @@ static u32 _rtl92e_phy_rf_read(struct net_device *dev,
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
return ret;
-
}
static void _rtl92e_phy_rf_write(struct net_device *dev,
@@ -165,10 +164,10 @@ void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
_rtl92e_phy_rf_fw_write(dev, eRFPath, RegAddr,
New_Value);
- } else
+ } else {
_rtl92e_phy_rf_fw_write(dev, eRFPath, RegAddr, Data);
+ }
udelay(200);
-
} else {
if (BitMask != bMask12Bits) {
Original_Value = _rtl92e_phy_rf_read(dev, eRFPath,
@@ -177,8 +176,9 @@ void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
New_Value = (Original_Value & ~BitMask) | (Data << BitShift);
_rtl92e_phy_rf_write(dev, eRFPath, RegAddr, New_Value);
- } else
+ } else {
_rtl92e_phy_rf_write(dev, eRFPath, RegAddr, Data);
+ }
}
}
@@ -226,7 +226,6 @@ static u32 _rtl92e_phy_rf_fw_read(struct net_device *dev,
return 0;
}
return rtl92e_readl(dev, RF_DATA);
-
}
static void _rtl92e_phy_rf_fw_write(struct net_device *dev,
@@ -247,10 +246,8 @@ static void _rtl92e_phy_rf_fw_write(struct net_device *dev,
break;
}
rtl92e_writel(dev, QPNR, Data);
-
}
-
void rtl92e_config_mac(struct net_device *dev)
{
u32 dwArrayLen = 0, i = 0;
@@ -267,12 +264,10 @@ void rtl92e_config_mac(struct net_device *dev)
}
for (i = 0; i < dwArrayLen; i += 3) {
if (pdwArray[i] == 0x318)
- pdwArray[i+2] = 0x00000800;
- rtl92e_set_bb_reg(dev, pdwArray[i], pdwArray[i+1],
- pdwArray[i+2]);
+ pdwArray[i + 2] = 0x00000800;
+ rtl92e_set_bb_reg(dev, pdwArray[i], pdwArray[i + 1],
+ pdwArray[i + 2]);
}
- return;
-
}
static void _rtl92e_phy_config_bb(struct net_device *dev, u8 ConfigType)
@@ -291,13 +286,13 @@ static void _rtl92e_phy_config_bb(struct net_device *dev, u8 ConfigType)
for (i = 0; i < PHY_REGArrayLen; i += 2) {
rtl92e_set_bb_reg(dev, Rtl819XPHY_REGArray_Table[i],
bMaskDWord,
- Rtl819XPHY_REGArray_Table[i+1]);
+ Rtl819XPHY_REGArray_Table[i + 1]);
}
} else if (ConfigType == BB_CONFIG_AGC_TAB) {
for (i = 0; i < AGCTAB_ArrayLen; i += 2) {
rtl92e_set_bb_reg(dev, Rtl819XAGCTAB_Array_Table[i],
bMaskDWord,
- Rtl819XAGCTAB_Array_Table[i+1]);
+ Rtl819XAGCTAB_Array_Table[i + 1]);
}
}
}
@@ -370,7 +365,6 @@ bool rtl92e_check_bb_and_rf(struct net_device *dev, enum hw90_block CheckBlock,
break;
}
-
if (dwRegRead != WriteData[i]) {
netdev_warn(dev, "%s(): Check failed.\n", __func__);
ret = false;
@@ -389,10 +383,10 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev)
u32 dwRegValue = 0;
bRegValue = rtl92e_readb(dev, BB_GLOBAL_RESET);
- rtl92e_writeb(dev, BB_GLOBAL_RESET, (bRegValue|BB_GLOBAL_RESET_BIT));
+ rtl92e_writeb(dev, BB_GLOBAL_RESET, (bRegValue | BB_GLOBAL_RESET_BIT));
dwRegValue = rtl92e_readl(dev, CPU_GEN);
- rtl92e_writel(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST)));
+ rtl92e_writel(dev, CPU_GEN, (dwRegValue & (~CPU_GEN_BB_RST)));
for (eCheckItem = (enum hw90_block)HW90_BLOCK_PHY0;
eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) {
@@ -402,19 +396,18 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev)
if (!rtStatus)
return rtStatus;
}
- rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn | bOFDMEn, 0x0);
_rtl92e_phy_config_bb(dev, BB_CONFIG_PHY_REG);
dwRegValue = rtl92e_readl(dev, CPU_GEN);
- rtl92e_writel(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
+ rtl92e_writel(dev, CPU_GEN, (dwRegValue | CPU_GEN_BB_RST));
_rtl92e_phy_config_bb(dev, BB_CONFIG_AGC_TAB);
if (priv->ic_cut > VERSION_8190_BD) {
dwRegValue = 0x0;
rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
- (bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue);
-
+ (bXBTxAGC | bXCTxAGC | bXDTxAGC), dwRegValue);
dwRegValue = priv->crystal_cap;
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, bXtalCap92x,
@@ -470,7 +463,6 @@ void rtl92e_set_tx_power(struct net_device *dev, u8 channel)
u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath)
{
-
int i;
switch (eRFPath) {
@@ -483,7 +475,6 @@ u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath)
rtl92e_set_rf_reg(dev, eRFPath, RTL8192E_RADIO_A_ARR[i],
bMask12Bits,
RTL8192E_RADIO_A_ARR[i + 1]);
-
}
break;
case RF90_PATH_B:
@@ -495,7 +486,6 @@ u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath)
rtl92e_set_rf_reg(dev, eRFPath, RTL8192E_RADIO_B_ARR[i],
bMask12Bits,
RTL8192E_RADIO_B_ARR[i + 1]);
-
}
break;
default:
@@ -503,7 +493,6 @@ u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath)
}
return 0;
-
}
static void _rtl92e_set_tx_power_level(struct net_device *dev, u8 channel)
@@ -645,7 +634,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
rtl92e_set_rf_reg(dev,
(enum rf90_radio_path)eRFPath,
CurrentCmd->Para1, bMask12Bits,
- CurrentCmd->Para2<<7);
+ CurrentCmd->Para2 << 7);
break;
default:
break;
@@ -677,7 +666,6 @@ static void _rtl92e_phy_switch_channel(struct net_device *dev, u8 channel)
static void _rtl92e_phy_switch_channel_work_item(struct net_device *dev)
{
-
struct r8192_priv *priv = rtllib_priv(dev);
_rtl92e_phy_switch_channel(dev, priv->chan);
@@ -694,7 +682,6 @@ u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
if (priv->sw_chnl_in_progress)
return false;
-
switch (priv->rtllib->mode) {
case WIRELESS_MODE_B:
if (channel > 14) {
@@ -824,7 +811,6 @@ static void _rtl92e_cck_tx_power_track_bw_switch(struct net_device *dev)
static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
{
-
struct r8192_priv *priv = rtllib_priv(dev);
u8 regBwOpMode;
@@ -880,7 +866,7 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
}
rtl92e_set_bb_reg(dev, rCCK0_System, bCCKSideBand,
- (priv->n_cur_40mhz_prime_sc>>1));
+ (priv->n_cur_40mhz_prime_sc >> 1));
rtl92e_set_bb_reg(dev, rOFDM1_LSTF, 0xC00,
priv->n_cur_40mhz_prime_sc);
@@ -890,7 +876,6 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
netdev_err(dev, "%s(): unknown Bandwidth: %#X\n", __func__,
priv->current_chnl_bw);
break;
-
}
rtl92e_set_bandwidth(dev, priv->current_chnl_bw);
@@ -904,7 +889,6 @@ void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width bandwidth,
{
struct r8192_priv *priv = rtllib_priv(dev);
-
if (priv->set_bw_mode_in_progress)
return;
@@ -921,7 +905,6 @@ void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width bandwidth,
priv->n_cur_40mhz_prime_sc = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
_rtl92e_set_bw_mode_work_item(dev);
-
}
void rtl92e_init_gain(struct net_device *dev, u8 Operation)
@@ -993,7 +976,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
void rtl92e_set_rf_off(struct net_device *dev)
{
-
rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0);
@@ -1002,7 +984,6 @@ void rtl92e_set_rf_off(struct net_device *dev)
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0);
rtl92e_writeb(dev, ANAPAR_FOR_8192PCIE, 0x07);
-
}
static bool _rtl92e_set_rf_power_state(struct net_device *dev,
@@ -1099,9 +1080,8 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
break;
}
- if (bResult) {
+ if (bResult)
priv->rtllib->rf_power_state = rf_power_state;
- }
priv->set_rf_pwr_state_in_progress = false;
return bResult;
@@ -1130,11 +1110,11 @@ void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation)
if (priv->up) {
switch (Operation) {
case SCAN_OPT_BACKUP:
- priv->rtllib->InitialGainHandler(dev, IG_Backup);
+ priv->rtllib->init_gain_handler(dev, IG_Backup);
break;
case SCAN_OPT_RESTORE:
- priv->rtllib->InitialGainHandler(dev, IG_Restore);
+ priv->rtllib->init_gain_handler(dev, IG_Restore);
break;
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
index e1b30fbdf8cc..24fb0ca539ea 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
@@ -7,7 +7,6 @@
#ifndef _R819XU_PHYREG_H
#define _R819XU_PHYREG_H
-
#define RF_DATA 0x1d4
#define rPMAC_Reset 0x100
@@ -125,7 +124,6 @@
#define rOFDM0_TxCoeff5 0xcb4
#define rOFDM0_TxCoeff6 0xcb8
-
#define rOFDM1_LSTF 0xd00
#define rOFDM1_TRxPathEnable 0xd04
#define rOFDM1_CFO 0xd08
@@ -163,7 +161,6 @@
#define rTxAGC_Mcs11_Mcs08 0xe18
#define rTxAGC_Mcs15_Mcs12 0xe1c
-
#define rZebra1_HSSIEnable 0x0
#define rZebra1_TRxEnable1 0x1
#define rZebra1_TRxEnable2 0x2
@@ -269,7 +266,6 @@
#define b3WireAddressLength 0x400
#define b3WireRFPowerDown 0x1
/*#define bHWSISelect 0x8 */
-#define b5GPAPEPolarity 0x40000000
#define b2GPAPEPolarity 0x80000000
#define bRFSW_TxDefaultAnt 0x3
#define bRFSW_TxOptionAnt 0x30
@@ -287,7 +283,6 @@
#define bRFSI_ANTSW 0x100
#define bRFSI_ANTSWB 0x200
#define bRFSI_PAPE 0x400
-#define bRFSI_PAPE5G 0x800
#define bBandSelect 0x1
#define bHTSIG2_GI 0x80
#define bHTSIG2_Smoothing 0x01
@@ -819,7 +814,6 @@
#define bOFDMPHY0_End 0xcff
#define bOFDMPHY1_End 0xdff
-
#define bPMACControl 0x0
#define bWMACControl 0x1
#define bWNICControl 0x2
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index a4d65b4d99c2..6d9e5c27017d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -101,7 +101,6 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
else
usConfig |= BIT15 | (KeyType << 2) | KeyIndex;
-
for (i = 0; i < CAM_CONTENT_COUNT; i++) {
TargetCommand = i + CAM_CONTENT_COUNT * EntryNo;
TargetCommand |= BIT31 | BIT16;
@@ -149,7 +148,6 @@ void rtl92e_cam_restore(struct net_device *dev)
if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->rtllib->pairwise_key_type == KEY_TYPE_WEP104)) {
-
for (EntryId = 0; EntryId < 4; EntryId++) {
MacAddr = CAM_CONST_ADDR[EntryId];
if (priv->rtllib->swcamtable[EntryId].bused) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 27040d1e3230..4447489a16ea 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -169,7 +169,7 @@ bool rtl92e_set_rf_state(struct net_device *dev,
(priv->rtllib->iw_mode == IW_MODE_ADHOC)) {
if ((priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) ||
(change_source > RF_CHANGE_BY_IPS)) {
- if (ieee->state == RTLLIB_LINKED)
+ if (ieee->link_state == MAC80211_LINKED)
priv->blinked_ingpio = true;
else
priv->blinked_ingpio = false;
@@ -277,7 +277,7 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
}
}
- if (net->mode & (IEEE_G | IEEE_N_24G)) {
+ if (net->mode & (WIRELESS_MODE_G | WIRELESS_MODE_N_24G)) {
u8 slot_time_val;
u8 cur_slot_time = priv->slot_time;
@@ -327,7 +327,7 @@ static void _rtl92e_qos_activate(void *data)
int i;
mutex_lock(&priv->mutex);
- if (priv->rtllib->state != RTLLIB_LINKED)
+ if (priv->rtllib->link_state != MAC80211_LINKED)
goto success;
for (i = 0; i < QOS_QUEUE_NUM; i++)
@@ -344,7 +344,7 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv,
int ret = 0;
u32 size = sizeof(struct rtllib_qos_parameters);
- if (priv->rtllib->state != RTLLIB_LINKED)
+ if (priv->rtllib->link_state != MAC80211_LINKED)
return ret;
if (priv->rtllib->iw_mode != IW_MODE_INFRA)
@@ -400,7 +400,7 @@ static int _rtl92e_qos_assoc_resp(struct r8192_priv *priv,
if (!priv || !network)
return 0;
- if (priv->rtllib->state != RTLLIB_LINKED)
+ if (priv->rtllib->link_state != MAC80211_LINKED)
return 0;
if (priv->rtllib->iw_mode != IW_MODE_INFRA)
@@ -466,7 +466,7 @@ static void _rtl92e_prepare_beacon(struct tasklet_struct *t)
tcb_desc = (struct cb_desc *)(pnewskb->cb + 8);
tcb_desc->queue_index = BEACON_QUEUE;
tcb_desc->data_rate = 2;
- tcb_desc->RATRIndex = 7;
+ tcb_desc->ratr_index = 7;
tcb_desc->tx_dis_rate_fallback = 1;
tcb_desc->tx_use_drv_assinged_rate = 1;
skb_push(pnewskb, priv->rtllib->tx_headroom);
@@ -635,7 +635,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
if (priv->polling_timer_on == 0)
rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
- if (priv->rtllib->state != RTLLIB_LINKED)
+ if (priv->rtllib->link_state != MAC80211_LINKED)
rtllib_softmac_start_protocol(priv->rtllib, 0);
rtllib_reset_queue(priv->rtllib);
_rtl92e_watchdog_timer_cb(&priv->watch_dog_timer);
@@ -660,7 +660,7 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf)
priv->rtllib->rtllib_ips_leave(dev);
- if (priv->rtllib->state == RTLLIB_LINKED)
+ if (priv->rtllib->link_state == MAC80211_LINKED)
rtl92e_leisure_ps_leave(dev);
priv->up = 0;
@@ -716,9 +716,9 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
priv->rtllib->check_nic_enough_desc = _rtl92e_check_nic_enough_desc;
priv->rtllib->handle_assoc_response = _rtl92e_handle_assoc_response;
priv->rtllib->handle_beacon = _rtl92e_handle_beacon;
- priv->rtllib->SetWirelessMode = rtl92e_set_wireless_mode;
- priv->rtllib->LeisurePSLeave = rtl92e_leisure_ps_leave;
- priv->rtllib->SetBWModeHandler = rtl92e_set_bw_mode;
+ priv->rtllib->set_wireless_mode = rtl92e_set_wireless_mode;
+ priv->rtllib->leisure_ps_leave = rtl92e_leisure_ps_leave;
+ priv->rtllib->set_bw_mode_handler = rtl92e_set_bw_mode;
priv->rf_set_chan = rtl92e_set_channel;
priv->rtllib->start_send_beacons = rtl92e_start_beacon;
@@ -734,14 +734,9 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
priv->rtllib->SetHwRegHandler = rtl92e_set_reg;
priv->rtllib->AllowAllDestAddrHandler = rtl92e_set_monitor_mode;
- priv->rtllib->SetFwCmdHandler = NULL;
- priv->rtllib->InitialGainHandler = rtl92e_init_gain;
+ priv->rtllib->init_gain_handler = rtl92e_init_gain;
priv->rtllib->rtllib_ips_leave_wq = rtl92e_rtllib_ips_leave_wq;
priv->rtllib->rtllib_ips_leave = rtl92e_rtllib_ips_leave;
-
- priv->rtllib->LedControlHandler = NULL;
- priv->rtllib->UpdateBeaconInterruptHandler = NULL;
-
priv->rtllib->ScanOperationBackupHandler = rtl92e_scan_op_backup;
}
@@ -1014,7 +1009,7 @@ static enum reset_type _rtl92e_if_check_reset(struct net_device *dev)
if (rfState == rf_on &&
(priv->rtllib->iw_mode == IW_MODE_INFRA) &&
- (priv->rtllib->state == RTLLIB_LINKED))
+ (priv->rtllib->link_state == MAC80211_LINKED))
RxResetType = _rtl92e_rx_check_stuck(dev);
if (TxResetType == RESET_TYPE_NORMAL ||
@@ -1056,7 +1051,7 @@ RESET_START:
mutex_lock(&priv->wx_mutex);
- if (priv->rtllib->state == RTLLIB_LINKED)
+ if (priv->rtllib->link_state == MAC80211_LINKED)
rtl92e_leisure_ps_leave(dev);
if (priv->up) {
@@ -1078,9 +1073,9 @@ RESET_START:
rtl92e_dm_deinit(dev);
rtllib_stop_scan_syncro(ieee);
- if (ieee->state == RTLLIB_LINKED) {
+ if (ieee->link_state == MAC80211_LINKED) {
mutex_lock(&ieee->wx_mutex);
- netdev_info(dev, "ieee->state is RTLLIB_LINKED\n");
+ netdev_info(dev, "ieee->link_state is MAC80211_LINKED\n");
rtllib_stop_send_beacons(priv->rtllib);
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
@@ -1088,7 +1083,7 @@ RESET_START:
netif_carrier_off(dev);
mutex_unlock(&ieee->wx_mutex);
} else {
- netdev_info(dev, "ieee->state is NOT LINKED\n");
+ netdev_info(dev, "ieee->link_state is NOT LINKED\n");
rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
}
@@ -1115,14 +1110,14 @@ RESET_START:
rtl92e_enable_hw_security_config(dev);
- if (ieee->state == RTLLIB_LINKED && ieee->iw_mode ==
+ if (ieee->link_state == MAC80211_LINKED && ieee->iw_mode ==
IW_MODE_INFRA) {
ieee->set_chan(ieee->dev,
ieee->current_network.channel);
schedule_work(&ieee->associate_complete_wq);
- } else if (ieee->state == RTLLIB_LINKED && ieee->iw_mode ==
+ } else if (ieee->link_state == MAC80211_LINKED && ieee->iw_mode ==
IW_MODE_ADHOC) {
ieee->set_chan(ieee->dev,
ieee->current_network.channel);
@@ -1186,7 +1181,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
if (!priv->up || priv->hw_radio_off)
return;
- if (priv->rtllib->state >= RTLLIB_LINKED) {
+ if (priv->rtllib->link_state >= MAC80211_LINKED) {
if (priv->rtllib->CntAfterLink < 2)
priv->rtllib->CntAfterLink++;
} else {
@@ -1196,8 +1191,8 @@ static void _rtl92e_watchdog_wq_cb(void *data)
rtl92e_dm_watchdog(dev);
if (!rtllib_act_scanning(priv->rtllib, false)) {
- if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state ==
- RTLLIB_NOLINK) &&
+ if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->link_state ==
+ MAC80211_NOLINK) &&
(ieee->rf_power_state == rf_on) && !ieee->is_set_key &&
(!ieee->proto_stoppping) && !ieee->wx_set_enc) {
if ((ieee->pwr_save_ctrl.ReturnPoint ==
@@ -1207,7 +1202,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
}
}
}
- if ((ieee->state == RTLLIB_LINKED) && (ieee->iw_mode ==
+ if ((ieee->link_state == MAC80211_LINKED) && (ieee->iw_mode ==
IW_MODE_INFRA) && (!ieee->net_promiscuous_md)) {
if (ieee->link_detect_info.NumRxOkInPeriod > 100 ||
ieee->link_detect_info.NumTxOkInPeriod > 100)
@@ -1249,7 +1244,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
ieee->link_detect_info.bHigherBusyTraffic = bHigherBusyTraffic;
ieee->link_detect_info.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
- if (ieee->state == RTLLIB_LINKED && ieee->iw_mode == IW_MODE_INFRA) {
+ if (ieee->link_state == MAC80211_LINKED && ieee->iw_mode == IW_MODE_INFRA) {
u32 TotalRxBcnNum = 0;
u32 TotalRxDataNum = 0;
@@ -1268,17 +1263,13 @@ static void _rtl92e_watchdog_wq_cb(void *data)
"===>%s(): AP is power off, chan:%d, connect another one\n",
__func__, priv->chan);
- ieee->state = RTLLIB_ASSOCIATING;
+ ieee->link_state = RTLLIB_ASSOCIATING;
RemovePeerTS(priv->rtllib,
priv->rtllib->current_network.bssid);
ieee->is_roaming = true;
ieee->is_set_key = false;
ieee->link_change(dev);
- if (ieee->LedControlHandler)
- ieee->LedControlHandler(ieee->dev,
- LED_CTL_START_TO_LINK);
-
notify_wx_assoc_event(ieee);
if (!(ieee->rtllib_ap_sec_type(ieee) &
@@ -1440,7 +1431,7 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
- tcb_desc->RATRIndex = 7;
+ tcb_desc->ratr_index = 7;
tcb_desc->tx_dis_rate_fallback = 1;
tcb_desc->tx_use_drv_assinged_rate = 1;
tcb_desc->bTxEnableFwCalcDur = 1;
@@ -1509,7 +1500,6 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
MAX_DEV_ADDR_SIZE);
struct tx_desc *pdesc = NULL;
struct rtllib_hdr_1addr *header = NULL;
- u16 fc = 0, type = 0;
u8 *pda_addr = NULL;
int idx;
u32 fwinfo_size = 0;
@@ -1525,8 +1515,6 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
fwinfo_size = sizeof(struct tx_fwinfo_8190pci);
header = (struct rtllib_hdr_1addr *)(((u8 *)skb->data) + fwinfo_size);
- fc = le16_to_cpu(header->frame_ctl);
- type = WLAN_FC_GET_TYPE(fc);
pda_addr = header->addr1;
if (!is_broadcast_ether_addr(pda_addr) && !is_multicast_ether_addr(pda_addr))
@@ -1548,11 +1536,6 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
return skb->len;
}
-
- if (type == RTLLIB_FTYPE_DATA) {
- if (priv->rtllib->LedControlHandler)
- priv->rtllib->LedControlHandler(dev, LED_CTL_TX);
- }
rtl92e_fill_tx_desc(dev, pdesc, tcb_desc, skb);
__skb_queue_tail(&ring->queue, skb);
pdesc->OWN = 1;
@@ -1779,8 +1762,6 @@ static void _rtl92e_rx_normal(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_hdr_1addr *rtllib_hdr = NULL;
bool unicast_packet = false;
- bool bLedBlinking = true;
- u16 fc = 0, type = 0;
u32 skb_len = 0;
int rx_queue_idx = RX_MPDU_QUEUE;
@@ -1788,7 +1769,6 @@ static void _rtl92e_rx_normal(struct net_device *dev)
.signal = 0,
.noise = (u8)-98,
.rate = 0,
- .freq = RTLLIB_24GHZ_BAND,
};
unsigned int count = priv->rxringcount;
@@ -1824,16 +1804,6 @@ static void _rtl92e_rx_normal(struct net_device *dev)
/* unicast packet */
unicast_packet = true;
}
- fc = le16_to_cpu(rtllib_hdr->frame_ctl);
- type = WLAN_FC_GET_TYPE(fc);
- if (type == RTLLIB_FTYPE_MGMT)
- bLedBlinking = false;
-
- if (bLedBlinking)
- if (priv->rtllib->LedControlHandler)
- priv->rtllib->LedControlHandler(dev,
- LED_CTL_RX);
-
skb_len = skb->len;
if (!rtllib_rx(priv->rtllib, skb, &stats)) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 285dac32c074..ec9e454299a8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -231,7 +231,6 @@ struct r8192_priv {
struct rt_stats stats;
struct iw_statistics wstats;
- short (*rf_set_sens)(struct net_device *dev, short sens);
u8 (*rf_set_chan)(struct net_device *dev, u8 ch);
struct rx_desc *rx_ring[MAX_RX_QUEUE];
@@ -271,8 +270,6 @@ struct r8192_priv {
short promisc;
short chan;
- short sens;
- short max_sens;
bool ps_force;
u32 irq_mask[2];
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 56a8ec517c06..37c275cac40b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -159,7 +159,6 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev);
static void _rtl92e_dm_init_bandwidth_autoswitch(struct net_device *dev);
static void _rtl92e_dm_bandwidth_autoswitch(struct net_device *dev);
-
static void _rtl92e_dm_check_tx_power_tracking(struct net_device *dev);
static void _rtl92e_dm_bb_initialgain_restore(struct net_device *dev);
@@ -179,7 +178,6 @@ static void _rtl92e_dm_check_rx_path_selection(struct net_device *dev);
static void _rtl92e_dm_init_rx_path_selection(struct net_device *dev);
static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev);
-
static void _rtl92e_dm_init_fsync(struct net_device *dev);
static void _rtl92e_dm_deinit_fsync(struct net_device *dev);
@@ -219,9 +217,7 @@ void rtl92e_dm_init(struct net_device *dev)
void rtl92e_dm_deinit(struct net_device *dev)
{
-
_rtl92e_dm_deinit_fsync(dev);
-
}
void rtl92e_dm_watchdog(struct net_device *dev)
@@ -250,7 +246,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
void rtl92e_init_adaptive_rate(struct net_device *dev)
{
-
struct r8192_priv *priv = rtllib_priv(dev);
struct rate_adaptive *pra = &priv->rate_adaptive;
@@ -296,8 +291,7 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
if (priv->rtllib->mode != WIRELESS_MODE_N_24G)
return;
- if (priv->rtllib->state == RTLLIB_LINKED) {
-
+ if (priv->rtllib->link_state == MAC80211_LINKED) {
bshort_gi_enabled = (ht_info->cur_tx_bw40mhz &&
ht_info->bCurShortGI40MHz) ||
(!ht_info->cur_tx_bw40mhz &&
@@ -353,7 +347,7 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
if (pra->ping_rssi_enable) {
if (priv->undecorated_smoothed_pwdb <
- (long)(pra->ping_rssi_thresh_for_ra+5)) {
+ (long)(pra->ping_rssi_thresh_for_ra + 5)) {
if ((priv->undecorated_smoothed_pwdb <
(long)pra->ping_rssi_thresh_for_ra) ||
ping_rssi_state) {
@@ -524,7 +518,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
priv->rtllib->bdynamic_txpower_enable = false;
for (j = 0; j <= 30; j++) {
-
tx_cmd.op = TXCMD_SET_TX_PWR_TRACKING;
tx_cmd.length = 4;
tx_cmd.value = priv->pwr_track >> 24;
@@ -562,7 +555,7 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
for (k = 0; k < 5; k++) {
if (k != 4)
tmp_report[k] = rtl92e_readb(dev,
- Tssi_Report_Value1+k);
+ Tssi_Report_Value1 + k);
else
tmp_report[k] = rtl92e_readb(dev,
Tssi_Report_Value2);
@@ -629,8 +622,9 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
} else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) {
priv->bcck_in_ch14 = false;
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- } else
+ } else {
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ }
}
if (priv->cck_present_attn_diff <= -12 ||
@@ -657,21 +651,21 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
{
#define ThermalMeterVal 9
struct r8192_priv *priv = rtllib_priv(dev);
- u32 tmpRegA, TempCCk;
- u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval;
+ u32 tmp_reg, tmp_cck;
+ u8 tmp_ofdm_index, tmp_cck_index, tmp_cck_20m_index, tmp_cck_40m_index, tmpval;
int i = 0, CCKSwingNeedUpdate = 0;
if (!priv->tx_pwr_tracking_init) {
- tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ tmp_reg = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
bMaskDWord);
for (i = 0; i < OFDM_TABLE_LEN; i++) {
- if (tmpRegA == OFDMSwingTable[i])
+ if (tmp_reg == OFDMSwingTable[i])
priv->ofdm_index[0] = i;
}
- TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2);
+ tmp_cck = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2);
for (i = 0; i < CCK_TABLE_LEN; i++) {
- if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
+ if (tmp_cck == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
priv->cck_index = i;
break;
}
@@ -680,42 +674,42 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
return;
}
- tmpRegA = rtl92e_get_rf_reg(dev, RF90_PATH_A, 0x12, 0x078);
- if (tmpRegA < 3 || tmpRegA > 13)
+ tmp_reg = rtl92e_get_rf_reg(dev, RF90_PATH_A, 0x12, 0x078);
+ if (tmp_reg < 3 || tmp_reg > 13)
return;
- if (tmpRegA >= 12)
- tmpRegA = 12;
+ if (tmp_reg >= 12)
+ tmp_reg = 12;
priv->thermal_meter[0] = ThermalMeterVal;
priv->thermal_meter[1] = ThermalMeterVal;
- if (priv->thermal_meter[0] >= (u8)tmpRegA) {
- tmpOFDMindex = tmpCCK20Mindex = 6+(priv->thermal_meter[0] -
- (u8)tmpRegA);
- tmpCCK40Mindex = tmpCCK20Mindex - 6;
- if (tmpOFDMindex >= OFDM_TABLE_LEN)
- tmpOFDMindex = OFDM_TABLE_LEN - 1;
- if (tmpCCK20Mindex >= CCK_TABLE_LEN)
- tmpCCK20Mindex = CCK_TABLE_LEN - 1;
- if (tmpCCK40Mindex >= CCK_TABLE_LEN)
- tmpCCK40Mindex = CCK_TABLE_LEN - 1;
+ if (priv->thermal_meter[0] >= (u8)tmp_reg) {
+ tmp_ofdm_index = 6 + (priv->thermal_meter[0] - (u8)tmp_reg);
+ tmp_cck_20m_index = tmp_ofdm_index;
+ tmp_cck_40m_index = tmp_cck_20m_index - 6;
+ if (tmp_ofdm_index >= OFDM_TABLE_LEN)
+ tmp_ofdm_index = OFDM_TABLE_LEN - 1;
+ if (tmp_cck_20m_index >= CCK_TABLE_LEN)
+ tmp_cck_20m_index = CCK_TABLE_LEN - 1;
+ if (tmp_cck_40m_index >= CCK_TABLE_LEN)
+ tmp_cck_40m_index = CCK_TABLE_LEN - 1;
} else {
- tmpval = (u8)tmpRegA - priv->thermal_meter[0];
+ tmpval = (u8)tmp_reg - priv->thermal_meter[0];
if (tmpval >= 6) {
- tmpOFDMindex = 0;
- tmpCCK20Mindex = 0;
+ tmp_ofdm_index = 0;
+ tmp_cck_20m_index = 0;
} else {
- tmpOFDMindex = 6 - tmpval;
- tmpCCK20Mindex = 6 - tmpval;
+ tmp_ofdm_index = 6 - tmpval;
+ tmp_cck_20m_index = 6 - tmpval;
}
- tmpCCK40Mindex = 0;
+ tmp_cck_40m_index = 0;
}
if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- tmpCCKindex = tmpCCK40Mindex;
+ tmp_cck_index = tmp_cck_40m_index;
else
- tmpCCKindex = tmpCCK20Mindex;
+ tmp_cck_index = tmp_cck_20m_index;
- priv->rec_cck_20m_idx = tmpCCK20Mindex;
- priv->rec_cck_40m_idx = tmpCCK40Mindex;
+ priv->rec_cck_20m_idx = tmp_cck_20m_index;
+ priv->rec_cck_40m_idx = tmp_cck_40m_index;
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
@@ -727,15 +721,15 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
CCKSwingNeedUpdate = 1;
}
- if (priv->cck_index != tmpCCKindex) {
- priv->cck_index = tmpCCKindex;
+ if (priv->cck_index != tmp_cck_index) {
+ priv->cck_index = tmp_cck_index;
CCKSwingNeedUpdate = 1;
}
if (CCKSwingNeedUpdate)
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- if (priv->ofdm_index[0] != tmpOFDMindex) {
- priv->ofdm_index[0] = tmpOFDMindex;
+ if (priv->ofdm_index[0] != tmp_ofdm_index) {
+ priv->ofdm_index[0] = tmp_ofdm_index;
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
OFDMSwingTable[priv->ofdm_index[0]]);
}
@@ -756,20 +750,17 @@ void rtl92e_dm_txpower_tracking_wq(void *data)
static void _rtl92e_dm_initialize_tx_power_tracking_tssi(struct net_device *dev)
{
-
struct r8192_priv *priv = rtllib_priv(dev);
priv->btxpower_tracking = true;
priv->txpower_count = 0;
priv->tx_pwr_tracking_init = false;
-
}
static void _rtl92e_dm_init_tx_power_tracking_thermal(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
-
if (priv->rtllib->FwRWRF)
priv->btxpower_tracking = true;
else
@@ -799,12 +790,10 @@ static void _rtl92e_dm_check_tx_power_tracking_tssi(struct net_device *dev)
return;
tx_power_track_counter++;
-
if (tx_power_track_counter >= 180) {
schedule_delayed_work(&priv->txpower_tracking_wq, 0);
tx_power_track_counter = 0;
}
-
}
static void _rtl92e_dm_check_tx_power_tracking_thermal(struct net_device *dev)
@@ -833,7 +822,6 @@ static void _rtl92e_dm_check_tx_power_tracking_thermal(struct net_device *dev)
netdev_info(dev, "===============>Schedule TxPowerTrackingWorkItem\n");
schedule_delayed_work(&priv->txpower_tracking_wq, 0);
TM_Trigger = 0;
-
}
static void _rtl92e_dm_check_tx_power_tracking(struct net_device *dev)
@@ -861,7 +849,7 @@ static void _rtl92e_dm_cck_tx_power_adjust_tssi(struct net_device *dev,
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
TempVal = (u32)((dm_cck_tx_bb_gain[attenuation][2]) +
(dm_cck_tx_bb_gain[attenuation][3] << 8) +
- (dm_cck_tx_bb_gain[attenuation][4] << 16)+
+ (dm_cck_tx_bb_gain[attenuation][4] << 16) +
(dm_cck_tx_bb_gain[attenuation][5] << 24));
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
TempVal = (u32)(dm_cck_tx_bb_gain[attenuation][6] +
@@ -875,7 +863,7 @@ static void _rtl92e_dm_cck_tx_power_adjust_tssi(struct net_device *dev,
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
TempVal = (u32)((dm_cck_tx_bb_gain_ch14[attenuation][2]) +
(dm_cck_tx_bb_gain_ch14[attenuation][3] << 8) +
- (dm_cck_tx_bb_gain_ch14[attenuation][4] << 16)+
+ (dm_cck_tx_bb_gain_ch14[attenuation][4] << 16) +
(dm_cck_tx_bb_gain_ch14[attenuation][5] << 24));
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
TempVal = (u32)((dm_cck_tx_bb_gain_ch14[attenuation][6]) +
@@ -898,7 +886,7 @@ static void _rtl92e_dm_cck_tx_power_adjust_thermal_meter(struct net_device *dev,
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
TempVal = CCKSwingTable_Ch1_Ch13[priv->cck_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->cck_index][3] << 8) +
- (CCKSwingTable_Ch1_Ch13[priv->cck_index][4] << 16)+
+ (CCKSwingTable_Ch1_Ch13[priv->cck_index][4] << 16) +
(CCKSwingTable_Ch1_Ch13[priv->cck_index][5] << 24);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
TempVal = CCKSwingTable_Ch1_Ch13[priv->cck_index][6] +
@@ -912,11 +900,11 @@ static void _rtl92e_dm_cck_tx_power_adjust_thermal_meter(struct net_device *dev,
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
TempVal = CCKSwingTable_Ch14[priv->cck_index][2] +
(CCKSwingTable_Ch14[priv->cck_index][3] << 8) +
- (CCKSwingTable_Ch14[priv->cck_index][4] << 16)+
+ (CCKSwingTable_Ch14[priv->cck_index][4] << 16) +
(CCKSwingTable_Ch14[priv->cck_index][5] << 24);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
TempVal = CCKSwingTable_Ch14[priv->cck_index][6] +
- (CCKSwingTable_Ch14[priv->cck_index][7]<<8);
+ (CCKSwingTable_Ch14[priv->cck_index][7] << 8);
rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
}
@@ -965,7 +953,6 @@ void rtl92e_dm_restore_state(struct net_device *dev)
_rtl92e_dm_tx_power_reset_recovery(dev);
_rtl92e_dm_bb_initialgain_restore(dev);
-
}
static void _rtl92e_dm_bb_initialgain_restore(struct net_device *dev)
@@ -989,7 +976,6 @@ static void _rtl92e_dm_bb_initialgain_restore(struct net_device *dev)
rtl92e_set_bb_reg(dev, rCCK0_CCA, bit_mask,
(u32)priv->initgain_backup.cca);
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
-
}
void rtl92e_dm_backup_state(struct net_device *dev)
@@ -1043,7 +1029,6 @@ static void _rtl92e_dm_dig_init(struct net_device *dev)
static void _rtl92e_dm_ctrl_initgain_byrssi(struct net_device *dev)
{
-
if (!dm_digtable.dig_enable_flag)
return;
@@ -1091,12 +1076,11 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev)
dm_digtable.dig_state = DM_STA_DIG_OFF;
}
- if (priv->rtllib->state == RTLLIB_LINKED)
+ if (priv->rtllib->link_state == MAC80211_LINKED)
dm_digtable.cur_sta_connect_state = DIG_STA_CONNECT;
else
dm_digtable.cur_sta_connect_state = DIG_STA_DISCONNECT;
-
dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
_rtl92e_dm_initial_gain(dev);
_rtl92e_dm_pd_th(dev);
@@ -1104,7 +1088,6 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev)
if (dm_digtable.dig_algorithm_switch)
dm_digtable.dig_algorithm_switch = 0;
dm_digtable.pre_sta_connect_state = dm_digtable.cur_sta_connect_state;
-
}
static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
@@ -1123,7 +1106,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
dm_digtable.dig_algorithm_switch = 0;
}
- if (priv->rtllib->state != RTLLIB_LINKED)
+ if (priv->rtllib->link_state != MAC80211_LINKED)
return;
if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
@@ -1146,7 +1129,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x17);
if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x00);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x00);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);
@@ -1183,7 +1166,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
}
if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x20);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
@@ -1194,7 +1177,6 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
_rtl92e_dm_ctrl_initgain_byrssi_highpwr(dev);
}
-
static void _rtl92e_dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1214,7 +1196,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x10);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x10);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
} else {
@@ -1228,7 +1210,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
(priv->undecorated_smoothed_pwdb >=
dm_digtable.rssi_high_thresh)) {
if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x20);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
}
@@ -1339,18 +1321,18 @@ static void _rtl92e_dm_pd_th(struct net_device *dev)
(initialized <= 3) || force_write) {
if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) {
if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x00);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x00);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);
} else if (dm_digtable.curpd_thstate ==
DIG_PD_AT_NORMAL_POWER) {
if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x20);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
} else if (dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER) {
if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
- rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x10);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE + 3), 0x10);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
}
@@ -1392,7 +1374,6 @@ static void _rtl92e_dm_cs_ratio(struct net_device *dev)
reset_cnt = priv->reset_count;
}
-
if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
!initialized || force_write) {
if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
@@ -1426,7 +1407,7 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
goto dm_CheckEdcaTurbo_EXIT;
- if (priv->rtllib->state != RTLLIB_LINKED)
+ if (priv->rtllib->link_state != MAC80211_LINKED)
goto dm_CheckEdcaTurbo_EXIT;
if (priv->rtllib->ht_info->iot_action & HT_IOT_ACT_DISABLE_EDCA_TURBO)
goto dm_CheckEdcaTurbo_EXIT;
@@ -1435,7 +1416,7 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
if (ht_info->iot_action & HT_IOT_ACT_EDCA_BIAS_ON_RX) {
- if (curTxOkCnt > 4*curRxOkCnt) {
+ if (curTxOkCnt > 4 * curRxOkCnt) {
if (priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
rtl92e_writel(dev, EDCAPARA_BE,
@@ -1456,7 +1437,7 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
}
priv->bcurrent_turbo_EDCA = true;
} else {
- if (curRxOkCnt > 4*curTxOkCnt) {
+ if (curRxOkCnt > 4 * curTxOkCnt) {
if (!priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
if (priv->rtllib->mode == WIRELESS_MODE_G)
@@ -1474,7 +1455,6 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
edca_setting_UL[ht_info->IOTPeer]);
priv->bis_cur_rdlstate = false;
}
-
}
priv->bcurrent_turbo_EDCA = true;
@@ -1489,7 +1469,6 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
}
}
-
dm_CheckEdcaTurbo_EXIT:
priv->rtllib->bis_any_nonbepkts = false;
lastTxOkCnt = priv->stats.txbytesunicast;
@@ -1519,7 +1498,7 @@ static void _rtl92e_dm_cts_to_self(struct net_device *dev)
if (ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) {
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
- if (curRxOkCnt > 4*curTxOkCnt)
+ if (curRxOkCnt > 4 * curTxOkCnt)
ht_info->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF;
else
ht_info->iot_action |= HT_IOT_ACT_FORCED_CTS2SELF;
@@ -1546,7 +1525,7 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
tmp1byte = rtl92e_readb(dev, GPI);
- rf_power_state_to_set = (tmp1byte&BIT1) ? rf_on : rf_off;
+ rf_power_state_to_set = (tmp1byte & BIT1) ? rf_on : rf_off;
if (priv->hw_radio_off && (rf_power_state_to_set == rf_on)) {
netdev_info(dev, "gpiochangeRF - HW Radio ON\n");
@@ -1576,7 +1555,7 @@ void rtl92e_dm_rf_pathcheck_wq(void *data)
rfpath = rtl92e_readb(dev, 0xc04);
for (i = 0; i < RF90_PATH_MAX; i++) {
- if (rfpath & (0x01<<i))
+ if (rfpath & (0x01 << i))
priv->brfpath_rxenable[i] = true;
else
priv->brfpath_rxenable[i] = false;
@@ -1627,7 +1606,7 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
u8 update_cck_rx_path;
if (!cck_Rx_Path_initialized) {
- dm_rx_path_sel_table.cck_rx_path = (rtl92e_readb(dev, 0xa07)&0xf);
+ dm_rx_path_sel_table.cck_rx_path = (rtl92e_readb(dev, 0xa07) & 0xf);
cck_Rx_Path_initialized = 1;
}
@@ -1746,7 +1725,6 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
tmp_cck_min_pwdb = cur_cck_pwdb;
}
}
-
}
}
}
@@ -1763,11 +1741,11 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
if ((tmp_max_rssi - tmp_min_rssi) >=
dm_rx_path_sel_table.diff_th) {
dm_rx_path_sel_table.rf_enable_rssi_th[min_rssi_index] =
- tmp_max_rssi+5;
+ tmp_max_rssi + 5;
rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable,
- 0x1<<min_rssi_index, 0x0);
+ 0x1 << min_rssi_index, 0x0);
rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable,
- 0x1<<min_rssi_index, 0x0);
+ 0x1 << min_rssi_index, 0x0);
disabled_rf_cnt++;
}
if (dm_rx_path_sel_table.cck_method == CCK_Rx_Version_1) {
@@ -1779,7 +1757,7 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
}
if (update_cck_rx_path) {
- dm_rx_path_sel_table.cck_rx_path = (cck_default_Rx<<2) |
+ dm_rx_path_sel_table.cck_rx_path = (cck_default_Rx << 2) |
(cck_optional_Rx);
rtl92e_set_bb_reg(dev, rCCK0_AFESetting, 0x0f000000,
dm_rx_path_sel_table.cck_rx_path);
@@ -1812,7 +1790,6 @@ static void _rtl92e_dm_check_rx_path_selection(struct net_device *dev)
schedule_delayed_work(&priv->rfpath_check_wq, 0);
}
-
static void _rtl92e_dm_init_fsync(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1829,7 +1806,6 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev)
timer_setup(&priv->fsync_timer, _rtl92e_dm_fsync_timer_callback, 0);
}
-
static void _rtl92e_dm_deinit_fsync(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1845,7 +1821,7 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
bool bSwitchFromCountDiff = false;
bool bDoubleTimeInterval = false;
- if (priv->rtllib->state == RTLLIB_LINKED &&
+ if (priv->rtllib->link_state == MAC80211_LINKED &&
priv->rtllib->bfsync_enable &&
(priv->rtllib->ht_info->iot_action & HT_IOT_ACT_CDD_FSYNC)) {
u32 rate_bitmap;
@@ -1864,7 +1840,6 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
else
rate_count_diff = rate_count - priv->rate_record;
if (rate_count_diff < priv->rate_count_diff_rec) {
-
u32 DiffNum = priv->rate_count_diff_rec -
rate_count_diff;
if (DiffNum >=
@@ -2005,7 +1980,6 @@ static void _rtl92e_dm_start_sw_fsync(struct net_device *dev)
add_timer(&priv->fsync_timer);
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cd);
-
}
static void _rtl92e_dm_check_fsync(struct net_device *dev)
@@ -2017,7 +1991,7 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
static u8 reg_c38_State = RegC38_Default;
static u32 reset_cnt;
- if (priv->rtllib->state == RTLLIB_LINKED &&
+ if (priv->rtllib->link_state == MAC80211_LINKED &&
priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) {
if (priv->rtllib->bfsync_enable == 0) {
switch (priv->rtllib->fsync_state) {
@@ -2048,7 +2022,6 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
case SW_Fsync:
default:
break;
-
}
}
if (reg_c38_State != RegC38_Fsync_AP_BCM) {
@@ -2071,7 +2044,7 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
break;
}
- if (priv->rtllib->state == RTLLIB_LINKED) {
+ if (priv->rtllib->link_state == MAC80211_LINKED) {
if (priv->undecorated_smoothed_pwdb <=
RegC38_TH) {
if (reg_c38_State !=
@@ -2084,7 +2057,7 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
RegC38_NonFsync_Other_AP;
}
} else if (priv->undecorated_smoothed_pwdb >=
- (RegC38_TH+5)) {
+ (RegC38_TH + 5)) {
if (reg_c38_State) {
rtl92e_writeb(dev,
rOFDM0_RxDetector3,
@@ -2132,7 +2105,7 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
return;
}
if ((priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) &&
- (priv->rtllib->mode == IEEE_G)) {
+ (priv->rtllib->mode == WIRELESS_MODE_G)) {
txhipower_threshold = TX_POWER_ATHEROAP_THRESH_HIGH;
txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
} else {
@@ -2140,7 +2113,7 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
}
- if (priv->rtllib->state == RTLLIB_LINKED) {
+ if (priv->rtllib->link_state == MAC80211_LINKED) {
if (priv->undecorated_smoothed_pwdb >= txhipower_threshold) {
priv->dynamic_tx_high_pwr = true;
priv->dynamic_tx_low_pwr = false;
@@ -2164,7 +2137,6 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
}
priv->last_dtp_flag_high = priv->dynamic_tx_high_pwr;
priv->last_dtp_flag_low = priv->dynamic_tx_low_pwr;
-
}
static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
index f4f7b74c8cd1..fab8932e67da 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
@@ -27,8 +27,8 @@ static u32 _rtl92e_ethtool_get_link(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- return ((priv->rtllib->state == RTLLIB_LINKED) ||
- (priv->rtllib->state == RTLLIB_LINKED_SCANNING));
+ return ((priv->rtllib->link_state == MAC80211_LINKED) ||
+ (priv->rtllib->link_state == MAC80211_LINKED_SCANNING));
}
const struct ethtool_ops rtl819x_ethtool_ops = {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 9c80dc1b6e12..d124b5eee0cc 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -9,7 +9,6 @@
#include "r8190P_rtl8256.h"
#include "rtl_pm.h"
-
int rtl92e_suspend(struct device *dev_d)
{
struct net_device *dev = dev_get_drvdata(dev_d);
@@ -62,7 +61,6 @@ int rtl92e_resume(struct device *dev_d)
netdev_info(dev, "================>r8192E resume call.\n");
-
pci_read_config_dword(pdev, 0x40, &val);
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 5a1cd22f5e25..1c49d5da68eb 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -118,7 +118,7 @@ void rtl92e_ips_enter(struct net_device *dev)
rt_state = priv->rtllib->rf_power_state;
if (rt_state == rf_on && !psc->bSwRfProcessing &&
- (priv->rtllib->state != RTLLIB_LINKED) &&
+ (priv->rtllib->link_state != MAC80211_LINKED) &&
(priv->rtllib->iw_mode != IW_MODE_MASTER)) {
psc->eInactivePowerState = rf_off;
_rtl92e_ps_update_rf_state(dev);
@@ -209,7 +209,7 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
&priv->rtllib->pwr_save_ctrl;
if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) &&
- (priv->rtllib->state == RTLLIB_LINKED))
+ (priv->rtllib->link_state == MAC80211_LINKED))
|| (priv->rtllib->iw_mode == IW_MODE_ADHOC) ||
(priv->rtllib->iw_mode == IW_MODE_MASTER))
return;
@@ -217,14 +217,11 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
if (psc->bLeisurePs) {
if (psc->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) {
- if (priv->rtllib->ps == RTLLIB_PS_DISABLED) {
- if (priv->rtllib->SetFwCmdHandler)
- priv->rtllib->SetFwCmdHandler(dev, FW_CMD_LPS_ENTER);
- _rtl92e_ps_set_mode(dev, RTLLIB_PS_MBCAST |
- RTLLIB_PS_UNICAST);
- }
- } else
+ if (priv->rtllib->ps == RTLLIB_PS_DISABLED)
+ _rtl92e_ps_set_mode(dev, RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST);
+ } else {
psc->LpsIdleCount++;
+ }
}
}
@@ -235,10 +232,7 @@ void rtl92e_leisure_ps_leave(struct net_device *dev)
&priv->rtllib->pwr_save_ctrl;
if (psc->bLeisurePs) {
- if (priv->rtllib->ps != RTLLIB_PS_DISABLED) {
+ if (priv->rtllib->ps != RTLLIB_PS_DISABLED)
_rtl92e_ps_set_mode(dev, RTLLIB_PS_DISABLED);
- if (priv->rtllib->SetFwCmdHandler)
- priv->rtllib->SetFwCmdHandler(dev, FW_CMD_LPS_LEAVE);
- }
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index cb28288a618b..88975dc804c6 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -169,7 +169,7 @@ static int _rtl92e_wx_adapter_power_status(struct net_device *dev,
priv->ps_force = false;
psc->bLeisurePs = true;
} else {
- if (priv->rtllib->state == RTLLIB_LINKED)
+ if (priv->rtllib->link_state == MAC80211_LINKED)
rtl92e_leisure_ps_leave(dev);
priv->ps_force = true;
@@ -311,10 +311,6 @@ static int _rtl92e_wx_get_range(struct net_device *dev,
/* ~130 Mb/s real (802.11n) */
range->throughput = 130 * 1000 * 1000;
- if (priv->rf_set_sens != NULL)
- /* signal level threshold range */
- range->sensitivity = priv->max_sens;
-
range->max_qual.qual = 100;
range->max_qual.level = 0;
range->max_qual.noise = 0;
@@ -346,9 +342,11 @@ static int _rtl92e_wx_get_range(struct net_device *dev,
for (i = 0, val = 0; i < 14; i++) {
if ((priv->rtllib->active_channel_map)[i + 1]) {
+ s32 freq_khz;
+
range->freq[val].i = i + 1;
- range->freq[val].m = rtllib_wlan_frequencies[i] *
- 100000;
+ freq_khz = ieee80211_channel_to_freq_khz(i + 1, NL80211_BAND_2GHZ);
+ range->freq[val].m = freq_khz * 100;
range->freq[val].e = 1;
val++;
}
@@ -377,10 +375,10 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
int ret;
if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
- if ((ieee->state >= RTLLIB_ASSOCIATING) &&
- (ieee->state <= RTLLIB_ASSOCIATING_AUTHENTICATED))
+ if ((ieee->link_state >= RTLLIB_ASSOCIATING) &&
+ (ieee->link_state <= RTLLIB_ASSOCIATING_AUTHENTICATED))
return 0;
- if ((priv->rtllib->state == RTLLIB_LINKED) &&
+ if ((priv->rtllib->link_state == MAC80211_LINKED) &&
(priv->rtllib->CntAfterLink < 2))
return 0;
}
@@ -393,7 +391,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
rt_state = priv->rtllib->rf_power_state;
if (!priv->up)
return -ENETDOWN;
- if (priv->rtllib->link_detect_info.bBusyTraffic == true)
+ if (priv->rtllib->link_detect_info.bBusyTraffic)
return -EAGAIN;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
@@ -411,7 +409,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
priv->rtllib->FirstIe_InScan = true;
- if (priv->rtllib->state != RTLLIB_LINKED) {
+ if (priv->rtllib->link_state != MAC80211_LINKED) {
if (rt_state == rf_off) {
if (priv->rtllib->rf_off_reason >
RF_CHANGE_BY_IPS) {
@@ -425,10 +423,6 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
mutex_unlock(&priv->rtllib->ips_mutex);
}
rtllib_stop_scan(priv->rtllib);
- if (priv->rtllib->LedControlHandler)
- priv->rtllib->LedControlHandler(dev,
- LED_CTL_SITE_SURVEY);
-
if (priv->rtllib->rf_power_state != rf_off) {
priv->rtllib->actscanning = true;
@@ -574,9 +568,9 @@ static int _rtl92e_wx_set_frag(struct net_device *dev,
if (priv->hw_radio_off)
return 0;
- if (wrqu->frag.disabled)
+ if (wrqu->frag.disabled) {
priv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;
- else {
+ } else {
if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
wrqu->frag.value > MAX_FRAG_THRESHOLD)
return -EINVAL;
@@ -807,45 +801,6 @@ static int _rtl92e_wx_get_retry(struct net_device *dev,
return 0;
}
-static int _rtl92e_wx_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- if (priv->rf_set_sens == NULL)
- return -1; /* we have not this support for this radio */
- wrqu->sens.value = priv->sens;
- return 0;
-}
-
-static int _rtl92e_wx_set_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- short err = 0;
-
- if (priv->hw_radio_off)
- return 0;
-
- mutex_lock(&priv->wx_mutex);
- if (priv->rf_set_sens == NULL) {
- err = -1; /* we have not this support for this radio */
- goto exit;
- }
- if (priv->rf_set_sens(dev, wrqu->sens.value) == 0)
- priv->sens = wrqu->sens.value;
- else
- err = -EINVAL;
-
-exit:
- mutex_unlock(&priv->wx_mutex);
-
- return err;
-}
-
static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -1066,8 +1021,6 @@ static iw_handler r8192_wx_handlers[] = {
[IW_IOCTL(SIOCGIWFREQ)] = _rtl92e_wx_get_freq,
[IW_IOCTL(SIOCSIWMODE)] = _rtl92e_wx_set_mode,
[IW_IOCTL(SIOCGIWMODE)] = _rtl92e_wx_get_mode,
- [IW_IOCTL(SIOCSIWSENS)] = _rtl92e_wx_set_sens,
- [IW_IOCTL(SIOCGIWSENS)] = _rtl92e_wx_get_sens,
[IW_IOCTL(SIOCGIWRANGE)] = _rtl92e_wx_get_range,
[IW_IOCTL(SIOCSIWAP)] = _rtl92e_wx_set_wap,
[IW_IOCTL(SIOCGIWAP)] = _rtl92e_wx_get_wap,
@@ -1171,7 +1124,7 @@ static struct iw_statistics *_rtl92e_get_wireless_stats(struct net_device *dev)
int tmp_qual = 0;
int tmp_noise = 0;
- if (ieee->state < RTLLIB_LINKED) {
+ if (ieee->link_state < MAC80211_LINKED) {
wstats->qual.qual = 10;
wstats->qual.level = 0;
wstats->qual.noise = 0x100 - 100; /* -100 dBm */
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index f4e9fa849796..2bbd01048561 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -24,7 +24,6 @@ enum ht_extchnl_offset {
};
struct ht_capab_ele {
-
u8 AdvCoding:1;
u8 ChlWidth:1;
u8 MimoPwrSave:2;
@@ -46,7 +45,6 @@ struct ht_capab_ele {
u8 MCS[16];
-
u16 ExtHTCapInfo;
u8 TxBFCap[4];
@@ -55,7 +53,6 @@ struct ht_capab_ele {
} __packed;
-
struct ht_info_ele {
u8 ControlChl;
@@ -94,72 +91,54 @@ enum ht_aggre_mode {
HT_AGG_FORCE_DISABLE = 2,
};
-
struct rt_hi_throughput {
- u8 enable_ht;
- u8 bCurrentHTSupport;
-
- u8 bRegBW40MHz;
- u8 bCurBW40MHz;
-
- u8 bRegShortGI40MHz;
- u8 bCurShortGI40MHz;
-
- u8 bRegShortGI20MHz;
- u8 bCurShortGI20MHz;
-
- u8 bRegSuppCCK;
- u8 bCurSuppCCK;
-
+ u8 enable_ht;
+ u8 bCurrentHTSupport;
+ u8 bRegBW40MHz;
+ u8 bCurBW40MHz;
+ u8 bRegShortGI40MHz;
+ u8 bCurShortGI40MHz;
+ u8 bRegShortGI20MHz;
+ u8 bCurShortGI20MHz;
+ u8 bRegSuppCCK;
+ u8 bCurSuppCCK;
enum ht_spec_ver ePeerHTSpecVer;
-
-
struct ht_capab_ele SelfHTCap;
struct ht_info_ele SelfHTInfo;
-
- u8 PeerHTCapBuf[32];
- u8 PeerHTInfoBuf[32];
-
-
- u8 bAMSDU_Support;
- u16 nAMSDU_MaxSize;
- u8 bCurrent_AMSDU_Support;
- u16 nCurrent_AMSDU_MaxSize;
-
- u8 bAMPDUEnable;
- u8 bCurrentAMPDUEnable;
- u8 AMPDU_Factor;
- u8 CurrentAMPDUFactor;
- u8 MPDU_Density;
+ u8 PeerHTCapBuf[32];
+ u8 PeerHTInfoBuf[32];
+ u8 bAMSDU_Support;
+ u16 nAMSDU_MaxSize;
+ u8 bCurrent_AMSDU_Support;
+ u16 nCurrent_AMSDU_MaxSize;
+ u8 bAMPDUEnable;
+ u8 bCurrentAMPDUEnable;
+ u8 AMPDU_Factor;
+ u8 CurrentAMPDUFactor;
+ u8 MPDU_Density;
u8 current_mpdu_density;
-
enum ht_aggre_mode ForcedAMPDUMode;
u8 forced_ampdu_factor;
u8 forced_mpdu_density;
-
enum ht_aggre_mode ForcedAMSDUMode;
u8 forced_short_gi;
-
u8 current_op_mode;
-
u8 self_mimo_ps;
u8 peer_mimo_ps;
-
enum ht_extchnl_offset CurSTAExtChnlOffset;
u8 cur_tx_bw40mhz;
u8 sw_bw_in_progress;
u8 reg_rt2rt_aggregation;
- u8 RT2RT_HT_Mode;
+ u8 RT2RT_HT_Mode;
u8 current_rt2rt_aggregation;
u8 current_rt2rt_long_slot_time;
u8 sz_rt2rt_agg_buf[10];
-
u8 reg_rx_reorder_enable;
u8 cur_rx_reorder_enable;
u8 rx_reorder_win_size;
u8 rx_reorder_pending_time;
u16 rx_reorder_drop_counter;
- u8 IOTPeer;
+ u8 IOTPeer;
u32 iot_action;
u8 iot_ra_func;
} __packed;
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index fe30a291e64c..f9fa3f2bb728 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -424,14 +424,12 @@ static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS)
}
switch (ieee->mode) {
- case IEEE_A:
- case IEEE_B:
- case IEEE_G:
+ case WIRELESS_MODE_B:
+ case WIRELESS_MODE_G:
for (i = 0; i <= 15; i++)
pOperateMCS[i] = 0;
break;
- case IEEE_N_24G:
- case IEEE_N_5G:
+ case WIRELESS_MODE_N_24G:
pOperateMCS[0] &= RATE_ADPT_1SS_MASK;
pOperateMCS[1] &= RATE_ADPT_2SS_MASK;
pOperateMCS[3] &= RATE_ADPT_MCS32_MASK;
@@ -835,11 +833,11 @@ static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
ieee->set_chan(ieee->dev,
ieee->current_network.channel);
- ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20_40,
+ ieee->set_bw_mode_handler(ieee->dev, HT_CHANNEL_WIDTH_20_40,
ht_info->CurSTAExtChnlOffset);
} else {
ieee->set_chan(ieee->dev, ieee->current_network.channel);
- ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20,
+ ieee->set_bw_mode_handler(ieee->dev, HT_CHANNEL_WIDTH_20,
HT_EXTCHNL_OFFSET_NO_EXT);
}
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index c010eb0d6036..0499711ea2bf 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -104,7 +104,6 @@ struct acm {
};
union qos_tclas {
-
struct _TYPE_GENERAL {
u8 Priority;
u8 ClassifierType;
diff --git a/drivers/staging/rtl8192e/rtl819x_TS.h b/drivers/staging/rtl8192e/rtl819x_TS.h
index 4aa9b12a2dd5..37760d0bc35d 100644
--- a/drivers/staging/rtl8192e/rtl819x_TS.h
+++ b/drivers/staging/rtl8192e/rtl819x_TS.h
@@ -53,6 +53,4 @@ struct rx_ts_record {
u8 num;
};
-
-
#endif
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 6e665e866f1f..e3ce4431d460 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -38,6 +38,7 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <net/cfg80211.h>
#include <net/lib80211.h>
#define MAX_PRECMD_CNT 16
@@ -139,7 +140,7 @@ struct cb_desc {
u8 rata_index;
u8 queue_index;
u16 txbuf_size;
- u8 RATRIndex;
+ u8 ratr_index;
u8 bAMSDU:1;
u8 bFromAggrQ:1;
u8 reserved6:6;
@@ -308,9 +309,7 @@ enum rt_op_mode {
};
#define aSifsTime \
- (((priv->rtllib->current_network.mode == IEEE_A) \
- || (priv->rtllib->current_network.mode == IEEE_N_24G) \
- || (priv->rtllib->current_network.mode == IEEE_N_5G)) ? 16 : 10)
+ ((priv->rtllib->current_network.mode == WIRELESS_MODE_N_24G) ? 16 : 10)
#define MGMT_QUEUE_NUM 5
@@ -421,17 +420,6 @@ enum init_gain_op_type {
IG_Max
};
-enum led_ctl_mode {
- LED_CTL_POWER_ON = 1,
- LED_CTL_LINK = 2,
- LED_CTL_NO_LINK = 3,
- LED_CTL_TX = 4,
- LED_CTL_RX = 5,
- LED_CTL_SITE_SURVEY = 6,
- LED_CTL_POWER_OFF = 7,
- LED_CTL_START_TO_LINK = 8,
-};
-
enum wireless_mode {
WIRELESS_MODE_UNKNOWN = 0x00,
WIRELESS_MODE_A = 0x01,
@@ -439,7 +427,6 @@ enum wireless_mode {
WIRELESS_MODE_G = 0x04,
WIRELESS_MODE_AUTO = 0x08,
WIRELESS_MODE_N_24G = 0x10,
- WIRELESS_MODE_N_5G = 0x20
};
#ifndef ETH_P_PAE
@@ -499,9 +486,6 @@ enum _REG_PREAMBLE_MODE {
#define RTLLIB_CCK_MODULATION (1<<0)
#define RTLLIB_OFDM_MODULATION (1<<1)
-#define RTLLIB_24GHZ_BAND (1<<0)
-#define RTLLIB_52GHZ_BAND (1<<1)
-
#define RTLLIB_CCK_RATE_LEN 4
#define RTLLIB_CCK_RATE_1MB 0x02
#define RTLLIB_CCK_RATE_2MB 0x04
@@ -548,10 +532,8 @@ struct rtllib_rx_stats {
u8 signal;
u8 noise;
u16 rate; /* in 100 kbps */
- u8 received_channel;
u8 control;
u8 mask;
- u8 freq;
u16 len;
u64 tsf;
u32 beacon_time;
@@ -1136,10 +1118,9 @@ struct rtllib_network {
struct list_head list;
};
-enum rtllib_state {
-
+enum rtl_link_state {
/* the card is not linked at all */
- RTLLIB_NOLINK = 0,
+ MAC80211_NOLINK = 0,
/* RTLLIB_ASSOCIATING* are for BSS client mode
* the driver shall not perform RX filtering unless
@@ -1164,14 +1145,14 @@ enum rtllib_state {
/* the link is ok. the card associated to a BSS or linked
* to a ibss cell or acting as an AP and creating the bss
*/
- RTLLIB_LINKED,
+ MAC80211_LINKED,
/* same as LINKED, but the driver shall apply RX filter
* rules as we are in NO_LINK mode. As the card is still
* logically linked, but it is doing a syncro site survey
* then it will be back to LINKED state.
*/
- RTLLIB_LINKED_SCANNING,
+ MAC80211_LINKED_SCANNING,
};
#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
@@ -1298,7 +1279,6 @@ enum fw_cmd_io_type {
#define RT_MAX_LD_SLOT_NUM 10
struct rt_link_detect {
-
u32 NumRecvBcnInPeriod;
u32 NumRecvDataInPeriod;
@@ -1316,7 +1296,6 @@ struct rt_link_detect {
};
struct sw_cam_table {
-
u8 macaddr[ETH_ALEN];
bool bused;
u8 key_buf[16];
@@ -1509,7 +1488,7 @@ struct rtllib_device {
*/
struct rtllib_network current_network;
- enum rtllib_state state;
+ enum rtl_link_state link_state;
int short_slot;
int mode; /* A, B, G */
@@ -1651,17 +1630,6 @@ struct rtllib_device {
};
/* Callback functions */
- void (*set_security)(struct net_device *dev,
- struct rtllib_security *sec);
-
- /* Used to TX data frame by using txb structs.
- * this is not used if in the softmac_features
- * is set the flag IEEE_SOFTMAC_TX_QUEUE
- */
- int (*hard_start_xmit)(struct rtllib_txb *txb,
- struct net_device *dev);
-
- int (*reset_port)(struct net_device *dev);
/* Softmac-generated frames (management) are TXed via this
* callback if the flag IEEE_SOFTMAC_SINGLE_QUEUE is
@@ -1682,24 +1650,12 @@ struct rtllib_device {
void (*softmac_data_hard_start_xmit)(struct sk_buff *skb,
struct net_device *dev, int rate);
- /* stops the HW queue for DATA frames. Useful to avoid
- * waste time to TX data frame when we are reassociating
- * This function can sleep.
- */
- void (*data_hard_stop)(struct net_device *dev);
-
- /* OK this is complementing to data_poll_hard_stop */
- void (*data_hard_resume)(struct net_device *dev);
-
/* ask to the driver to retune the radio.
* This function can sleep. the driver should ensure
* the radio has been switched before return.
*/
void (*set_chan)(struct net_device *dev, short ch);
- void (*rtllib_start_hw_scan)(struct net_device *dev);
- void (*rtllib_stop_hw_scan)(struct net_device *dev);
-
/* indicate the driver that the link state is changed
* for example it may indicate the card is associated now.
* Driver might be interested in this to apply RX filter
@@ -1729,22 +1685,16 @@ struct rtllib_device {
/* check whether Tx hw resource available */
short (*check_nic_enough_desc)(struct net_device *dev, int queue_index);
- void (*SetBWModeHandler)(struct net_device *dev,
- enum ht_channel_width bandwidth,
- enum ht_extchnl_offset Offset);
+ void (*set_bw_mode_handler)(struct net_device *dev,
+ enum ht_channel_width bandwidth,
+ enum ht_extchnl_offset Offset);
bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
- void (*SetWirelessMode)(struct net_device *dev, u8 wireless_mode);
+ void (*set_wireless_mode)(struct net_device *dev, u8 wireless_mode);
bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device *dev);
u8 (*rtllib_ap_sec_type)(struct rtllib_device *ieee);
- void (*InitialGainHandler)(struct net_device *dev, u8 Operation);
- bool (*SetFwCmdHandler)(struct net_device *dev,
- enum fw_cmd_io_type FwCmdIO);
- void (*UpdateBeaconInterruptHandler)(struct net_device *dev,
- bool start);
+ void (*init_gain_handler)(struct net_device *dev, u8 Operation);
void (*ScanOperationBackupHandler)(struct net_device *dev,
u8 Operation);
- void (*LedControlHandler)(struct net_device *dev,
- enum led_ctl_mode LedAction);
void (*SetHwRegHandler)(struct net_device *dev, u8 variable, u8 *val);
void (*AllowAllDestAddrHandler)(struct net_device *dev,
@@ -1752,7 +1702,7 @@ struct rtllib_device {
void (*rtllib_ips_leave_wq)(struct net_device *dev);
void (*rtllib_ips_leave)(struct net_device *dev);
- void (*LeisurePSLeave)(struct net_device *dev);
+ void (*leisure_ps_leave)(struct net_device *dev);
/* This must be the last item so that it points to the data
* allocated beyond this structure by alloc_rtllib
@@ -1760,12 +1710,7 @@ struct rtllib_device {
u8 priv[];
};
-#define IEEE_A (1<<0)
-#define IEEE_B (1<<1)
-#define IEEE_G (1<<2)
-#define IEEE_N_24G (1<<4)
-#define IEEE_N_5G (1<<5)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+#define IEEE_MODE_MASK (WIRELESS_MODE_B | WIRELESS_MODE_G)
/* Generate a 802.11 header */
@@ -2080,8 +2025,6 @@ void TsStartAddBaProcess(struct rtllib_device *ieee,
void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr);
void RemoveAllTS(struct rtllib_device *ieee);
-extern const long rtllib_wlan_frequencies[];
-
static inline const char *escape_essid(const char *essid, u8 essid_len)
{
static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index b649d02dc5c8..91dd3c373aef 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -225,18 +225,6 @@ rtllib_rx_frame_mgmt(struct rtllib_device *ieee, struct sk_buff *skb,
return 0;
}
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation
- * Ethernet-II snap header (RFC1042 for most EtherTypes)
- */
-static unsigned char rfc1042_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
-};
-
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8
-};
-
/* No encapsulation header if EtherType < 0x600 (=length) */
/* Called by rtllib_rx_frame_decrypt */
@@ -264,8 +252,9 @@ static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
RTLLIB_FCTL_FROMDS &&
memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
/* FromDS frame with own addr as DA */
- } else
+ } else {
return 0;
+ }
if (skb->len < 24 + 8)
return 0;
@@ -433,8 +422,9 @@ static int is_duplicate_packet(struct rtllib_device *ieee,
if (*last_frag + 1 != frag)
/* out-of-order fragment */
goto drop;
- } else
+ } else {
*last_seq = seq;
+ }
*last_frag = frag;
*last_time = jiffies;
@@ -1206,11 +1196,11 @@ static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast,
u8 nr_subframes)
{
if (unicast) {
- if (ieee->state == RTLLIB_LINKED) {
+ if (ieee->link_state == MAC80211_LINKED) {
if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
ieee->link_detect_info.NumTxOkInPeriod) > 8) ||
(ieee->link_detect_info.NumRxUnicastOkInPeriod > 2)) {
- ieee->LeisurePSLeave(ieee->dev);
+ ieee->leisure_ps_leave(ieee->dev);
}
}
}
@@ -2127,7 +2117,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
network->tim.tim_period = info_element->data[1];
network->dtim_period = info_element->data[1];
- if (ieee->state != RTLLIB_LINKED)
+ if (ieee->link_state != MAC80211_LINKED)
break;
network->last_dtim_sta_time = jiffies;
@@ -2311,11 +2301,7 @@ static inline int rtllib_network_init(
network->CountryIeLen = 0;
memset(network->CountryIeBuf, 0, MAX_IE_LEN);
HTInitializeBssDesc(&network->bssht);
- if (stats->freq == RTLLIB_52GHZ_BAND) {
- /* for A band (No DS info) */
- network->channel = stats->received_channel;
- } else
- network->flags |= NETWORK_HAS_CCK;
+ network->flags |= NETWORK_HAS_CCK;
network->wpa_ie_len = 0;
network->rsn_ie_len = 0;
@@ -2329,14 +2315,11 @@ static inline int rtllib_network_init(
return 1;
network->mode = 0;
- if (stats->freq == RTLLIB_52GHZ_BAND)
- network->mode = IEEE_A;
- else {
- if (network->flags & NETWORK_HAS_OFDM)
- network->mode |= IEEE_G;
- if (network->flags & NETWORK_HAS_CCK)
- network->mode |= IEEE_B;
- }
+
+ if (network->flags & NETWORK_HAS_OFDM)
+ network->mode |= WIRELESS_MODE_G;
+ if (network->flags & NETWORK_HAS_CCK)
+ network->mode |= WIRELESS_MODE_B;
if (network->mode == 0) {
netdev_dbg(ieee->dev, "Filtered out '%s (%pM)' network.\n",
@@ -2346,10 +2329,8 @@ static inline int rtllib_network_init(
}
if (network->bssht.bd_support_ht) {
- if (network->mode == IEEE_A)
- network->mode = IEEE_N_5G;
- else if (network->mode & (IEEE_G | IEEE_B))
- network->mode = IEEE_N_24G;
+ if (network->mode & (WIRELESS_MODE_G | WIRELESS_MODE_B))
+ network->mode = WIRELESS_MODE_N_24G;
}
if (rtllib_is_empty_essid(network->ssid, network->ssid_len))
network->flags |= NETWORK_EMPTY_ESSID;
@@ -2595,8 +2576,8 @@ static inline void rtllib_process_probe_response(
if (is_same_network(&ieee->current_network, network,
(network->ssid_len ? 1 : 0))) {
update_network(ieee, &ieee->current_network, network);
- if ((ieee->current_network.mode == IEEE_N_24G ||
- ieee->current_network.mode == IEEE_G) &&
+ if ((ieee->current_network.mode == WIRELESS_MODE_N_24G ||
+ ieee->current_network.mode == WIRELESS_MODE_G) &&
ieee->current_network.berp_info_valid) {
if (ieee->current_network.erp_value & ERP_UseProtection)
ieee->current_network.buseprotection = true;
@@ -2604,7 +2585,7 @@ static inline void rtllib_process_probe_response(
ieee->current_network.buseprotection = false;
}
if (is_beacon(frame_ctl)) {
- if (ieee->state >= RTLLIB_LINKED)
+ if (ieee->link_state >= MAC80211_LINKED)
ieee->link_detect_info.NumRecvBcnInPeriod++;
}
}
@@ -2662,7 +2643,7 @@ static inline void rtllib_process_probe_response(
|| ((ieee->current_network.ssid_len == network->ssid_len) &&
(strncmp(ieee->current_network.ssid, network->ssid,
network->ssid_len) == 0) &&
- (ieee->state == RTLLIB_NOLINK))))
+ (ieee->link_state == MAC80211_NOLINK))))
renew = 1;
update_network(ieee, target, network);
if (renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
@@ -2673,7 +2654,7 @@ static inline void rtllib_process_probe_response(
if (is_beacon(frame_ctl) &&
is_same_network(&ieee->current_network, network,
(network->ssid_len ? 1 : 0)) &&
- (ieee->state == RTLLIB_LINKED)) {
+ (ieee->link_state == MAC80211_LINKED)) {
ieee->handle_beacon(ieee->dev, beacon, &ieee->current_network);
}
free_network:
@@ -2702,7 +2683,7 @@ static void rtllib_rx_mgt(struct rtllib_device *ieee,
if (ieee->sta_sleep || (ieee->ps != RTLLIB_PS_DISABLED &&
ieee->iw_mode == IW_MODE_INFRA &&
- ieee->state == RTLLIB_LINKED))
+ ieee->link_state == MAC80211_LINKED))
schedule_work(&ieee->ps_task);
break;
@@ -2719,7 +2700,7 @@ static void rtllib_rx_mgt(struct rtllib_device *ieee,
if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
((ieee->iw_mode == IW_MODE_ADHOC ||
ieee->iw_mode == IW_MODE_MASTER) &&
- ieee->state == RTLLIB_LINKED))
+ ieee->link_state == MAC80211_LINKED))
rtllib_rx_probe_rq(ieee, skb);
break;
}
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index b9886e83a6dc..425d4acbcdf0 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -22,7 +22,6 @@
static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
-
static short rtllib_is_54g(struct rtllib_network *net)
{
return (net->rates_ex_len > 0) || (net->rates_len > 4);
@@ -139,7 +138,6 @@ static void enqueue_mgmt(struct rtllib_device *ieee, struct sk_buff *skb)
*/
ieee->mgmt_queue_head = nh;
ieee->mgmt_queue_ring[nh] = skb;
-
}
static void init_mgmt_queue(struct rtllib_device *ieee)
@@ -147,16 +145,14 @@ static void init_mgmt_queue(struct rtllib_device *ieee)
ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
}
-
u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
{
u16 i;
u8 QueryRate = 0;
u8 BasicRate;
-
for (i = 0; i < ieee->current_network.rates_len; i++) {
- BasicRate = ieee->current_network.rates[i]&0x7F;
+ BasicRate = ieee->current_network.rates[i] & 0x7F;
if (!rtllib_is_cck_rate(BasicRate)) {
if (QueryRate == 0) {
QueryRate = BasicRate;
@@ -185,9 +181,7 @@ static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
rate = ieee->basic_rate & 0x7f;
if (rate == 0) {
- if (ieee->mode == IEEE_A ||
- ieee->mode == IEEE_N_5G ||
- (ieee->mode == IEEE_N_24G && !ht_info->bCurSuppCCK))
+ if (ieee->mode == WIRELESS_MODE_N_24G && !ht_info->bCurSuppCCK)
rate = 0x0c;
else
rate = 0x02;
@@ -219,14 +213,14 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
tcb_desc->queue_index = HIGH_QUEUE;
tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
- tcb_desc->RATRIndex = 7;
+ tcb_desc->ratr_index = 7;
tcb_desc->tx_dis_rate_fallback = 1;
tcb_desc->tx_use_drv_assinged_rate = 1;
if (single) {
if (ieee->queue_stop) {
enqueue_mgmt(ieee, skb);
} else {
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
+ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -286,7 +280,6 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
-
if (stype != RTLLIB_STYPE_PSPOLL)
tcb_desc->queue_index = MGNT_QUEUE;
else
@@ -295,9 +288,8 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
if (ieee->disable_mgnt_queue)
tcb_desc->queue_index = HIGH_QUEUE;
-
tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
- tcb_desc->RATRIndex = 7;
+ tcb_desc->ratr_index = 7;
tcb_desc->tx_dis_rate_fallback = 1;
tcb_desc->tx_use_drv_assinged_rate = 1;
if (single) {
@@ -308,7 +300,6 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
ieee->seq_ctrl[0] = 0;
else
ieee->seq_ctrl[0]++;
-
}
/* avoid watchdog triggers */
ieee->softmac_data_hard_start_xmit(skb, ieee->dev,
@@ -324,7 +315,6 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
ieee->seq_ctrl[0]++;
}
ieee->softmac_hard_start_xmit(skb, ieee->dev);
-
}
}
@@ -388,7 +378,6 @@ static void rtllib_send_beacon(struct rtllib_device *ieee)
(msecs_to_jiffies(ieee->current_network.beacon_interval - 5)));
}
-
static void rtllib_send_beacon_cb(struct timer_list *t)
{
struct rtllib_device *ieee =
@@ -411,7 +400,6 @@ void rtllib_EnableNetMonitorMode(struct net_device *dev,
ieee->AllowAllDestAddrHandler(dev, true, !bInitState);
}
-
/* Disables network monitor mode. Only packets destined to
 * us will be received.
*/
@@ -425,7 +413,6 @@ void rtllib_DisableNetMonitorMode(struct net_device *dev,
ieee->AllowAllDestAddrHandler(dev, false, !bInitState);
}
-
/* Enables the specialized promiscuous mode required by Intel.
 * In this mode, Intel intends to hear traffic from/to other STAs in the
 * same BSS. Therefore we don't have to disable checking the BSSID and we only need
@@ -449,7 +436,6 @@ void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
}
EXPORT_SYMBOL(rtllib_EnableIntelPromiscuousMode);
-
/* Disables the specialized promiscuous mode required by Intel.
* See MgntEnableIntelPromiscuousMode for detail.
*/
@@ -481,7 +467,6 @@ static void rtllib_send_probe(struct rtllib_device *ieee, u8 is_mesh)
}
}
-
static void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh)
{
if (ieee->active_scan && (ieee->softmac_features &
@@ -494,7 +479,7 @@ static void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh)
static void rtllib_update_active_chan_map(struct rtllib_device *ieee)
{
memcpy(ieee->active_channel_map, GET_DOT11D_INFO(ieee)->channel_map,
- MAX_CHANNEL_NUMBER+1);
+ MAX_CHANNEL_NUMBER + 1);
}
/* this performs a synchronous scan, blocking the caller until all channels
@@ -523,12 +508,12 @@ static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
 * performing a complete synchronous scan before concluding
 * there are no interesting cells and creating a
 * new one. In this case the link state is
- * RTLLIB_NOLINK until we found an interesting cell.
+ * MAC80211_NOLINK until we found an interesting cell.
* If so the ieee8021_new_net, called by the RX path
- * will set the state to RTLLIB_LINKED, so we stop
+ * will set the state to MAC80211_LINKED, so we stop
* scanning
 * 2- We are linked and the root user runs iwlist scan.
- * So we switch to RTLLIB_LINKED_SCANNING to remember
+ * So we switch to MAC80211_LINKED_SCANNING to remember
 * that we are still logically linked (not interested in
 * new network events, except for updating the net list,
 * but we are temporarily 'unlinked' as the driver shall
@@ -537,7 +522,7 @@ static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
* if the state become LINKED because of the #1 situation
*/
- if (ieee->state == RTLLIB_LINKED)
+ if (ieee->link_state == MAC80211_LINKED)
goto out;
if (ieee->sync_scan_hurryup) {
netdev_info(ieee->dev,
@@ -558,7 +543,7 @@ out:
ieee->actscanning = false;
ieee->sync_scan_hurryup = 0;
- if (ieee->state >= RTLLIB_LINKED) {
+ if (ieee->link_state >= MAC80211_LINKED) {
if (IS_DOT11D_ENABLE(ieee))
dot11d_scan_complete(ieee);
}
@@ -629,8 +614,6 @@ out1:
mutex_unlock(&ieee->scan_mutex);
}
-
-
static void rtllib_beacons_start(struct rtllib_device *ieee)
{
unsigned long flags;
@@ -653,10 +636,8 @@ static void rtllib_beacons_stop(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
del_timer_sync(&ieee->beacon_timer);
-
}
-
void rtllib_stop_send_beacons(struct rtllib_device *ieee)
{
ieee->stop_send_beacons(ieee->dev);
@@ -665,7 +646,6 @@ void rtllib_stop_send_beacons(struct rtllib_device *ieee)
}
EXPORT_SYMBOL(rtllib_stop_send_beacons);
-
void rtllib_start_send_beacons(struct rtllib_device *ieee)
{
ieee->start_send_beacons(ieee->dev);
@@ -674,7 +654,6 @@ void rtllib_start_send_beacons(struct rtllib_device *ieee)
}
EXPORT_SYMBOL(rtllib_start_send_beacons);
-
static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
{
mutex_lock(&ieee->scan_mutex);
@@ -682,32 +661,24 @@ static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
if (ieee->scanning_continue == 1) {
ieee->scanning_continue = 0;
ieee->actscanning = false;
-
+ mutex_unlock(&ieee->scan_mutex);
cancel_delayed_work_sync(&ieee->softmac_scan_wq);
+ } else {
+ mutex_unlock(&ieee->scan_mutex);
}
-
- mutex_unlock(&ieee->scan_mutex);
}
void rtllib_stop_scan(struct rtllib_device *ieee)
{
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
rtllib_softmac_stop_scan(ieee);
- } else {
- if (ieee->rtllib_stop_hw_scan)
- ieee->rtllib_stop_hw_scan(ieee->dev);
- }
}
EXPORT_SYMBOL(rtllib_stop_scan);
void rtllib_stop_scan_syncro(struct rtllib_device *ieee)
{
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
ieee->sync_scan_hurryup = 1;
- } else {
- if (ieee->rtllib_stop_hw_scan)
- ieee->rtllib_stop_hw_scan(ieee->dev);
- }
}
EXPORT_SYMBOL(rtllib_stop_scan_syncro);
@@ -739,9 +710,6 @@ static void rtllib_start_scan(struct rtllib_device *ieee)
ieee->scanning_continue = 1;
schedule_delayed_work(&ieee->softmac_scan_wq, 0);
}
- } else {
- if (ieee->rtllib_start_hw_scan)
- ieee->rtllib_start_hw_scan(ieee->dev);
}
}
@@ -753,12 +721,8 @@ void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
RESET_CIE_WATCHDOG(ieee);
}
ieee->sync_scan_hurryup = 0;
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
rtllib_softmac_scan_syncro(ieee, is_mesh);
- } else {
- if (ieee->rtllib_start_hw_scan)
- ieee->rtllib_start_hw_scan(ieee->dev);
- }
}
EXPORT_SYMBOL(rtllib_start_scan_syncro);
@@ -817,7 +781,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
char *ssid = ieee->current_network.ssid;
int ssid_len = ieee->current_network.ssid_len;
- int rate_len = ieee->current_network.rates_len+2;
+ int rate_len = ieee->current_network.rates_len + 2;
int rate_ex_len = ieee->current_network.rates_ex_len;
int wpa_ie_len = ieee->wpa_ie_len;
u8 erpinfo_content = 0;
@@ -838,15 +802,16 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
else
atim_len = 0;
- if ((ieee->current_network.mode == IEEE_G) ||
- (ieee->current_network.mode == IEEE_N_24G &&
+ if ((ieee->current_network.mode == WIRELESS_MODE_G) ||
+ (ieee->current_network.mode == WIRELESS_MODE_N_24G &&
ieee->ht_info->bCurSuppCCK)) {
erp_len = 3;
erpinfo_content = 0;
if (ieee->current_network.buseprotection)
erpinfo_content |= ERP_UseProtection;
- } else
+ } else {
erp_len = 0;
+ }
crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
@@ -870,7 +835,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
}
}
- beacon_size = sizeof(struct rtllib_probe_response)+2+
+ beacon_size = sizeof(struct rtllib_probe_response) + 2 +
ssid_len + 3 + rate_len + rate_ex_len + atim_len + erp_len
+ wpa_ie_len + ieee->tx_headroom;
skb = dev_alloc_skb(beacon_size);
@@ -903,7 +868,6 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
if (encrypt)
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
beacon_buf->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_RESP);
beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
beacon_buf->info_element[0].len = ssid_len;
@@ -915,9 +879,9 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
tag += ssid_len;
*(tag++) = MFIE_TYPE_RATES;
- *(tag++) = rate_len-2;
- memcpy(tag, ieee->current_network.rates, rate_len-2);
- tag += rate_len-2;
+ *(tag++) = rate_len - 2;
+ memcpy(tag, ieee->current_network.rates, rate_len - 2);
+ tag += rate_len - 2;
*(tag++) = MFIE_TYPE_DS_SET;
*(tag++) = 1;
@@ -939,9 +903,9 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
}
if (rate_ex_len) {
*(tag++) = MFIE_TYPE_RATES_EX;
- *(tag++) = rate_ex_len-2;
- memcpy(tag, ieee->current_network.rates_ex, rate_ex_len-2);
- tag += rate_ex_len-2;
+ *(tag++) = rate_ex_len - 2;
+ memcpy(tag, ieee->current_network.rates_ex, rate_ex_len - 2);
+ tag += rate_ex_len - 2;
}
if (wpa_ie_len) {
@@ -982,7 +946,6 @@ static struct sk_buff *rtllib_assoc_resp(struct rtllib_device *ieee, u8 *dest)
assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ?
WLAN_CAPABILITY_ESS : WLAN_CAPABILITY_IBSS);
-
if (ieee->short_slot)
assoc->capability |=
cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
@@ -1037,8 +1000,6 @@ static struct sk_buff *rtllib_auth_resp(struct rtllib_device *ieee, int status,
ether_addr_copy(auth->header.addr1, dest);
auth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_AUTH);
return skb;
-
-
}
static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
@@ -1046,7 +1007,7 @@ static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
struct sk_buff *skb;
struct rtllib_hdr_3addr *hdr;
- skb = dev_alloc_skb(sizeof(struct rtllib_hdr_3addr)+ieee->tx_headroom);
+ skb = dev_alloc_skb(sizeof(struct rtllib_hdr_3addr) + ieee->tx_headroom);
if (!skb)
return NULL;
@@ -1063,8 +1024,6 @@ static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
(pwr ? RTLLIB_FCTL_PM : 0));
return skb;
-
-
}
static struct sk_buff *rtllib_pspoll_func(struct rtllib_device *ieee)
@@ -1072,7 +1031,7 @@ static struct sk_buff *rtllib_pspoll_func(struct rtllib_device *ieee)
struct sk_buff *skb;
struct rtllib_pspoll_hdr *hdr;
- skb = dev_alloc_skb(sizeof(struct rtllib_pspoll_hdr)+ieee->tx_headroom);
+ skb = dev_alloc_skb(sizeof(struct rtllib_pspoll_hdr) + ieee->tx_headroom);
if (!skb)
return NULL;
@@ -1088,7 +1047,6 @@ static struct sk_buff *rtllib_pspoll_func(struct rtllib_device *ieee)
RTLLIB_FCTL_PM);
return skb;
-
}
static void rtllib_resp_to_assoc_rq(struct rtllib_device *ieee, u8 *dest)
@@ -1099,7 +1057,6 @@ static void rtllib_resp_to_assoc_rq(struct rtllib_device *ieee, u8 *dest)
softmac_mgmt_xmit(buf, ieee);
}
-
static void rtllib_resp_to_auth(struct rtllib_device *ieee, int s, u8 *dest)
{
struct sk_buff *buf = rtllib_auth_resp(ieee, s, dest);
@@ -1108,7 +1065,6 @@ static void rtllib_resp_to_auth(struct rtllib_device *ieee, int s, u8 *dest)
softmac_mgmt_xmit(buf, ieee);
}
-
static void rtllib_resp_to_probe(struct rtllib_device *ieee, u8 *dest)
{
struct sk_buff *buf = rtllib_probe_resp(ieee, dest);
@@ -1117,7 +1073,6 @@ static void rtllib_resp_to_probe(struct rtllib_device *ieee, u8 *dest)
softmac_mgmt_xmit(buf, ieee);
}
-
static inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
{
int i = 0;
@@ -1195,11 +1150,11 @@ rtllib_association_req(struct rtllib_network *beacon,
}
if (beacon->bCkipSupported)
- ckip_ie_len = 30+2;
+ ckip_ie_len = 30 + 2;
if (beacon->bCcxRmEnable)
- ccxrm_ie_len = 6+2;
+ ccxrm_ie_len = 6 + 2;
if (beacon->BssCcxVerNumber >= 2)
- cxvernum_ie_len = 5+2;
+ cxvernum_ie_len = 5 + 2;
PMKCacheIdx = SecIsInPMKIDList(ieee, ieee->current_network.bssid);
if (PMKCacheIdx >= 0) {
@@ -1230,7 +1185,6 @@ rtllib_association_req(struct rtllib_network *beacon,
hdr = skb_put(skb, sizeof(struct rtllib_assoc_request_frame) + 2);
-
hdr->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_ASSOC_REQ);
hdr->header.duration_id = cpu_to_le16(37);
ether_addr_copy(hdr->header.addr1, beacon->bssid);
@@ -1247,10 +1201,9 @@ rtllib_association_req(struct rtllib_network *beacon,
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
if (ieee->short_slot &&
- (beacon->capability&WLAN_CAPABILITY_SHORT_SLOT_TIME))
+ (beacon->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME))
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
-
hdr->listen_interval = cpu_to_le16(beacon->listen_interval);
hdr->info_element[0].id = MFIE_TYPE_SSID;
@@ -1286,7 +1239,7 @@ rtllib_association_req(struct rtllib_network *beacon,
sizeof(AironetIeOui));
osCcxAironetIE.Octet[IE_CISCO_FLAG_POSITION] |=
- (SUPPORT_CKIP_PK|SUPPORT_CKIP_MIC);
+ (SUPPORT_CKIP_PK | SUPPORT_CKIP_MIC);
tag = skb_put(skb, ckip_ie_len);
*tag++ = MFIE_TYPE_AIRONET;
*tag++ = osCcxAironetIE.Length;
@@ -1397,7 +1350,7 @@ static void rtllib_associate_abort(struct rtllib_device *ieee)
* Here we will check if there are good nets to associate
* with, so we retry or just get back to NO_LINK and scanning
*/
- if (ieee->state == RTLLIB_ASSOCIATING_AUTHENTICATING) {
+ if (ieee->link_state == RTLLIB_ASSOCIATING_AUTHENTICATING) {
netdev_dbg(ieee->dev, "Authentication failed\n");
ieee->softmac_stats.no_auth_rs++;
} else {
@@ -1405,7 +1358,7 @@ static void rtllib_associate_abort(struct rtllib_device *ieee)
ieee->softmac_stats.no_ass_rs++;
}
- ieee->state = RTLLIB_ASSOCIATING_RETRY;
+ ieee->link_state = RTLLIB_ASSOCIATING_RETRY;
schedule_delayed_work(&ieee->associate_retry_wq,
RTLLIB_SOFTMAC_ASSOC_RETRY_TIME);
@@ -1431,10 +1384,10 @@ static void rtllib_associate_step1(struct rtllib_device *ieee, u8 *daddr)
skb = rtllib_authentication_req(beacon, ieee, 0, daddr);
- if (!skb)
+ if (!skb) {
rtllib_associate_abort(ieee);
- else {
- ieee->state = RTLLIB_ASSOCIATING_AUTHENTICATING;
+ } else {
+ ieee->link_state = RTLLIB_ASSOCIATING_AUTHENTICATING;
netdev_dbg(ieee->dev, "Sending authentication request\n");
softmac_mgmt_xmit(skb, ieee);
if (!timer_pending(&ieee->associate_timer)) {
@@ -1456,10 +1409,10 @@ static void rtllib_auth_challenge(struct rtllib_device *ieee, u8 *challenge,
skb = rtllib_authentication_req(beacon, ieee, chlen + 2, beacon->bssid);
- if (!skb)
+ if (!skb) {
rtllib_associate_abort(ieee);
- else {
- c = skb_put(skb, chlen+2);
+ } else {
+ c = skb_put(skb, chlen + 2);
*(c++) = MFIE_TYPE_CHALLENGE;
*(c++) = chlen;
memcpy(c, challenge, chlen);
@@ -1471,7 +1424,7 @@ static void rtllib_auth_challenge(struct rtllib_device *ieee, u8 *challenge,
sizeof(struct rtllib_hdr_3addr));
softmac_mgmt_xmit(skb, ieee);
- mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
+ mod_timer(&ieee->associate_timer, jiffies + (HZ / 2));
}
kfree(challenge);
}
@@ -1487,11 +1440,11 @@ static void rtllib_associate_step2(struct rtllib_device *ieee)
ieee->softmac_stats.tx_ass_rq++;
skb = rtllib_association_req(beacon, ieee);
- if (!skb)
+ if (!skb) {
rtllib_associate_abort(ieee);
- else {
+ } else {
softmac_mgmt_xmit(skb, ieee);
- mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
+ mod_timer(&ieee->associate_timer, jiffies + (HZ / 2));
}
}
@@ -1518,7 +1471,7 @@ static void rtllib_associate_complete_wq(void *data)
netdev_info(ieee->dev, "Using G rates:%d\n", ieee->rate);
} else {
ieee->rate = 22;
- ieee->SetWirelessMode(ieee->dev, IEEE_B);
+ ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_B);
netdev_info(ieee->dev, "Using B rates:%d\n", ieee->rate);
}
if (ieee->ht_info->bCurrentHTSupport && ieee->ht_info->enable_ht) {
@@ -1546,10 +1499,6 @@ static void rtllib_associate_complete_wq(void *data)
netdev_info(ieee->dev, "silent reset associate\n");
ieee->is_silent_reset = false;
}
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
}
static void rtllib_sta_send_associnfo(struct rtllib_device *ieee)
@@ -1560,7 +1509,7 @@ static void rtllib_associate_complete(struct rtllib_device *ieee)
{
del_timer_sync(&ieee->associate_timer);
- ieee->state = RTLLIB_LINKED;
+ ieee->link_state = MAC80211_LINKED;
rtllib_sta_send_associnfo(ieee);
schedule_work(&ieee->associate_complete_wq);
@@ -1575,9 +1524,6 @@ static void rtllib_associate_procedure_wq(void *data)
ieee->rtllib_ips_leave(ieee->dev);
mutex_lock(&ieee->wx_mutex);
- if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee->dev);
-
rtllib_stop_scan(ieee);
HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
if (ieee->rf_power_state == rf_off) {
@@ -1603,7 +1549,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
/* we are interested in new only if we are not associated
* and we are not associating / authenticating
*/
- if (ieee->state != RTLLIB_NOLINK)
+ if (ieee->link_state != MAC80211_NOLINK)
return;
if ((ieee->iw_mode == IW_MODE_INFRA) && !(net->capability &
@@ -1638,11 +1584,12 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
net->ssid_len = net->hidden_ssid_len;
ssidbroad = 1;
}
- } else
+ } else {
ssidmatch =
(ieee->current_network.ssid_len == net->ssid_len) &&
(!strncmp(ieee->current_network.ssid, net->ssid,
net->ssid_len));
+ }
/* if the user set the AP, check if it matches.
* if the network does not broadcast essid we check the
@@ -1701,10 +1648,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
ieee->ht_info->bCurrentHTSupport =
false;
- ieee->state = RTLLIB_ASSOCIATING;
- if (ieee->LedControlHandler != NULL)
- ieee->LedControlHandler(ieee->dev,
- LED_CTL_START_TO_LINK);
+ ieee->link_state = RTLLIB_ASSOCIATING;
schedule_delayed_work(
&ieee->associate_procedure_wq, 0);
} else {
@@ -1712,19 +1656,17 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
(ieee->modulation &
RTLLIB_OFDM_MODULATION)) {
ieee->rate = 108;
- ieee->SetWirelessMode(ieee->dev,
- IEEE_G);
+ ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_G);
netdev_info(ieee->dev,
"Using G rates\n");
} else {
ieee->rate = 22;
- ieee->SetWirelessMode(ieee->dev,
- IEEE_B);
+ ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_B);
netdev_info(ieee->dev,
"Using B rates\n");
}
memset(ieee->dot11ht_oper_rate_set, 0, 16);
- ieee->state = RTLLIB_LINKED;
+ ieee->link_state = MAC80211_LINKED;
}
}
}
@@ -1738,12 +1680,11 @@ static void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(target, &ieee->network_list, list) {
-
/* if the state becomes different from NOLINK it means
 * we have found what we are searching for
*/
- if (ieee->state != RTLLIB_NOLINK)
+ if (ieee->link_state != MAC80211_NOLINK)
break;
if (ieee->scan_age == 0 || time_after(target->last_scanned +
@@ -1885,9 +1826,9 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
status_code = le16_to_cpu(response_head->status);
if ((status_code == WLAN_STATUS_ASSOC_DENIED_RATES ||
status_code == WLAN_STATUS_CAPS_UNSUPPORTED) &&
- ((ieee->mode == IEEE_G) &&
- (ieee->current_network.mode == IEEE_N_24G) &&
- (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT-1)))) {
+ ((ieee->mode == WIRELESS_MODE_G) &&
+ (ieee->current_network.mode == WIRELESS_MODE_N_24G) &&
+ (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT - 1)))) {
ieee->ht_info->iot_action |= HT_IOT_ACT_PURE_N_MODE;
} else {
ieee->AsocRetryCount = 0;
@@ -1925,7 +1866,6 @@ static inline void rtllib_rx_assoc_rq(struct rtllib_device *ieee,
{
u8 dest[ETH_ALEN];
-
ieee->softmac_stats.rx_ass_rq++;
if (assoc_rq_parse(ieee->dev, skb, dest) != -1)
rtllib_resp_to_assoc_rq(ieee, dest);
@@ -1935,7 +1875,6 @@ static inline void rtllib_rx_assoc_rq(struct rtllib_device *ieee,
void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr)
{
-
struct sk_buff *buf = rtllib_null_func(ieee, pwr);
if (buf)
@@ -2011,7 +1950,7 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
LPSAwakeIntvl_tmp = period +
(psc->LPSAwakeIntvl -
period) -
- ((psc->LPSAwakeIntvl-period) %
+ ((psc->LPSAwakeIntvl - period) %
period);
else
LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl;
@@ -2021,7 +1960,7 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
ieee->current_network.tim.tim_count)
LPSAwakeIntvl_tmp = count +
(psc->LPSAwakeIntvl - count) -
- ((psc->LPSAwakeIntvl-count)%period);
+ ((psc->LPSAwakeIntvl - count) % period);
else
LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl;
}
@@ -2033,8 +1972,6 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
}
return 1;
-
-
}
static inline void rtllib_sta_ps(struct work_struct *work)
@@ -2050,7 +1987,7 @@ static inline void rtllib_sta_ps(struct work_struct *work)
if ((ieee->ps == RTLLIB_PS_DISABLED ||
ieee->iw_mode != IW_MODE_INFRA ||
- ieee->state != RTLLIB_LINKED)) {
+ ieee->link_state != MAC80211_LINKED)) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
rtllib_sta_wakeup(ieee, 1);
@@ -2073,7 +2010,6 @@ static inline void rtllib_sta_ps(struct work_struct *work)
ieee->ps_time = time;
}
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
-
}
ieee->bAwakePktSent = false;
@@ -2088,7 +2024,6 @@ static inline void rtllib_sta_ps(struct work_struct *work)
out:
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
@@ -2105,7 +2040,6 @@ static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
}
}
return;
-
}
if (ieee->sta_sleep == LPS_IS_SLEEP)
@@ -2207,7 +2141,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
WLAN_FC_GET_STYPE(frame_ctl));
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == RTLLIB_ASSOCIATING_AUTHENTICATED &&
+ ieee->link_state == RTLLIB_ASSOCIATING_AUTHENTICATED &&
(ieee->iw_mode == IW_MODE_INFRA)) {
errcode = assoc_parse(ieee, skb, &aid);
if (!errcode) {
@@ -2217,7 +2151,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
if (!network)
return 1;
- ieee->state = RTLLIB_LINKED;
+ ieee->link_state = MAC80211_LINKED;
ieee->assoc_id = aid;
ieee->softmac_stats.rx_ass_ok++;
/* station support qos */
@@ -2286,7 +2220,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
}
if (ieee->open_wep || !challenge) {
- ieee->state = RTLLIB_ASSOCIATING_AUTHENTICATED;
+ ieee->link_state = RTLLIB_ASSOCIATING_AUTHENTICATED;
ieee->softmac_stats.rx_auth_rs_ok++;
if (!(ieee->ht_info->iot_action & HT_IOT_ACT_PURE_N_MODE)) {
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
@@ -2301,14 +2235,14 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
}
/* Dummy wireless mode setting to avoid encryption issues */
if (bSupportNmode) {
- ieee->SetWirelessMode(ieee->dev,
+ ieee->set_wireless_mode(ieee->dev,
ieee->current_network.mode);
} else {
/*TODO*/
- ieee->SetWirelessMode(ieee->dev, IEEE_G);
+ ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_G);
}
- if ((ieee->current_network.mode == IEEE_N_24G) &&
+ if ((ieee->current_network.mode == WIRELESS_MODE_N_24G) &&
bHalfSupportNmode) {
netdev_info(ieee->dev, "======>enter half N mode\n");
ieee->bHalfWirelessN24GMode = true;
@@ -2325,9 +2259,8 @@ static inline int
rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats)
{
-
if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
- if (ieee->state == RTLLIB_ASSOCIATING_AUTHENTICATING &&
+ if (ieee->link_state == RTLLIB_ASSOCIATING_AUTHENTICATING &&
(ieee->iw_mode == IW_MODE_INFRA)) {
netdev_dbg(ieee->dev,
"Received authentication response");
@@ -2352,25 +2285,21 @@ rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
* both for disassociation and deauthentication
*/
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == RTLLIB_LINKED &&
+ ieee->link_state == MAC80211_LINKED &&
(ieee->iw_mode == IW_MODE_INFRA)) {
frame_ctl = le16_to_cpu(header->frame_ctl);
netdev_info(ieee->dev,
"==========>received disassoc/deauth(%x) frame, reason code:%x\n",
WLAN_FC_GET_STYPE(frame_ctl),
((struct rtllib_disassoc *)skb->data)->reason);
- ieee->state = RTLLIB_ASSOCIATING;
+ ieee->link_state = RTLLIB_ASSOCIATING;
ieee->softmac_stats.reassoc++;
ieee->is_roaming = true;
ieee->link_detect_info.bBusyTraffic = false;
rtllib_disassociate(ieee);
RemovePeerTS(ieee, header->addr2);
- if (ieee->LedControlHandler != NULL)
- ieee->LedControlHandler(ieee->dev,
- LED_CTL_START_TO_LINK);
-
if (!(ieee->rtllib_ap_sec_type(ieee) &
- (SEC_ALG_CCMP|SEC_ALG_TKIP)))
+ (SEC_ALG_CCMP | SEC_ALG_TKIP)))
schedule_delayed_work(
&ieee->associate_procedure_wq, 5);
}
@@ -2437,7 +2366,6 @@ inline int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
*/
void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
{
-
unsigned int queue_index = txb->queue_index;
unsigned long flags;
int i;
@@ -2482,7 +2410,6 @@ void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
rtllib_txb_free(txb);
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
void rtllib_reset_queue(struct rtllib_device *ieee)
@@ -2497,7 +2424,6 @@ void rtllib_reset_queue(struct rtllib_device *ieee)
}
ieee->queue_stop = 0;
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
EXPORT_SYMBOL(rtllib_reset_queue);
@@ -2534,25 +2460,17 @@ static void rtllib_start_master_bss(struct rtllib_device *ieee)
ether_addr_copy(ieee->current_network.bssid, ieee->dev->dev_addr);
ieee->set_chan(ieee->dev, ieee->current_network.channel);
- ieee->state = RTLLIB_LINKED;
+ ieee->link_state = MAC80211_LINKED;
ieee->link_change(ieee->dev);
notify_wx_assoc_event(ieee);
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
netif_carrier_on(ieee->dev);
}
static void rtllib_start_monitor_mode(struct rtllib_device *ieee)
{
/* reset hardware status */
- if (ieee->raw_tx) {
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
+ if (ieee->raw_tx)
netif_carrier_on(ieee->dev);
- }
}
static void rtllib_start_ibss_wq(void *data)
@@ -2579,12 +2497,11 @@ static void rtllib_start_ibss_wq(void *data)
ieee->ssid_set = 1;
}
- ieee->state = RTLLIB_NOLINK;
- ieee->mode = IEEE_G;
+ ieee->link_state = MAC80211_NOLINK;
+ ieee->mode = WIRELESS_MODE_G;
/* check if we have this cell in our network list */
rtllib_softmac_check_all_nets(ieee);
-
/* if not then the state is not linked. Maybe the user switched to
* ad-hoc mode just after being in monitor mode, or just after
 * being in managed mode for a very short time (so the card has had no
@@ -2592,25 +2509,24 @@ static void rtllib_start_ibss_wq(void *data)
* after setting ad-hoc mode. So we have to give another try..
 * Here, in ibss mode, it should be safe to do this without extra care
* (in bss mode we had to make sure no-one tried to associate when
- * we had just checked the ieee->state and we was going to start the
+ * we had just checked the ieee->link_state and we were going to start the
* scan) because in ibss mode the rtllib_new_net function, when
- * finds a good net, just set the ieee->state to RTLLIB_LINKED,
+ * finds a good net, just set the ieee->link_state to MAC80211_LINKED,
 * so, at worst, we waste a bit of time initiating an unneeded synchronous
 * scan, which will stop at the first round because it sees the state as
 * associated.
*/
- if (ieee->state == RTLLIB_NOLINK)
+ if (ieee->link_state == MAC80211_NOLINK)
rtllib_start_scan_syncro(ieee, 0);
/* the network definitively is not here.. create a new cell */
- if (ieee->state == RTLLIB_NOLINK) {
+ if (ieee->link_state == MAC80211_NOLINK) {
netdev_info(ieee->dev, "creating new IBSS cell\n");
ieee->current_network.channel = ieee->bss_start_channel;
if (!ieee->wap_set)
eth_random_addr(ieee->current_network.bssid);
if (ieee->modulation & RTLLIB_CCK_MODULATION) {
-
ieee->current_network.rates_len = 4;
ieee->current_network.rates[0] =
@@ -2622,8 +2538,9 @@ static void rtllib_start_ibss_wq(void *data)
ieee->current_network.rates[3] =
RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB;
- } else
+ } else {
ieee->current_network.rates_len = 0;
+ }
if (ieee->modulation & RTLLIB_OFDM_MODULATION) {
ieee->current_network.rates_ex_len = 8;
@@ -2652,35 +2569,28 @@ static void rtllib_start_ibss_wq(void *data)
}
ieee->current_network.qos_data.supported = 0;
- ieee->SetWirelessMode(ieee->dev, IEEE_G);
+ ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_G);
ieee->current_network.mode = ieee->mode;
ieee->current_network.atim_window = 0;
ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
}
netdev_info(ieee->dev, "%s(): ieee->mode = %d\n", __func__, ieee->mode);
- if ((ieee->mode == IEEE_N_24G) || (ieee->mode == IEEE_N_5G))
+ if (ieee->mode == WIRELESS_MODE_N_24G)
HTUseDefaultSetting(ieee);
else
ieee->ht_info->bCurrentHTSupport = false;
ieee->SetHwRegHandler(ieee->dev, HW_VAR_MEDIA_STATUS,
- (u8 *)(&ieee->state));
+ (u8 *)(&ieee->link_state));
- ieee->state = RTLLIB_LINKED;
+ ieee->link_state = MAC80211_LINKED;
ieee->link_change(ieee->dev);
HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- if (ieee->LedControlHandler != NULL)
- ieee->LedControlHandler(ieee->dev, LED_CTL_LINK);
-
rtllib_start_send_beacons(ieee);
notify_wx_assoc_event(ieee);
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
netif_carrier_on(ieee->dev);
mutex_unlock(&ieee->wx_mutex);
@@ -2708,7 +2618,7 @@ static void rtllib_start_bss(struct rtllib_device *ieee)
rtllib_softmac_check_all_nets(ieee);
/* ensure no-one starts an association process (thus setting
- * the ieee->state to rtllib_ASSOCIATING) while we
+ * the ieee->link_state to rtllib_ASSOCIATING) while we
* have just checked it and we are going to enable scan.
* The rtllib_new_net function is always called with
* lock held (from both rtllib_softmac_check_all_nets and
@@ -2716,7 +2626,7 @@ static void rtllib_start_bss(struct rtllib_device *ieee)
*/
spin_lock_irqsave(&ieee->lock, flags);
- if (ieee->state == RTLLIB_NOLINK)
+ if (ieee->link_state == MAC80211_NOLINK)
rtllib_start_scan(ieee);
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -2734,11 +2644,9 @@ void rtllib_disassociate(struct rtllib_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
rtllib_reset_queue(ieee);
- if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee->dev);
if (IS_DOT11D_ENABLE(ieee))
dot11d_reset(ieee);
- ieee->state = RTLLIB_NOLINK;
+ ieee->link_state = MAC80211_NOLINK;
ieee->is_set_key = false;
ieee->wap_set = 0;
@@ -2757,14 +2665,14 @@ static void rtllib_associate_retry_wq(void *data)
if (!ieee->proto_started)
goto exit;
- if (ieee->state != RTLLIB_ASSOCIATING_RETRY)
+ if (ieee->link_state != RTLLIB_ASSOCIATING_RETRY)
goto exit;
- /* until we do not set the state to RTLLIB_NOLINK
+ /* until we do not set the state to MAC80211_NOLINK
 * there is no possibility of someone else trying
* to start an association procedure (we get here with
- * ieee->state = RTLLIB_ASSOCIATING).
- * When we set the state to RTLLIB_NOLINK it is possible
+ * ieee->link_state = RTLLIB_ASSOCIATING).
+ * When we set the state to MAC80211_NOLINK it is possible
 * that the RX path runs an attempt to associate, but
* both rtllib_softmac_check_all_nets and the
* RX path works with ieee->lock held so there are no
@@ -2774,13 +2682,13 @@ static void rtllib_associate_retry_wq(void *data)
* state and we are going to start the scan.
*/
ieee->beinretry = true;
- ieee->state = RTLLIB_NOLINK;
+ ieee->link_state = MAC80211_NOLINK;
rtllib_softmac_check_all_nets(ieee);
spin_lock_irqsave(&ieee->lock, flags);
- if (ieee->state == RTLLIB_NOLINK)
+ if (ieee->link_state == MAC80211_NOLINK)
rtllib_start_scan(ieee);
spin_unlock_irqrestore(&ieee->lock, flags);
@@ -2806,7 +2714,6 @@ static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
b->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_BEACON);
return skb;
-
}
struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
@@ -2840,7 +2747,6 @@ void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
}
EXPORT_SYMBOL(rtllib_softmac_stop_protocol);
-
void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
{
if (!ieee->proto_started)
@@ -2859,10 +2765,10 @@ void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
cancel_delayed_work_sync(&ieee->link_change_wq);
rtllib_stop_scan(ieee);
- if (ieee->state <= RTLLIB_ASSOCIATING_AUTHENTICATED)
- ieee->state = RTLLIB_NOLINK;
+ if (ieee->link_state <= RTLLIB_ASSOCIATING_AUTHENTICATED)
+ ieee->link_state = MAC80211_NOLINK;
- if (ieee->state == RTLLIB_LINKED) {
+ if (ieee->link_state == MAC80211_LINKED) {
if (ieee->iw_mode == IW_MODE_INFRA)
SendDisassociation(ieee, 1, WLAN_REASON_DEAUTH_LEAVING);
rtllib_disassociate(ieee);
@@ -2918,27 +2824,25 @@ void rtllib_start_protocol(struct rtllib_device *ieee)
ieee->last_packet_time[i] = 0;
}
- if (ieee->UpdateBeaconInterruptHandler)
- ieee->UpdateBeaconInterruptHandler(ieee->dev, false);
-
ieee->wmm_acm = 0;
/* if the user set the MAC of the ad-hoc cell and then
 * switched to managed mode, should we make sure that association
 * attempts do not fail just because the user provided the ESSID
 * and the NIC is still checking for the AP MAC ??
*/
- if (ieee->iw_mode == IW_MODE_INFRA) {
+ switch (ieee->iw_mode) {
+ case IW_MODE_INFRA:
rtllib_start_bss(ieee);
- } else if (ieee->iw_mode == IW_MODE_ADHOC) {
- if (ieee->UpdateBeaconInterruptHandler)
- ieee->UpdateBeaconInterruptHandler(ieee->dev, true);
-
+ break;
+ case IW_MODE_ADHOC:
rtllib_start_ibss(ieee);
-
- } else if (ieee->iw_mode == IW_MODE_MASTER) {
+ break;
+ case IW_MODE_MASTER:
rtllib_start_master_bss(ieee);
- } else if (ieee->iw_mode == IW_MODE_MONITOR) {
+ break;
+ case IW_MODE_MONITOR:
rtllib_start_monitor_mode(ieee);
+ break;
}
}
@@ -2948,7 +2852,7 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
memset(&ieee->current_network, 0, sizeof(struct rtllib_network));
- ieee->state = RTLLIB_NOLINK;
+ ieee->link_state = MAC80211_NOLINK;
for (i = 0; i < 5; i++)
ieee->seq_ctrl[i] = 0;
ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
@@ -3139,7 +3043,7 @@ static void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
u8 i;
bool bFilterOutNonAssociatedBSSID = false;
- rtllib->state = RTLLIB_NOLINK;
+ rtllib->link_state = MAC80211_NOLINK;
for (i = 0; i < 6; i++)
rtllib->current_network.bssid[i] = 0x55;
@@ -3155,7 +3059,6 @@ static void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_CECHK_BSSID,
(u8 *)(&bFilterOutNonAssociatedBSSID));
notify_wx_assoc_event(rtllib);
-
}
static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
@@ -3167,7 +3070,7 @@ static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
RemovePeerTS(rtllib, asSta);
if (memcmp(rtllib->current_network.bssid, asSta, 6) == 0) {
- rtllib->state = RTLLIB_NOLINK;
+ rtllib->link_state = MAC80211_NOLINK;
for (i = 0; i < 6; i++)
rtllib->current_network.bssid[i] = 0x22;
@@ -3179,9 +3082,7 @@ static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
rtllib->current_network.bssid);
-
}
-
}
static void
@@ -3198,7 +3099,7 @@ rtllib_MgntDisconnectAP(
rtllib_MlmeDisassociateRequest(rtllib, rtllib->current_network.bssid,
asRsn);
- rtllib->state = RTLLIB_NOLINK;
+ rtllib->link_state = MAC80211_NOLINK;
}
bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
@@ -3206,12 +3107,11 @@ bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
if (rtllib->ps != RTLLIB_PS_DISABLED)
rtllib->sta_wake_up(rtllib->dev);
- if (rtllib->state == RTLLIB_LINKED) {
+ if (rtllib->link_state == MAC80211_LINKED) {
if (rtllib->iw_mode == IW_MODE_ADHOC)
rtllib_MgntDisconnectIBSS(rtllib);
if (rtllib->iw_mode == IW_MODE_INFRA)
rtllib_MgntDisconnectAP(rtllib, asRsn);
-
}
return true;
@@ -3226,11 +3126,10 @@ void notify_wx_assoc_event(struct rtllib_device *ieee)
return;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (ieee->state == RTLLIB_LINKED)
+ if (ieee->link_state == MAC80211_LINKED) {
memcpy(wrqu.ap_addr.sa_data, ieee->current_network.bssid,
ETH_ALEN);
- else {
-
+ } else {
netdev_info(ieee->dev, "%s(): Tell user space disconnected\n",
__func__);
eth_zero_addr(wrqu.ap_addr.sa_data);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 1f2fa711e60b..2de63d1f2009 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -15,16 +15,6 @@
#include "rtllib.h"
#include "dot11d.h"
-/* FIXME: add A freqs */
-
-const long rtllib_wlan_frequencies[] = {
- 2412, 2417, 2422, 2427,
- 2432, 2437, 2442, 2447,
- 2452, 2457, 2462, 2467,
- 2472, 2484
-};
-EXPORT_SYMBOL(rtllib_wlan_frequencies);
-
int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -43,15 +33,8 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
if (fwrq->e == 1) {
if ((fwrq->m >= (int)2.412e8 &&
fwrq->m <= (int)2.487e8)) {
- int f = fwrq->m / 100000;
- int c = 0;
-
- while ((c < 14) && (f != rtllib_wlan_frequencies[c]))
- c++;
-
- /* hack to fall through */
+ fwrq->m = ieee80211_freq_khz_to_channel(fwrq->m / 100);
fwrq->e = 0;
- fwrq->m = c + 1;
}
}
@@ -70,7 +53,7 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
if (ieee->iw_mode == IW_MODE_ADHOC ||
ieee->iw_mode == IW_MODE_MASTER)
- if (ieee->state == RTLLIB_LINKED) {
+ if (ieee->link_state == MAC80211_LINKED) {
rtllib_stop_send_beacons(ieee);
rtllib_start_send_beacons(ieee);
}
@@ -83,7 +66,6 @@ out:
}
EXPORT_SYMBOL(rtllib_wx_set_freq);
-
int rtllib_wx_get_freq(struct rtllib_device *ieee,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -92,8 +74,8 @@ int rtllib_wx_get_freq(struct rtllib_device *ieee,
if (ieee->current_network.channel == 0)
return -1;
- fwrq->m = rtllib_wlan_frequencies[ieee->current_network.channel-1] *
- 100000;
+ fwrq->m = ieee80211_channel_to_freq_khz(ieee->current_network.channel,
+ NL80211_BAND_2GHZ) * 100;
fwrq->e = 1;
return 0;
}
@@ -113,8 +95,8 @@ int rtllib_wx_get_wap(struct rtllib_device *ieee,
/* We want to avoid giving the user inconsistent info */
spin_lock_irqsave(&ieee->lock, flags);
- if (ieee->state != RTLLIB_LINKED &&
- ieee->state != RTLLIB_LINKED_SCANNING &&
+ if (ieee->link_state != MAC80211_LINKED &&
+ ieee->link_state != MAC80211_LINKED_SCANNING &&
ieee->wap_set == 0)
eth_zero_addr(wrqu->ap_addr.sa_data);
@@ -128,13 +110,11 @@ int rtllib_wx_get_wap(struct rtllib_device *ieee,
}
EXPORT_SYMBOL(rtllib_wx_get_wap);
-
int rtllib_wx_set_wap(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *awrq,
char *extra)
{
-
int ret = 0;
unsigned long flags;
@@ -164,7 +144,6 @@ int rtllib_wx_set_wap(struct rtllib_device *ieee,
goto out;
}
-
if (ifup)
rtllib_stop_protocol(ieee, true);
@@ -205,8 +184,8 @@ int rtllib_wx_get_essid(struct rtllib_device *ieee, struct iw_request_info *a,
goto out;
}
- if (ieee->state != RTLLIB_LINKED &&
- ieee->state != RTLLIB_LINKED_SCANNING &&
+ if (ieee->link_state != MAC80211_LINKED &&
+ ieee->link_state != MAC80211_LINKED_SCANNING &&
ieee->ssid_set == 0) {
ret = -1;
goto out;
@@ -220,7 +199,6 @@ out:
spin_unlock_irqrestore(&ieee->lock, flags);
return ret;
-
}
EXPORT_SYMBOL(rtllib_wx_get_essid);
@@ -228,10 +206,9 @@ int rtllib_wx_set_rate(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
-
u32 target_rate = wrqu->bitrate.value;
- ieee->rate = target_rate/100000;
+ ieee->rate = target_rate / 100000;
return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_rate);
@@ -250,14 +227,13 @@ int rtllib_wx_get_rate(struct rtllib_device *ieee,
}
EXPORT_SYMBOL(rtllib_wx_get_rate);
-
int rtllib_wx_set_rts(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- if (wrqu->rts.disabled || !wrqu->rts.fixed)
+ if (wrqu->rts.disabled || !wrqu->rts.fixed) {
ieee->rts = DEFAULT_RTS_THRESHOLD;
- else {
+ } else {
if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
wrqu->rts.value > MAX_RTS_THRESHOLD)
return -EINVAL;
@@ -332,6 +308,7 @@ void rtllib_wx_sync_scan_wq(void *data)
enum ht_channel_width bandwidth = 0;
int b40M = 0;
+ mutex_lock(&ieee->wx_mutex);
if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
rtllib_start_scan_syncro(ieee, 0);
goto out;
@@ -339,17 +316,14 @@ void rtllib_wx_sync_scan_wq(void *data)
chan = ieee->current_network.channel;
- ieee->LeisurePSLeave(ieee->dev);
+ ieee->leisure_ps_leave(ieee->dev);
/* notify AP to be in PS mode */
rtllib_sta_ps_send_null_frame(ieee, 1);
rtllib_sta_ps_send_null_frame(ieee, 1);
rtllib_stop_all_queues(ieee);
-
- if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee->dev);
rtllib_stop_send_beacons(ieee);
- ieee->state = RTLLIB_LINKED_SCANNING;
+ ieee->link_state = MAC80211_LINKED_SCANNING;
ieee->link_change(ieee->dev);
/* wait for ps packet to be kicked out successfully */
msleep(50);
@@ -361,7 +335,7 @@ void rtllib_wx_sync_scan_wq(void *data)
b40M = 1;
chan_offset = ieee->ht_info->CurSTAExtChnlOffset;
bandwidth = (enum ht_channel_width)ieee->ht_info->bCurBW40MHz;
- ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20,
+ ieee->set_bw_mode_handler(ieee->dev, HT_CHANNEL_WIDTH_20,
HT_EXTCHNL_OFFSET_NO_EXT);
}
@@ -374,14 +348,14 @@ void rtllib_wx_sync_scan_wq(void *data)
ieee->set_chan(ieee->dev, chan - 2);
else
ieee->set_chan(ieee->dev, chan);
- ieee->SetBWModeHandler(ieee->dev, bandwidth, chan_offset);
+ ieee->set_bw_mode_handler(ieee->dev, bandwidth, chan_offset);
} else {
ieee->set_chan(ieee->dev, chan);
}
ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE);
- ieee->state = RTLLIB_LINKED;
+ ieee->link_state = MAC80211_LINKED;
ieee->link_change(ieee->dev);
/* Notify AP that I wake up again */
@@ -392,10 +366,6 @@ void rtllib_wx_sync_scan_wq(void *data)
ieee->link_detect_info.NumRecvBcnInPeriod = 1;
ieee->link_detect_info.NumRecvDataInPeriod = 1;
}
-
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
rtllib_start_send_beacons(ieee);
@@ -403,7 +373,6 @@ void rtllib_wx_sync_scan_wq(void *data)
out:
mutex_unlock(&ieee->wx_mutex);
-
}
int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a,
@@ -411,21 +380,18 @@ int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a,
{
int ret = 0;
- mutex_lock(&ieee->wx_mutex);
-
if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) {
ret = -1;
goto out;
}
- if (ieee->state == RTLLIB_LINKED) {
+ if (ieee->link_state == MAC80211_LINKED) {
schedule_work(&ieee->wx_sync_scan_wq);
/* intentionally forget to up sem */
return 0;
}
out:
- mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_scan);
@@ -434,7 +400,6 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
-
int ret = 0, len;
short proto_started;
unsigned long flags;
@@ -454,7 +419,6 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee,
if (proto_started)
rtllib_stop_protocol(ieee, true);
-
/* this is just to be sure that the GET wx callback
 * has consistent info. Not needed otherwise
*/
@@ -492,7 +456,6 @@ int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
-
int *parms = (int *)extra;
int enable = (parms[0] > 0);
short prev = ieee->raw_tx;
@@ -508,12 +471,8 @@ int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
ieee->raw_tx ? "enabled" : "disabled");
if (ieee->iw_mode == IW_MODE_MONITOR) {
- if (prev == 0 && ieee->raw_tx) {
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
-
+ if (prev == 0 && ieee->raw_tx)
netif_carrier_on(ieee->dev);
- }
if (prev && ieee->raw_tx == 1)
netif_carrier_off(ieee->dev);
@@ -530,14 +489,13 @@ int rtllib_wx_get_name(struct rtllib_device *ieee, struct iw_request_info *info,
{
const char *b = ieee->modulation & RTLLIB_CCK_MODULATION ? "b" : "";
const char *g = ieee->modulation & RTLLIB_OFDM_MODULATION ? "g" : "";
- const char *n = ieee->mode & (IEEE_N_24G | IEEE_N_5G) ? "n" : "";
+ const char *n = ieee->mode & (WIRELESS_MODE_N_24G) ? "n" : "";
scnprintf(wrqu->name, sizeof(wrqu->name), "802.11%s%s%s", b, g, n);
return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_name);
-
/* this is mostly stolen from hostap */
int rtllib_wx_set_power(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -583,12 +541,10 @@ int rtllib_wx_set_power(struct rtllib_device *ieee,
default:
ret = -EINVAL;
goto exit;
-
}
exit:
mutex_unlock(&ieee->wx_mutex);
return ret;
-
}
EXPORT_SYMBOL(rtllib_wx_set_power);
@@ -625,6 +581,5 @@ int rtllib_wx_get_power(struct rtllib_device *ieee,
exit:
mutex_unlock(&ieee->wx_mutex);
return 0;
-
}
EXPORT_SYMBOL(rtllib_wx_get_power);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 9ab8ee46ef66..ec038ef806c3 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -406,7 +406,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
if (is_broadcast_ether_addr(skb->data + 16))
return;
- if (ieee->mode < IEEE_N_24G) {
+ if (ieee->mode < WIRELESS_MODE_N_24G) {
if (skb->len > ieee->rts) {
tcb_desc->bRTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
@@ -486,7 +486,7 @@ static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
!tcb_desc->tx_use_drv_assinged_rate) {
if (ieee->iw_mode == IW_MODE_INFRA ||
ieee->iw_mode == IW_MODE_ADHOC)
- tcb_desc->RATRIndex = 0;
+ tcb_desc->ratr_index = 0;
}
}
@@ -572,8 +572,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
/* If there is no driver handler to take the TXB, don't bother
* creating it...
*/
- if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
- IEEE_SOFTMAC_TX_QUEUE)) ||
+ if (!(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) ||
((!ieee->softmac_data_hard_start_xmit &&
(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
netdev_warn(ieee->dev, "No xmit handler.\n");
@@ -892,7 +891,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
tcb_desc->tx_dis_rate_fallback = 1;
}
- tcb_desc->RATRIndex = 7;
+ tcb_desc->ratr_index = 7;
tcb_desc->tx_use_drv_assinged_rate = 1;
} else {
if (is_multicast_ether_addr(header.addr1))
@@ -916,7 +915,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
tcb_desc->tx_dis_rate_fallback = 1;
}
- tcb_desc->RATRIndex = 7;
+ tcb_desc->ratr_index = 7;
tcb_desc->tx_use_drv_assinged_rate = 1;
tcb_desc->bdhcp = 1;
}
@@ -938,11 +937,6 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
rtllib_softmac_xmit(txb, ieee);
} else {
- if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
- stats->tx_packets++;
- stats->tx_bytes += le16_to_cpu(txb->payload_size);
- return 0;
- }
rtllib_txb_free(txb);
}
}
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index d6691f3c7c70..e9469bfef3dd 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -19,7 +19,7 @@
#include "rtllib.h"
static const char * const rtllib_modes[] = {
- "a", "b", "g", "?", "N-24G", "N-5G"
+ "a", "b", "g", "?", "N-24G"
};
#define MAX_CUSTOM_LEN 64
@@ -118,7 +118,7 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
max_rate = rate;
}
- if (network->mode >= IEEE_N_24G) {
+ if (network->mode >= WIRELESS_MODE_N_24G) {
struct ht_capab_ele *ht_cap = NULL;
bool is40M = false, isShortGI = false;
u8 max_mcs = 0;
@@ -416,22 +416,6 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
*/
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
-
- if (ieee->set_security)
- ieee->set_security(dev, &sec);
-
- /* Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack.
- */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- netdev_dbg(dev, "%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
return 0;
}
EXPORT_SYMBOL(rtllib_wx_set_encode);
@@ -623,15 +607,6 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
sec.flags &= ~SEC_LEVEL;
}
done:
- if (ieee->set_security)
- ieee->set_security(ieee->dev, &sec);
-
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- netdev_dbg(ieee->dev, "Port reset failed\n");
- return -EINVAL;
- }
return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_encode_ext);
@@ -644,7 +619,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
bool deauth = false;
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- if (ieee->state != RTLLIB_LINKED)
+ if (ieee->link_state != MAC80211_LINKED)
return -ENOLINK;
mutex_lock(&ieee->wx_mutex);
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index c3c1b49674d3..1e9e1089032b 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -2275,7 +2275,10 @@ void Hal_EfuseParseBTCoexistInfo_8723B(
pHalData->EEPROMBluetoothAntNum = tempval & BIT(0);
/* EFUSE_0xC3[6] == 0, S1(Main)-RF_PATH_A; */
/* EFUSE_0xC3[6] == 1, S0(Aux)-RF_PATH_B */
- pHalData->ant_path = (tempval & BIT(6))? RF_PATH_B : RF_PATH_A;
+ if (tempval & BIT(6))
+ pHalData->ant_path = RF_PATH_B;
+ else
+ pHalData->ant_path = RF_PATH_A;
} else {
pHalData->EEPROMBluetoothAntNum = Ant_x1;
if (pHalData->PackageType == PACKAGE_QFN68)
diff --git a/drivers/staging/rtl8723bs/include/sta_info.h b/drivers/staging/rtl8723bs/include/sta_info.h
index 69c377eeeaf0..1ea3fe22b99a 100644
--- a/drivers/staging/rtl8723bs/include/sta_info.h
+++ b/drivers/staging/rtl8723bs/include/sta_info.h
@@ -16,9 +16,9 @@
/* if mode == 0, the sta is allowed once the addr is hit. */
/* if mode == 1, the sta is rejected once the addr is not hit. */
struct rtw_wlan_acl_node {
- struct list_head list;
- u8 addr[ETH_ALEN];
- u8 valid;
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 valid;
};
/* mode = 0, disable */
@@ -340,19 +340,19 @@ struct sta_priv {
static inline u32 wifi_mac_hash(u8 *mac)
{
- u32 x;
+ u32 x;
- x = mac[0];
- x = (x << 2) ^ mac[1];
- x = (x << 2) ^ mac[2];
- x = (x << 2) ^ mac[3];
- x = (x << 2) ^ mac[4];
- x = (x << 2) ^ mac[5];
+ x = mac[0];
+ x = (x << 2) ^ mac[1];
+ x = (x << 2) ^ mac[2];
+ x = (x << 2) ^ mac[3];
+ x = (x << 2) ^ mac[4];
+ x = (x << 2) ^ mac[5];
- x ^= x >> 8;
- x = x & (NUM_STA - 1);
+ x ^= x >> 8;
+ x = x & (NUM_STA - 1);
- return x;
+ return x;
}
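For illustration, the station hash above can be exercised outside the driver. This standalone sketch assumes NUM_STA is 32 (a power of two, which the final mask relies on); the driver's actual value may differ.

#include <stdint.h>
#include <stdio.h>

#define NUM_STA 32	/* assumed value; must be a power of two for the mask below */

static uint32_t wifi_mac_hash(const uint8_t *mac)
{
	uint32_t x = mac[0];

	x = (x << 2) ^ mac[1];
	x = (x << 2) ^ mac[2];
	x = (x << 2) ^ mac[3];
	x = (x << 2) ^ mac[4];
	x = (x << 2) ^ mac[5];

	x ^= x >> 8;
	return x & (NUM_STA - 1);	/* bucket index in [0, NUM_STA) */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

	printf("bucket %u\n", wifi_mac_hash(mac));
	return 0;
}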
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 84a9f4dd8f95..2ae7843abdf7 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -305,7 +305,6 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
memcpy(pwlanhdr->addr2, pnetwork->network.mac_address, ETH_ALEN);
memcpy(pwlanhdr->addr3, pnetwork->network.mac_address, ETH_ALEN);
-
pbuf += sizeof(struct ieee80211_hdr_3addr);
len = sizeof(struct ieee80211_hdr_3addr);
@@ -325,15 +324,14 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
exit:
return bss;
-
}
/*
- Check the given bss is valid by kernel API cfg80211_get_bss()
- @padapter : the given adapter
-
- return true if bss is valid, false for not found.
-*/
+ * Check whether the given bss is valid via the kernel API cfg80211_get_bss()
+ * @padapter : the given adapter
+ *
+ * Return true if the bss is valid, false if it is not found.
+ */
int rtw_cfg80211_check_bss(struct adapter *padapter)
{
struct wlan_bssid_ex *pnetwork = &(padapter->mlmeextpriv.mlmext_info.network);
@@ -374,7 +372,6 @@ void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter)
struct wlan_network *scanned = pmlmepriv->cur_network_scanned;
if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
-
memcpy(&cur_network->network, pnetwork, sizeof(struct wlan_bssid_ex));
rtw_cfg80211_inform_bss(padapter, cur_network);
} else {
@@ -495,7 +492,6 @@ void rtw_cfg80211_indicate_disconnect(struct adapter *padapter)
}
}
-
static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
{
int ret = 0;
@@ -569,7 +565,6 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
rtw_ap_set_wep_key(padapter, param->u.crypt.key, wep_key_len, wep_key_idx, 1);
goto exit;
-
}
/* group key */
@@ -581,7 +576,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
- psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
@@ -616,11 +611,9 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
pbcmc_sta->ieee8021x_blocked = false;
pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
}
-
}
goto exit;
-
}
if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */
@@ -643,7 +636,6 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
-
psta->dot118021XPrivacy = _AES_;
} else {
psta->dot118021XPrivacy = _NO_PRIVACY_;
@@ -695,17 +687,13 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
pbcmc_sta->ieee8021x_blocked = false;
pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
}
-
}
-
}
-
}
exit:
return ret;
-
}
static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
@@ -789,7 +777,6 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
if (strcmp(param->u.crypt.alg, "none") != 0)
psta->ieee8021x_blocked = false;
-
if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
(padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
@@ -900,7 +887,6 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
-
if (!mac_addr || is_broadcast_ether_addr(mac_addr))
param->u.crypt.set_tx = 0; /* for wpa/wpa2 group key */
else
@@ -932,7 +918,6 @@ addkey_end:
kfree(param);
return ret;
-
}
static int cfg80211_rtw_get_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -983,7 +968,6 @@ static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
}
return 0;
-
}
static int cfg80211_rtw_get_station(struct wiphy *wiphy,
@@ -1168,7 +1152,6 @@ void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter)
/* ev =translate_scan(padapter, a, pnetwork, ev, stop); */
rtw_cfg80211_inform_bss(padapter, pnetwork);
}
-
}
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
@@ -1200,7 +1183,6 @@ static int rtw_cfg80211_set_probe_req_wpsp2pie(struct adapter *padapter, char *b
}
return ret;
-
}
static int cfg80211_rtw_scan(struct wiphy *wiphy
@@ -1305,14 +1287,13 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
} else if (request->n_channels <= 4) {
for (j = request->n_channels - 1; j >= 0; j--)
for (i = 0; i < survey_times; i++)
- memcpy(&ch[j*survey_times+i], &ch[j], sizeof(struct rtw_ieee80211_channel));
+ memcpy(&ch[j*survey_times+i], &ch[j], sizeof(struct rtw_ieee80211_channel));
_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times * request->n_channels);
} else {
_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, NULL, 0);
}
spin_unlock_bh(&pmlmepriv->lock);
-
if (_status == false)
ret = -1;
@@ -1327,7 +1308,6 @@ check_need_indicate_scan_done:
exit:
return ret;
-
}
static int cfg80211_rtw_set_wiphy_params(struct wiphy *wiphy, u32 changed)
@@ -1342,12 +1322,10 @@ static int rtw_cfg80211_set_wpa_version(struct security_priv *psecuritypriv, u32
return 0;
}
-
if (wpa_version & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPAPSK;
return 0;
-
}
static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv,
@@ -1373,7 +1351,6 @@ static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv,
psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
-
break;
default:
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
@@ -1381,7 +1358,6 @@ static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv,
}
return 0;
-
}
static int rtw_cfg80211_set_cipher(struct security_priv *psecuritypriv, u32 cipher, bool ucast)
@@ -1391,7 +1367,6 @@ static int rtw_cfg80211_set_cipher(struct security_priv *psecuritypriv, u32 ciph
u32 *profile_cipher = ucast ? &psecuritypriv->dot11PrivacyAlgrthm :
&psecuritypriv->dot118021XGrpPrivacy;
-
if (!cipher) {
*profile_cipher = _NO_PRIVACY_;
psecuritypriv->ndisencryptstatus = ndisencryptstatus;
@@ -1603,7 +1578,6 @@ static int cfg80211_rtw_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
}
if (params->ssid_len > IW_ESSID_MAX_SIZE) {
-
ret = -E2BIG;
goto exit;
}
@@ -1671,7 +1645,6 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
padapter->mlmepriv.not_indic_disco = true;
-
if (adapter_wdev_data(padapter)->block == true) {
ret = -EBUSY;
goto exit;
@@ -1694,7 +1667,6 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
}
if (sme->ssid_len > IW_ESSID_MAX_SIZE) {
-
ret = -E2BIG;
goto exit;
}
@@ -1889,7 +1861,6 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
/* overwrite PMKID */
for (index = 0 ; index < NUM_PMKID_CACHE; index++) {
if (!memcmp(psecuritypriv->PMKIDList[index].Bssid, (u8 *)pmksa->bssid, ETH_ALEN)) {
-
memcpy(psecuritypriv->PMKIDList[index].PMKID, (u8 *)pmksa->pmkid, WLAN_PMKID_LEN);
psecuritypriv->PMKIDList[index].bUsed = true;
psecuritypriv->PMKIDIndex = index+1;
@@ -1899,7 +1870,6 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
}
if (!blInserted) {
-
memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, (u8 *)pmksa->bssid, ETH_ALEN);
memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, (u8 *)pmksa->pmkid, WLAN_PMKID_LEN);
@@ -2135,11 +2105,9 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
pattrib->seqnum = pmlmeext->mgnt_seq;
pmlmeext->mgnt_seq++;
-
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
-
}
fail:
@@ -2147,11 +2115,8 @@ fail:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
-
}
-
-
static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
.ndo_start_xmit = rtw_cfg80211_monitor_if_xmit_entry,
};
@@ -2324,7 +2289,6 @@ static int rtw_add_beacon(struct adapter *adapter, const u8 *head, size_t head_l
else
ret = -EINVAL;
-
kfree(pbuf);
return ret;
@@ -2404,7 +2368,6 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy, struct net_device *ndev
return -EINVAL;
}
-
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
@@ -2423,9 +2386,7 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy, struct net_device *ndev
break;
}
-
}
-
}
spin_unlock_bh(&pstapriv->asoc_list_lock);
@@ -2433,7 +2394,6 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy, struct net_device *ndev
associated_clients_update(padapter, updated);
return ret;
-
}
static int cfg80211_rtw_change_station(struct wiphy *wiphy, struct net_device *ndev,
@@ -2465,7 +2425,6 @@ static struct sta_info *rtw_sta_info_get_by_idx(const int idx, struct sta_priv *
static int cfg80211_rtw_dump_station(struct wiphy *wiphy, struct net_device *ndev,
int idx, u8 *mac, struct station_info *sinfo)
{
-
int ret = 0;
struct adapter *padapter = rtw_netdev_priv(ndev);
struct sta_info *psta = NULL;
@@ -2568,7 +2527,6 @@ static int _cfg80211_rtw_mgmt_tx(struct adapter *padapter, u8 tx_ch, const u8 *b
exit:
return ret;
-
}
static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy,
@@ -2640,7 +2598,6 @@ exit:
static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap, enum nl80211_band band)
{
-
#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
@@ -2692,12 +2649,10 @@ void rtw_cfg80211_init_wiphy(struct adapter *padapter)
/* copy mac_addr to wiphy */
memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
-
}
static void rtw_cfg80211_preinit_wiphy(struct adapter *padapter, struct wiphy *wiphy)
{
-
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->max_scan_ssids = RTW_SSID_SCAN_AMOUNT;
@@ -2810,7 +2765,7 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
wdev->netdev = pnetdev;
wdev->iftype = NL80211_IFTYPE_STATION; /* will be init in rtw_hal_init() */
- /* Must sync with _rtw_init_mlme_priv() */
+ /* Must sync with _rtw_init_mlme_priv() */
/* pmlmepriv->fw_state = WIFI_STATION_STATE */
padapter->rtw_wdev = wdev;
pnetdev->ieee80211_ptr = wdev;
@@ -2844,7 +2799,6 @@ unregister_wiphy:
wiphy_free(wiphy);
exit:
return ret;
-
}
void rtw_wdev_free(struct wireless_dev *wdev)
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index db2dd0baa8be..08543a3936da 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -382,27 +382,21 @@ static int rtsx_control_thread(void *__dev)
if (chip->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
dev_err(&dev->pci->dev, "UNKNOWN data direction\n");
chip->srb->result = DID_ERROR << 16;
- }
-
- /* reject if target != 0 or if LUN is higher than
- * the maximum known LUN
- */
- else if (chip->srb->device->id) {
+ } else if (chip->srb->device->id) {
+ /* reject if target != 0 or if LUN is higher than
+ * the maximum known LUN
+ */
dev_err(&dev->pci->dev, "Bad target number (%d:%d)\n",
chip->srb->device->id,
(u8)chip->srb->device->lun);
chip->srb->result = DID_BAD_TARGET << 16;
- }
-
- else if (chip->srb->device->lun > chip->max_lun) {
+ } else if (chip->srb->device->lun > chip->max_lun) {
dev_err(&dev->pci->dev, "Bad LUN (%d:%d)\n",
chip->srb->device->id,
(u8)chip->srb->device->lun);
chip->srb->result = DID_BAD_TARGET << 16;
- }
-
- /* we've got a command, let's do it! */
- else {
+ } else {
+ /* we've got a command, let's do it! */
scsi_show_command(chip);
rtsx_invoke_transport(chip->srb, chip);
}
diff --git a/drivers/staging/sm750fb/Kconfig b/drivers/staging/sm750fb/Kconfig
index 1461c89701c3..ab3d9b057d56 100644
--- a/drivers/staging/sm750fb/Kconfig
+++ b/drivers/staging/sm750fb/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config FB_SM750
tristate "Silicon Motion SM750 framebuffer support"
- depends on FB && PCI
+ depends on FB && PCI && HAS_IOPORT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
index 66965da11443..52e106f117da 100644
--- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
+++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
@@ -78,8 +78,6 @@ struct vchiq_service_params_kernel {
short version_min; /* Update for incompatible changes */
};
-struct vchiq_instance;
-
extern int vchiq_initialise(struct vchiq_instance **pinstance);
extern int vchiq_shutdown(struct vchiq_instance *instance);
extern int vchiq_connect(struct vchiq_instance *instance);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 90a3958d1f29..aa2313f3bcab 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -415,7 +415,7 @@ free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagel
pagelistinfo->scatterlist_mapped = 0;
/* Deal with any partial cache lines (fragments) */
- if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
+ if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && g_fragments_base) {
char *fragments = g_fragments_base +
(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
g_fragments_size;
@@ -462,7 +462,7 @@ free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagel
cleanup_pagelistinfo(instance, pagelistinfo);
}
-int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
+static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
struct device *dev = &pdev->dev;
struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index d1cd5de46dcf..077f62ebe80c 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config VT6655
tristate "VIA Technologies VT6655 support"
- depends on PCI && MAC80211 && m
+ depends on PCI && HAS_IOPORT && MAC80211 && m
help
This is a vendor-written driver for VIA VT6655.
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index e33dd1b9c40e..a4799589e469 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index c7cd54171d99..3e8c92675c82 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index cd271b1da69f..048e1c3fe19b 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index dfb762bce84d..45234769f45d 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 93195a4c5b01..7ea1c8ec05ed 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index b50ce11147dd..176e327a45bc 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211metadef.h b/drivers/staging/wlan-ng/p80211metadef.h
index 1b91b64c12ed..1cbb4b67a9a6 100644
--- a/drivers/staging/wlan-ng/p80211metadef.h
+++ b/drivers/staging/wlan-ng/p80211metadef.h
@@ -6,27 +6,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211metastruct.h b/drivers/staging/wlan-ng/p80211metastruct.h
index 4adc64580185..a52217c9b953 100644
--- a/drivers/staging/wlan-ng/p80211metastruct.h
+++ b/drivers/staging/wlan-ng/p80211metastruct.h
@@ -6,27 +6,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
@@ -114,22 +93,8 @@ struct p80211msg_dot11req_scan_results {
struct p80211item_uint32 cfpollreq;
struct p80211item_uint32 privacy;
struct p80211item_uint32 capinfo;
- struct p80211item_uint32 basicrate1;
- struct p80211item_uint32 basicrate2;
- struct p80211item_uint32 basicrate3;
- struct p80211item_uint32 basicrate4;
- struct p80211item_uint32 basicrate5;
- struct p80211item_uint32 basicrate6;
- struct p80211item_uint32 basicrate7;
- struct p80211item_uint32 basicrate8;
- struct p80211item_uint32 supprate1;
- struct p80211item_uint32 supprate2;
- struct p80211item_uint32 supprate3;
- struct p80211item_uint32 supprate4;
- struct p80211item_uint32 supprate5;
- struct p80211item_uint32 supprate6;
- struct p80211item_uint32 supprate7;
- struct p80211item_uint32 supprate8;
+ struct p80211item_uint32 basicrate[8];
+ struct p80211item_uint32 supprate[8];
} __packed;
struct p80211msg_dot11req_start {
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index fc23fae5651b..7ffc202d9007 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h
index f68d8b7d5ad8..d56bc6079ed4 100644
--- a/drivers/staging/wlan-ng/p80211msg.h
+++ b/drivers/staging/wlan-ng/p80211msg.h
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 6bef419e8ad0..8634fc89a6c2 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 1cee51a1075e..f5186380b629 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index 809cf3d480e9..6ec559ffd2f9 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h
index bc45cd5f91e4..39213f73913c 100644
--- a/drivers/staging/wlan-ng/p80211req.h
+++ b/drivers/staging/wlan-ng/p80211req.h
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h
index b2ed96960413..5e4ea5f92058 100644
--- a/drivers/staging/wlan-ng/p80211types.h
+++ b/drivers/staging/wlan-ng/p80211types.h
@@ -9,27 +9,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 3ff7ee7011df..e7b26b057124 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 11658865ca50..5d03b2b9aab4 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 9030a8939a9b..d5737166564e 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
@@ -437,42 +416,22 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
if (item->supprates[count] == 0)
break;
-#define REQBASICRATE(N) \
- do { \
- if ((count >= (N)) && DOT11_RATE5_ISBASIC_GET( \
- item->supprates[(N) - 1])) { \
- req->basicrate ## N .data = item->supprates[(N) - 1]; \
- req->basicrate ## N .status = \
- P80211ENUM_msgitem_status_data_ok; \
- } \
- } while (0)
-
- REQBASICRATE(1);
- REQBASICRATE(2);
- REQBASICRATE(3);
- REQBASICRATE(4);
- REQBASICRATE(5);
- REQBASICRATE(6);
- REQBASICRATE(7);
- REQBASICRATE(8);
-
-#define REQSUPPRATE(N) \
- do { \
- if (count >= (N)) { \
- req->supprate ## N .data = item->supprates[(N) - 1]; \
- req->supprate ## N .status = \
- P80211ENUM_msgitem_status_data_ok; \
- } \
- } while (0)
-
- REQSUPPRATE(1);
- REQSUPPRATE(2);
- REQSUPPRATE(3);
- REQSUPPRATE(4);
- REQSUPPRATE(5);
- REQSUPPRATE(6);
- REQSUPPRATE(7);
- REQSUPPRATE(8);
+ for (int i = 0; i < 8; i++) {
+ if (count > i &&
+ DOT11_RATE5_ISBASIC_GET(item->supprates[i])) {
+ req->basicrate[i].data = item->supprates[i];
+ req->basicrate[i].status =
+ P80211ENUM_msgitem_status_data_ok;
+ }
+ }
+
+ for (int i = 0; i < 8; i++) {
+ if (count > i) {
+ req->supprate[i].data = item->supprates[i];
+ req->supprate[i].status =
+ P80211ENUM_msgitem_status_data_ok;
+ }
+ }
/* beacon period */
req->beaconperiod.status = P80211ENUM_msgitem_status_data_ok;
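For illustration, the array-based scan-result layout introduced in p80211metastruct.h is what lets the REQBASICRATE/REQSUPPRATE macro pairs collapse into the plain loops above. A standalone sketch follows; item_u32, MSGITEM_STATUS_DATA_OK and RATE_ISBASIC are stand-in names assumed for illustration, not the driver's identifiers.

#include <stdint.h>
#include <stdio.h>

struct item_u32 {
	unsigned int data;
	unsigned int status;
};

#define MSGITEM_STATUS_DATA_OK	1
#define RATE_ISBASIC(r)		((r) & 0x80)	/* assumed "basic rate" bit */

struct scan_result {
	struct item_u32 basicrate[8];
	struct item_u32 supprate[8];
};

static void fill_rates(struct scan_result *req, const uint8_t *supprates, int count)
{
	for (int i = 0; i < 8; i++) {
		if (count > i && RATE_ISBASIC(supprates[i])) {
			req->basicrate[i].data = supprates[i];
			req->basicrate[i].status = MSGITEM_STATUS_DATA_OK;
		}
	}

	for (int i = 0; i < 8; i++) {
		if (count > i) {
			req->supprate[i].data = supprates[i];
			req->supprate[i].status = MSGITEM_STATUS_DATA_OK;
		}
	}
}

int main(void)
{
	const uint8_t rates[] = { 0x82, 0x84, 0x0b, 0x16 };	/* 1, 2, 5.5, 11 Mb/s */
	struct scan_result req = { 0 };

	fill_rates(&req, rates, 4);
	printf("supprate[0]=0x%x basicrate[0]=0x%x\n",
	       req.supprate[0].data, req.basicrate[0].data);
	return 0;
}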
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 7132cec2d7eb..083a055ee986 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index fcf8313870af..4346b90c1a77 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index daa7cc4e897c..57180bb71699 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -8,27 +8,6 @@
*
* linux-wlan
*
- * The contents of this file are subject to the Mozilla Public
- * License Version 1.1 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of
- * the License at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS
- * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * rights and limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use
- * your version of this file under the MPL, indicate your decision
- * by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL. If you do not delete
- * the provisions above, a recipient may use your version of this
- * file under either the MPL or the GPL.
- *
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index a3c5f3558a33..3d1b511ea284 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -888,7 +888,7 @@ static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
ret = ops->pr_preempt(bdev, key, sa_key,
scsi_pr_type_to_block(type),
- sa == PRO_PREEMPT ? false : true);
+ sa == PRO_PREEMPT_AND_ABORT);
break;
case PRO_RELEASE:
if (!ops->pr_clear) {
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 15ffc8d2ac7b..22cc6cac0ba2 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -2820,14 +2820,14 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
pr_err("Unable to reconfigure device\n");
return ret;
}
- strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+ strscpy(udev->dev_config, page, TCMU_CONFIG_LEN);
ret = tcmu_update_uio_info(udev);
if (ret)
return ret;
return count;
}
- strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+ strscpy(udev->dev_config, page, TCMU_CONFIG_LEN);
return count;
}
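For context, strscpy() always NUL-terminates and reports truncation with a negative error, whereas strlcpy() returned the untruncated source length. A simplified user-space model of that behaviour follows; it is not the kernel implementation, and E2BIG_ERR merely stands in for -E2BIG.

#include <stdio.h>
#include <string.h>

#define E2BIG_ERR (-7)	/* stand-in for the kernel's -E2BIG */

/* Simplified illustration of strscpy() semantics: always NUL-terminate,
 * return the number of bytes copied, or a negative error on truncation.
 */
static long toy_strscpy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return E2BIG_ERR;

	len = strlen(src);
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return E2BIG_ERR;
	}

	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[8];

	printf("%ld '%s'\n", toy_strscpy(buf, "short", sizeof(buf)), buf);
	printf("%ld '%s'\n", toy_strscpy(buf, "definitely too long", sizeof(buf)), buf);
	return 0;
}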
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 78fd365893c1..c8b3d7b78098 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -2,7 +2,7 @@
obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
-thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
+thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o clx.o
thunderbolt-${CONFIG_ACPI} += acpi.o
thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index 3514bf65b7a4..38fefd0e5268 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -296,16 +296,15 @@ static bool tb_acpi_bus_match(struct device *dev)
static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw)
{
+ struct tb_switch *parent_sw = tb_switch_parent(sw);
struct acpi_device *adev = NULL;
- struct tb_switch *parent_sw;
/*
* Device routers exist under the downstream-facing USB4 port
* of the parent router. Their _ADR is always 0.
*/
- parent_sw = tb_switch_parent(sw);
if (parent_sw) {
- struct tb_port *port = tb_port_at(tb_route(sw), parent_sw);
+ struct tb_port *port = tb_switch_downstream_port(sw);
struct acpi_device *port_adev;
port_adev = acpi_find_child_by_adr(ACPI_COMPANION(&parent_sw->dev),
diff --git a/drivers/thunderbolt/clx.c b/drivers/thunderbolt/clx.c
new file mode 100644
index 000000000000..13d217ae98e6
--- /dev/null
+++ b/drivers/thunderbolt/clx.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CLx support
+ *
+ * Copyright (C) 2020 - 2023, Intel Corporation
+ * Authors: Gil Fine <gil.fine@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/module.h>
+
+#include "tb.h"
+
+static bool clx_enabled = true;
+module_param_named(clx, clx_enabled, bool, 0444);
+MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
+
+static const char *clx_name(unsigned int clx)
+{
+ switch (clx) {
+ case TB_CL0S | TB_CL1 | TB_CL2:
+ return "CL0s/CL1/CL2";
+ case TB_CL1 | TB_CL2:
+ return "CL1/CL2";
+ case TB_CL0S | TB_CL2:
+ return "CL0s/CL2";
+ case TB_CL0S | TB_CL1:
+ return "CL0s/CL1";
+ case TB_CL0S:
+ return "CL0s";
+ case 0:
+ return "disabled";
+ default:
+ return "unknown";
+ }
+}
+
+static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
+{
+ u32 phy;
+ int ret;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (secondary)
+ phy |= LANE_ADP_CS_1_PMS;
+ else
+ phy &= ~LANE_ADP_CS_1_PMS;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_pm_secondary_enable(struct tb_port *port)
+{
+ return tb_port_pm_secondary_set(port, true);
+}
+
+static int tb_port_pm_secondary_disable(struct tb_port *port)
+{
+ return tb_port_pm_secondary_set(port, false);
+}
+
+/* Called for USB4 or Titan Ridge routers only */
+static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx)
+{
+ u32 val, mask = 0;
+ bool ret;
+
+ /* Don't enable CLx in case of two single-lane links */
+ if (!port->bonded && port->dual_link_port)
+ return false;
+
+ /* Don't enable CLx in case of inter-domain link */
+ if (port->xdomain)
+ return false;
+
+ if (tb_switch_is_usb4(port->sw)) {
+ if (!usb4_port_clx_supported(port))
+ return false;
+ } else if (!tb_lc_is_clx_supported(port)) {
+ return false;
+ }
+
+ if (clx & TB_CL0S)
+ mask |= LANE_ADP_CS_0_CL0S_SUPPORT;
+ if (clx & TB_CL1)
+ mask |= LANE_ADP_CS_0_CL1_SUPPORT;
+ if (clx & TB_CL2)
+ mask |= LANE_ADP_CS_0_CL2_SUPPORT;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+ return !!(val & mask);
+}
+
+static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable)
+{
+ u32 phy, mask = 0;
+ int ret;
+
+ if (clx & TB_CL0S)
+ mask |= LANE_ADP_CS_1_CL0S_ENABLE;
+ if (clx & TB_CL1)
+ mask |= LANE_ADP_CS_1_CL1_ENABLE;
+ if (clx & TB_CL2)
+ mask |= LANE_ADP_CS_1_CL2_ENABLE;
+
+ if (!mask)
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ phy |= mask;
+ else
+ phy &= ~mask;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_clx_disable(struct tb_port *port, unsigned int clx)
+{
+ return tb_port_clx_set(port, clx, false);
+}
+
+static int tb_port_clx_enable(struct tb_port *port, unsigned int clx)
+{
+ return tb_port_clx_set(port, clx, true);
+}
+
+static int tb_port_clx(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2))
+ return 0;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (val & LANE_ADP_CS_1_CL0S_ENABLE)
+ ret |= TB_CL0S;
+ if (val & LANE_ADP_CS_1_CL1_ENABLE)
+ ret |= TB_CL1;
+ if (val & LANE_ADP_CS_1_CL2_ENABLE)
+ ret |= TB_CL2;
+
+ return ret;
+}
+
+/**
+ * tb_port_clx_is_enabled() - Is given CL state enabled
+ * @port: USB4 port to check
+ * @clx: Mask of CL states to check
+ *
+ * Returns true if any of the given CL states is enabled for @port.
+ */
+bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
+{
+ return !!(tb_port_clx(port) & clx);
+}
+
+/**
+ * tb_switch_clx_init() - Initialize router CL states
+ * @sw: Router
+ *
+ * Can be called for any router. Initializes the current CL state by
+ * reading it from the hardware.
+ *
+ * Returns %0 in case of success and negative errno in case of failure.
+ */
+int tb_switch_clx_init(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+ unsigned int clx, tmp;
+
+ if (tb_switch_is_icm(sw))
+ return 0;
+
+ if (!tb_route(sw))
+ return 0;
+
+ if (!tb_switch_clx_is_supported(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ clx = tb_port_clx(up);
+ tmp = tb_port_clx(down);
+ if (clx != tmp)
+ tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n",
+ clx, tmp);
+
+ tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx));
+
+ sw->clx = clx;
+ return 0;
+}
+
+static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_route(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+ ret = tb_port_pm_secondary_enable(up);
+ if (ret)
+ return ret;
+
+ return tb_port_pm_secondary_disable(down);
+}
+
+static int tb_switch_mask_clx_objections(struct tb_switch *sw)
+{
+ int up_port = sw->config.upstream_port_number;
+ u32 offset, val[2], mask_obj, unmask_obj;
+ int ret, i;
+
+ /* Of the pre-USB4 devices only Titan Ridge supports CLx states */
+ if (!tb_switch_is_titan_ridge(sw))
+ return 0;
+
+ if (!tb_route(sw))
+ return 0;
+
+ /*
+ * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
+ * Port A consists of lane adapters 1,2 and
+ * Port B consists of lane adapters 3,4
+ * If the upstream port is A (lanes 1,2), we mask objections from
+ * port B (lanes 3,4) and unmask objections from port A, and vice versa.
+ */
+ if (up_port == 1) {
+ mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+ unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+ offset = TB_LOW_PWR_C1_CL1;
+ } else {
+ mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+ unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+ offset = TB_LOW_PWR_C3_CL1;
+ }
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lp + offset, ARRAY_SIZE(val));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] |= mask_obj;
+ val[i] &= ~unmask_obj;
+ }
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lp + offset, ARRAY_SIZE(val));
+}
+
+/**
+ * tb_switch_clx_is_supported() - Is CLx supported on this type of router
+ * @sw: The router to check CLx support for
+ */
+bool tb_switch_clx_is_supported(const struct tb_switch *sw)
+{
+ if (!clx_enabled)
+ return false;
+
+ if (sw->quirks & QUIRK_NO_CLX)
+ return false;
+
+ /*
+ * CLx is not enabled and validated on Intel USB4 platforms
+ * before Alder Lake.
+ */
+ if (tb_switch_is_tiger_lake(sw))
+ return false;
+
+ return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
+static bool validate_mask(unsigned int clx)
+{
+ /* Previous states need to be enabled */
+ if (clx & TB_CL1)
+ return (clx & TB_CL0S) == TB_CL0S;
+ return true;
+}
+
+/**
+ * tb_switch_clx_enable() - Enable CLx on upstream port of specified router
+ * @sw: Router to enable CLx for
+ * @clx: The CLx state to enable
+ *
+ * CLx is enabled only if both sides of the link support CLx, the link is
+ * not configured as two single-lane links, and the link is not an
+ * inter-domain link. The complete set of conditions is described in CM
+ * Guide 1.0 section 8.1.
+ *
+ * Returns %0 on success or an error code on failure.
+ */
+int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
+{
+ bool up_clx_support, down_clx_support;
+ struct tb_switch *parent_sw;
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!clx || sw->clx == clx)
+ return 0;
+
+ if (!validate_mask(clx))
+ return -EINVAL;
+
+ parent_sw = tb_switch_parent(sw);
+ if (!parent_sw)
+ return 0;
+
+ if (!tb_switch_clx_is_supported(parent_sw) ||
+ !tb_switch_clx_is_supported(sw))
+ return 0;
+
+ /* Only support CL2 for v2 routers */
+ if ((clx & TB_CL2) &&
+ (usb4_switch_version(parent_sw) < 2 ||
+ usb4_switch_version(sw) < 2))
+ return -EOPNOTSUPP;
+
+ ret = tb_switch_pm_secondary_resolve(sw);
+ if (ret)
+ return ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ up_clx_support = tb_port_clx_supported(up, clx);
+ down_clx_support = tb_port_clx_supported(down, clx);
+
+ tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx),
+ up_clx_support ? "" : "not ");
+ tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx),
+ down_clx_support ? "" : "not ");
+
+ if (!up_clx_support || !down_clx_support)
+ return -EOPNOTSUPP;
+
+ ret = tb_port_clx_enable(up, clx);
+ if (ret)
+ return ret;
+
+ ret = tb_port_clx_enable(down, clx);
+ if (ret) {
+ tb_port_clx_disable(up, clx);
+ return ret;
+ }
+
+ ret = tb_switch_mask_clx_objections(sw);
+ if (ret) {
+ tb_port_clx_disable(up, clx);
+ tb_port_clx_disable(down, clx);
+ return ret;
+ }
+
+ sw->clx |= clx;
+
+ tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx));
+ return 0;
+}
+
+/**
+ * tb_switch_clx_disable() - Disable CLx on upstream port of specified router
+ * @sw: Router to disable CLx for
+ *
+ * Disables all CL states of the given router. Can be called on any
+ * router; if the states were not already enabled, this does nothing.
+ *
+ * Returns the CL states that were disabled or negative errno in case of
+ * failure.
+ */
+int tb_switch_clx_disable(struct tb_switch *sw)
+{
+ unsigned int clx = sw->clx;
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_switch_clx_is_supported(sw))
+ return 0;
+
+ if (!clx)
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ ret = tb_port_clx_disable(up, clx);
+ if (ret)
+ return ret;
+
+ ret = tb_port_clx_disable(down, clx);
+ if (ret)
+ return ret;
+
+ sw->clx = 0;
+
+ tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx));
+ return clx;
+}
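
The new clx.c interface treats the CL states as a bitmask (TB_CL0S, TB_CL1, TB_CL2) and is consumed by the software connection manager in the tb.c changes later in this diff. As a rough illustration of the intended call pattern only, the sketch below (hypothetical helper name, mirroring the fallback logic of tb_enable_clx() in tb.c) tries the deepest set of states first and falls back when the topology does not support it:

	/* Illustrative only: try CL0s/CL1/CL2 first, fall back to CL0s/CL1 */
	static int example_enable_low_power(struct tb_switch *sw)
	{
		unsigned int clx = TB_CL0S | TB_CL1;
		int ret;

		ret = tb_switch_clx_enable(sw, clx | TB_CL2);
		if (ret == -EOPNOTSUPP)
			ret = tb_switch_clx_enable(sw, clx);
		/* -EOPNOTSUPP only means the link does not support these states */
		return ret == -EOPNOTSUPP ? 0 : ret;
	}

Note that tb_switch_clx_disable() returns the mask of states it actually disabled, which lets callers (such as the lane margining code in debugfs.c below) re-enable exactly the same states afterwards.
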
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 3a213322ec7a..d997a4c545f7 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -409,6 +409,13 @@ static int tb_async_error(const struct ctl_pkg *pkg)
case TB_CFG_ERROR_HEC_ERROR_DETECTED:
case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
case TB_CFG_ERROR_DP_BW:
+ case TB_CFG_ERROR_ROP_CMPLT:
+ case TB_CFG_ERROR_POP_CMPLT:
+ case TB_CFG_ERROR_PCIE_WAKE:
+ case TB_CFG_ERROR_DP_CON_CHANGE:
+ case TB_CFG_ERROR_DPTX_DISCOVERY:
+ case TB_CFG_ERROR_LINK_RECOVERY:
+ case TB_CFG_ERROR_ASYM_LINK:
return true;
default:
@@ -758,6 +765,27 @@ int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
case TB_CFG_ERROR_DP_BW:
name = "DP_BW";
break;
+ case TB_CFG_ERROR_ROP_CMPLT:
+ name = "router operation completion";
+ break;
+ case TB_CFG_ERROR_POP_CMPLT:
+ name = "port operation completion";
+ break;
+ case TB_CFG_ERROR_PCIE_WAKE:
+ name = "PCIe wake";
+ break;
+ case TB_CFG_ERROR_DP_CON_CHANGE:
+ name = "DP connector change";
+ break;
+ case TB_CFG_ERROR_DPTX_DISCOVERY:
+ name = "DPTX discovery";
+ break;
+ case TB_CFG_ERROR_LINK_RECOVERY:
+ name = "link recovery";
+ break;
+ case TB_CFG_ERROR_ASYM_LINK:
+ name = "asymmetric link";
+ break;
default:
name = "unknown";
break;
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index f92ad71ef983..c9ddd49138d8 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -14,12 +14,15 @@
#include "tb.h"
#include "sb_regs.h"
-#define PORT_CAP_PCIE_LEN 1
+#define PORT_CAP_V1_PCIE_LEN 1
+#define PORT_CAP_V2_PCIE_LEN 2
#define PORT_CAP_POWER_LEN 2
#define PORT_CAP_LANE_LEN 3
#define PORT_CAP_USB3_LEN 5
-#define PORT_CAP_DP_LEN 8
-#define PORT_CAP_TMU_LEN 8
+#define PORT_CAP_DP_V1_LEN 9
+#define PORT_CAP_DP_V2_LEN 14
+#define PORT_CAP_TMU_V1_LEN 8
+#define PORT_CAP_TMU_V2_LEN 10
#define PORT_CAP_BASIC_LEN 9
#define PORT_CAP_USB4_LEN 20
@@ -553,8 +556,9 @@ static int margining_run_write(void *data, u64 val)
struct usb4_port *usb4 = port->usb4;
struct tb_switch *sw = port->sw;
struct tb_margining *margining;
+ struct tb_switch *down_sw;
struct tb *tb = sw->tb;
- int ret;
+ int ret, clx;
if (val != 1)
return -EINVAL;
@@ -566,15 +570,24 @@ static int margining_run_write(void *data, u64 val)
goto out_rpm_put;
}
- /*
- * CL states may interfere with lane margining so inform the user know
- * and bail out.
- */
- if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2)) {
- tb_port_warn(port,
- "CL states are enabled, Disable them with clx=0 and re-connect\n");
- ret = -EINVAL;
- goto out_unlock;
+ if (tb_is_upstream_port(port))
+ down_sw = sw;
+ else if (port->remote)
+ down_sw = port->remote->sw;
+ else
+ down_sw = NULL;
+
+ if (down_sw) {
+ /*
+ * CL states may interfere with lane margining so
+ * disable them temporarily now.
+ */
+ ret = tb_switch_clx_disable(down_sw);
+ if (ret < 0) {
+ tb_sw_warn(down_sw, "failed to disable CL states\n");
+ goto out_unlock;
+ }
+ clx = ret;
}
margining = usb4->margining;
@@ -586,7 +599,7 @@ static int margining_run_write(void *data, u64 val)
margining->right_high,
USB4_MARGIN_SW_COUNTER_CLEAR);
if (ret)
- goto out_unlock;
+ goto out_clx;
ret = usb4_port_sw_margin_errors(port, &margining->results[0]);
} else {
@@ -600,6 +613,9 @@ static int margining_run_write(void *data, u64 val)
margining->right_high, margining->results);
}
+out_clx:
+ if (down_sw)
+ tb_switch_clx_enable(down_sw, clx);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
@@ -1148,7 +1164,10 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
break;
case TB_PORT_CAP_TIME1:
- length = PORT_CAP_TMU_LEN;
+ if (usb4_switch_version(port->sw) < 2)
+ length = PORT_CAP_TMU_V1_LEN;
+ else
+ length = PORT_CAP_TMU_V2_LEN;
break;
case TB_PORT_CAP_POWER:
@@ -1157,12 +1176,17 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
case TB_PORT_CAP_ADAP:
if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
- length = PORT_CAP_PCIE_LEN;
- } else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
- if (usb4_dp_port_bw_mode_supported(port))
- length = PORT_CAP_DP_LEN + 1;
+ if (usb4_switch_version(port->sw) < 2)
+ length = PORT_CAP_V1_PCIE_LEN;
+ else
+ length = PORT_CAP_V2_PCIE_LEN;
+ } else if (tb_port_is_dpin(port)) {
+ if (usb4_switch_version(port->sw) < 2)
+ length = PORT_CAP_DP_V1_LEN;
else
- length = PORT_CAP_DP_LEN;
+ length = PORT_CAP_DP_V2_LEN;
+ } else if (tb_port_is_dpout(port)) {
+ length = PORT_CAP_DP_V1_LEN;
} else if (tb_port_is_usb3_down(port) ||
tb_port_is_usb3_up(port)) {
length = PORT_CAP_USB3_LEN;
diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c
index 14bb6dec6c4b..39476fc48801 100644
--- a/drivers/thunderbolt/dma_test.c
+++ b/drivers/thunderbolt/dma_test.c
@@ -412,6 +412,7 @@ static void speed_get(const struct dma_test *dt, u64 *val)
static int speed_validate(u64 val)
{
switch (val) {
+ case 40:
case 20:
case 10:
case 0:
@@ -489,9 +490,12 @@ static void dma_test_check_errors(struct dma_test *dt, int ret)
if (!dt->error_code) {
if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
dt->error_code = DMA_TEST_SPEED_ERROR;
- } else if (dt->link_width &&
- dt->xd->link_width != dt->link_width) {
- dt->error_code = DMA_TEST_WIDTH_ERROR;
+ } else if (dt->link_width) {
+ const struct tb_xdomain *xd = dt->xd;
+
+ if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) ||
+ (dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL))
+ dt->error_code = DMA_TEST_WIDTH_ERROR;
} else if (dt->packets_to_send != dt->packets_sent ||
dt->packets_to_receive != dt->packets_received ||
dt->crc_errors || dt->buffer_overflow_errors) {
@@ -756,5 +760,5 @@ module_exit(dma_test_exit);
MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
-MODULE_DESCRIPTION("DMA traffic test driver");
+MODULE_DESCRIPTION("Thunderbolt/USB4 DMA traffic test driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 0f6099c18a94..eb241b270f79 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -605,9 +605,8 @@ static int usb4_drom_parse(struct tb_switch *sw)
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
- "DROM data CRC32 mismatch (expected: %#x, got: %#x), aborting\n",
+ "DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
header->data_crc32, crc);
- return -EINVAL;
}
return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE);
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 86521ebb2579..dbdcad8d73bf 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -644,13 +644,14 @@ static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
return ret;
}
-static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
- u64 route, u8 connection_id, u8 connection_key,
- u8 link, u8 depth, bool boot)
+static void update_switch(struct tb_switch *sw, u64 route, u8 connection_id,
+ u8 connection_key, u8 link, u8 depth, bool boot)
{
+ struct tb_switch *parent_sw = tb_switch_parent(sw);
+
/* Disconnect from parent */
- tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
- /* Re-connect via updated port*/
+ tb_switch_downstream_port(sw)->remote = NULL;
+ /* Re-connect via updated port */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
/* Update with the new addressing information */
@@ -671,10 +672,7 @@ static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
static void remove_switch(struct tb_switch *sw)
{
- struct tb_switch *parent_sw;
-
- parent_sw = tb_to_switch(sw->dev.parent);
- tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+ tb_switch_downstream_port(sw)->remote = NULL;
tb_switch_remove(sw);
}
@@ -755,7 +753,6 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
if (sw) {
u8 phy_port, sw_phy_port;
- parent_sw = tb_to_switch(sw->dev.parent);
sw_phy_port = tb_phy_port_from_link(sw->link);
phy_port = tb_phy_port_from_link(link);
@@ -785,7 +782,7 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
route = tb_route(sw);
}
- update_switch(parent_sw, sw, route, pkg->connection_id,
+ update_switch(sw, route, pkg->connection_id,
pkg->connection_key, link, depth, boot);
tb_switch_put(sw);
return;
@@ -853,7 +850,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
sw->security_level = security_level;
sw->boot = boot;
sw->link_speed = speed_gen3 ? 20 : 10;
- sw->link_width = dual_lane ? 2 : 1;
+ sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
+ TB_LINK_WIDTH_SINGLE;
sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
if (add_switch(parent_sw, sw))
@@ -1236,9 +1234,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
if (sw) {
/* Update the switch if it is still in the same place */
if (tb_route(sw) == route && !!sw->authorized == authorized) {
- parent_sw = tb_to_switch(sw->dev.parent);
- update_switch(parent_sw, sw, route, pkg->connection_id,
- 0, 0, 0, boot);
+ update_switch(sw, route, pkg->connection_id, 0, 0, 0,
+ boot);
tb_switch_put(sw);
return;
}
@@ -1276,7 +1273,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
sw->security_level = security_level;
sw->boot = boot;
sw->link_speed = speed_gen3 ? 20 : 10;
- sw->link_width = dual_lane ? 2 : 1;
+ sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
+ TB_LINK_WIDTH_SINGLE;
sw->rpm = force_rtd3;
if (!sw->rpm)
sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index e58beac44295..4b7bec74e89f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -46,6 +46,10 @@
#define QUIRK_AUTO_CLEAR_INT BIT(0)
#define QUIRK_E2E BIT(1)
+static bool host_reset = true;
+module_param(host_reset, bool, 0444);
+MODULE_PARM_DESC(host_reset, "reset USB4 v2 host router (default: true)");
+
static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
@@ -1217,6 +1221,37 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
str_enabled_disabled(port_ok));
}
+static void nhi_reset(struct tb_nhi *nhi)
+{
+ ktime_t timeout;
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_CAPS);
+ /* Reset only v2 and later routers */
+ if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
+ return;
+
+ if (!host_reset) {
+ dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
+ return;
+ }
+
+ iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
+ msleep(100);
+
+ timeout = ktime_add_ms(ktime_get(), 500);
+ do {
+ val = ioread32(nhi->iobase + REG_RESET);
+ if (!(val & REG_RESET_HRR)) {
+ dev_dbg(&nhi->pdev->dev, "host router reset successful\n");
+ return;
+ }
+ usleep_range(10, 20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
+}
+
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
@@ -1317,7 +1352,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
- nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
+ nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
@@ -1330,6 +1365,8 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
+ nhi_reset(nhi);
+
res = nhi_init_msi(nhi);
if (res)
return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");
@@ -1480,6 +1517,8 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
@@ -1488,6 +1527,7 @@ static struct pci_device_id nhi_ids[] = {
};
MODULE_DEVICE_TABLE(pci, nhi_ids);
+MODULE_DESCRIPTION("Thunderbolt/USB4 core driver");
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index b0718020c6f5..0f029ce75882 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -75,6 +75,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
+#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI 0x5781
+#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI 0x5784
+#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786
+#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE 0x57a4
#define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2
#define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2
#define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 6ba295815477..297a3e440648 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -37,7 +37,7 @@ struct ring_desc {
/* NHI registers in bar 0 */
/*
- * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 16 bytes per entry, one entry for every hop (REG_CAPS)
* 00: physical pointer to an array of struct ring_desc
* 08: ring tail (set by NHI)
* 10: ring head (index of first non posted descriptor)
@@ -46,7 +46,7 @@ struct ring_desc {
#define REG_TX_RING_BASE 0x00000
/*
- * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 16 bytes per entry, one entry for every hop (REG_CAPS)
* 00: physical pointer to an array of struct ring_desc
* 08: ring head (index of first not posted descriptor)
* 10: ring tail (set by NHI)
@@ -56,7 +56,7 @@ struct ring_desc {
#define REG_RX_RING_BASE 0x08000
/*
- * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 32 bytes per entry, one entry for every hop (REG_CAPS)
* 00: enum_ring_flags
* 04: isoch time stamp ?? (write 0)
* ..: unknown
@@ -64,7 +64,7 @@ struct ring_desc {
#define REG_TX_OPTIONS_BASE 0x19800
/*
- * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 32 bytes per entry, one entry for every hop (REG_CAPS)
* 00: enum ring_flags
* If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to
* the corresponding TX hop id.
@@ -77,7 +77,7 @@ struct ring_desc {
/*
* three bitfields: tx, rx, rx overflow
- * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
+ * Every bitfield contains one bit for every hop (REG_CAPS).
* New interrupts are fired only after ALL registers have been
* read (even those containing only disabled rings).
*/
@@ -87,7 +87,7 @@ struct ring_desc {
/*
* two bitfields: rx, tx
- * Both bitfields contains one bit for every hop (REG_HOP_COUNT). To
+ * Both bitfields contain one bit for every hop (REG_CAPS). To
* enable/disable interrupts set/clear the corresponding bits.
*/
#define REG_RING_INTERRUPT_BASE 0x38200
@@ -104,12 +104,17 @@ struct ring_desc {
#define REG_INT_VEC_ALLOC_REGS (32 / REG_INT_VEC_ALLOC_BITS)
/* The last 11 bits contain the number of hops supported by the NHI port. */
-#define REG_HOP_COUNT 0x39640
+#define REG_CAPS 0x39640
+#define REG_CAPS_VERSION_MASK GENMASK(23, 16)
+#define REG_CAPS_VERSION_2 0x40
#define REG_DMA_MISC 0x39864
#define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2)
#define REG_DMA_MISC_DISABLE_AUTO_CLEAR BIT(17)
+#define REG_RESET 0x39898
+#define REG_RESET_HRR BIT(0)
+
#define REG_INMAIL_DATA 0x39900
#define REG_INMAIL_CMD 0x39904
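
With the rename from REG_HOP_COUNT to REG_CAPS, the same register now carries both the hop count in its low bits (masked with 0x3ff in nhi_probe()) and the NHI version in bits 23:16 that gates the new host router reset. A minimal decode sketch, assuming the surrounding NHI driver context (struct tb_nhi *nhi) and the same FIELD_GET() usage as nhi_reset() above:

	/* Illustrative decode of REG_CAPS as used by nhi_probe()/nhi_reset() */
	u32 caps = ioread32(nhi->iobase + REG_CAPS);
	unsigned int hop_count = caps & 0x3ff;
	unsigned int version = FIELD_GET(REG_CAPS_VERSION_MASK, caps);

	if (version >= REG_CAPS_VERSION_2)
		iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
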
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index 3dd5f81bd629..69fb3b0fa34f 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -12,6 +12,10 @@
#include "tb.h"
+#define NVM_MIN_SIZE SZ_32K
+#define NVM_MAX_SIZE SZ_1M
+#define NVM_DATA_DWORDS 16
+
/* Intel specific NVM offsets */
#define INTEL_NVM_DEVID 0x05
#define INTEL_NVM_VERSION 0x08
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index 1157b8869bcc..488138a28ae1 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -10,6 +10,7 @@
static void quirk_force_power_link(struct tb_switch *sw)
{
sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
+ tb_sw_dbg(sw, "forcing power to link controller\n");
}
static void quirk_dp_credit_allocation(struct tb_switch *sw)
@@ -74,6 +75,14 @@ static const struct tb_quirk tb_quirks[] = {
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
/*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
@@ -105,6 +114,7 @@ void tb_check_quirks(struct tb_switch *sw)
if (q->device && q->device != sw->device)
continue;
+ tb_sw_dbg(sw, "running %ps\n", q->hook);
q->hook(sw);
}
}
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 9cc28197dbc4..47becb363ada 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -187,10 +187,34 @@ static ssize_t nvm_authenticate_show(struct device *dev,
return ret;
}
+static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
+{
+ int i;
+
+ tb_port_dbg(port, "reading NVM authentication status of retimers\n");
+
+ /*
+ * Before doing anything else, read the authentication status.
+ * If the retimer has it set, store it for the new retimer
+ * device instance.
+ */
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+ usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
+}
+
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
int i;
+ /*
+ * When the USB4 port is online, sideband communications are
+ * already up.
+ */
+ if (!usb4_port_device_is_offline(port->usb4))
+ return;
+
+ tb_port_dbg(port, "enabling sideband transactions\n");
+
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
usb4_port_retimer_set_inbound_sbtx(port, i);
}
@@ -199,6 +223,16 @@ static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
int i;
+ /*
+ * When the USB4 port is offline we need to keep the sideband
+ * communications up so that we can still communicate with
+ * the connected retimers.
+ */
+ if (usb4_port_device_is_offline(port->usb4))
+ return;
+
+ tb_port_dbg(port, "disabling sideband transactions\n");
+
for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
usb4_port_retimer_unset_inbound_sbtx(port, i);
}
@@ -229,6 +263,13 @@ static ssize_t nvm_authenticate_store(struct device *dev,
rt->auth_status = 0;
if (val) {
+ /*
+ * When NVM authentication starts the retimer is not
+ * accessible, so calling tb_retimer_unset_inbound_sbtx()
+ * will fail and therefore we do not call it. The exception
+ * is when the validation fails or when we only write the new
+ * NVM image without authentication.
+ */
tb_retimer_set_inbound_sbtx(rt->port);
if (val == AUTHENTICATE_ONLY) {
ret = tb_retimer_nvm_authenticate(rt, true);
@@ -249,7 +290,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
}
exit_unlock:
- tb_retimer_unset_inbound_sbtx(rt->port);
+ if (ret || val == WRITE_ONLY)
+ tb_retimer_unset_inbound_sbtx(rt->port);
mutex_unlock(&rt->tb->lock);
exit_rpm:
pm_runtime_mark_last_busy(&rt->dev);
@@ -341,12 +383,6 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
return ret;
}
- if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
- tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
- vendor);
- return -EOPNOTSUPP;
- }
-
/*
* Check that it supports NVM operations. If not then don't add
* the device at all.
@@ -455,18 +491,16 @@ int tb_retimer_scan(struct tb_port *port, bool add)
return ret;
/*
- * Enable sideband channel for each retimer. We can do this
- * regardless whether there is device connected or not.
+ * Immediately after sending enumerate retimers read the
+ * authentication status of each retimer.
*/
- tb_retimer_set_inbound_sbtx(port);
+ tb_retimer_nvm_authenticate_status(port, status);
/*
- * Before doing anything else, read the authentication status.
- * If the retimer has it set, store it for the new retimer
- * device instance.
+ * Enable sideband channel for each retimer. We can do this
+ * regardless of whether there is a device connected or not.
*/
- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
- usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
+ tb_retimer_set_inbound_sbtx(port);
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
/*
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 51e86b5171c7..7ea63bb31714 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -26,10 +26,6 @@ struct nvm_auth_status {
u32 status;
};
-static bool clx_enabled = true;
-module_param_named(clx, clx_enabled, bool, 0444);
-MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
-
/*
* Hold NVM authentication failure status per switch This information
* needs to stay around even when the switch gets power cycled so we
@@ -727,7 +723,7 @@ static int tb_init_port(struct tb_port *port)
* can be read from the path config space. Legacy
* devices we use hard-coded value.
*/
- if (tb_switch_is_usb4(port->sw)) {
+ if (port->cap_usb4) {
struct tb_regs_hop hop;
if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
@@ -907,15 +903,23 @@ int tb_port_get_link_speed(struct tb_port *port)
speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
- return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
+
+ switch (speed) {
+ case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
+ return 40;
+ case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
+ return 20;
+ default:
+ return 10;
+ }
}
/**
* tb_port_get_link_width() - Get current link width
* @port: Port to check (USB4 or CIO)
*
- * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
- * or negative errno in case of failure.
+ * Returns the link width as encoded in &enum tb_link_width or
+ * negative errno in case of failure.
*/
int tb_port_get_link_width(struct tb_port *port)
{
@@ -930,11 +934,13 @@ int tb_port_get_link_width(struct tb_port *port)
if (ret)
return ret;
+ /* Matches the values in enum tb_link_width */
return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}
-static bool tb_port_is_width_supported(struct tb_port *port, int width)
+static bool tb_port_is_width_supported(struct tb_port *port,
+ unsigned int width_mask)
{
u32 phy, widths;
int ret;
@@ -950,20 +956,25 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width)
widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
- return !!(widths & width);
+ return widths & width_mask;
+}
+
+static bool is_gen4_link(struct tb_port *port)
+{
+ return tb_port_get_link_speed(port) > 20;
}
/**
* tb_port_set_link_width() - Set target link width of the lane adapter
* @port: Lane adapter
- * @width: Target link width (%1 or %2)
+ * @width: Target link width
*
* Sets the target link width of the lane adapter to @width. Does not
* enable/disable lane bonding. For that call tb_port_set_lane_bonding().
*
* Return: %0 in case of success and negative errno in case of error
*/
-int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
u32 val;
int ret;
@@ -978,11 +989,14 @@ int tb_port_set_link_width(struct tb_port *port, unsigned int width)
val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
switch (width) {
- case 1:
+ case TB_LINK_WIDTH_SINGLE:
+ /* Gen 4 link cannot be single */
+ if (is_gen4_link(port))
+ return -EOPNOTSUPP;
val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
- case 2:
+ case TB_LINK_WIDTH_DUAL:
val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
@@ -1004,12 +1018,9 @@ int tb_port_set_link_width(struct tb_port *port, unsigned int width)
* cases one should use tb_port_lane_bonding_enable() instead to enable
* lane bonding.
*
- * As a side effect sets @port->bonding accordingly (and does the same
- * for lane 1 too).
- *
* Return: %0 in case of success and negative errno in case of error
*/
-int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
+static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
u32 val;
int ret;
@@ -1027,19 +1038,8 @@ int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
else
val &= ~LANE_ADP_CS_1_LB;
- ret = tb_port_write(port, &val, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
- if (ret)
- return ret;
-
- /*
- * When lane 0 bonding is set it will affect lane 1 too so
- * update both.
- */
- port->bonded = bonding;
- port->dual_link_port->bonded = bonding;
-
- return 0;
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
}
/**
@@ -1056,36 +1056,52 @@ int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
*/
int tb_port_lane_bonding_enable(struct tb_port *port)
{
+ enum tb_link_width width;
int ret;
/*
* Enable lane bonding for both links if not already enabled by
* for example the boot firmware.
*/
- ret = tb_port_get_link_width(port);
- if (ret == 1) {
- ret = tb_port_set_link_width(port, 2);
+ width = tb_port_get_link_width(port);
+ if (width == TB_LINK_WIDTH_SINGLE) {
+ ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
if (ret)
goto err_lane0;
}
- ret = tb_port_get_link_width(port->dual_link_port);
- if (ret == 1) {
- ret = tb_port_set_link_width(port->dual_link_port, 2);
+ width = tb_port_get_link_width(port->dual_link_port);
+ if (width == TB_LINK_WIDTH_SINGLE) {
+ ret = tb_port_set_link_width(port->dual_link_port,
+ TB_LINK_WIDTH_DUAL);
if (ret)
goto err_lane0;
}
- ret = tb_port_set_lane_bonding(port, true);
- if (ret)
- goto err_lane1;
+ /*
+ * Only set bonding if the link was not already bonded. This
+ * avoids the lane adapter re-entering the bonding state.
+ */
+ if (width == TB_LINK_WIDTH_SINGLE) {
+ ret = tb_port_set_lane_bonding(port, true);
+ if (ret)
+ goto err_lane1;
+ }
+
+ /*
+ * When lane 0 bonding is set it will affect lane 1 too so
+ * update both.
+ */
+ port->bonded = true;
+ port->dual_link_port->bonded = true;
return 0;
err_lane1:
- tb_port_set_link_width(port->dual_link_port, 1);
+ tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
- tb_port_set_link_width(port, 1);
+ tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
+
return ret;
}
@@ -1099,27 +1115,34 @@ err_lane0:
void tb_port_lane_bonding_disable(struct tb_port *port)
{
tb_port_set_lane_bonding(port, false);
- tb_port_set_link_width(port->dual_link_port, 1);
- tb_port_set_link_width(port, 1);
+ tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
+ tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
+ port->dual_link_port->bonded = false;
+ port->bonded = false;
}
/**
* tb_port_wait_for_link_width() - Wait until link reaches specific width
* @port: Port to wait for
- * @width: Expected link width (%1 or %2)
+ * @width_mask: Expected link width mask
* @timeout_msec: Timeout in ms how long to wait
*
* Should be used after both ends of the link have been bonded (or
* bonding has been disabled) to wait until the link actually reaches
- * the expected state. Returns %-ETIMEDOUT if the @width was not reached
- * within the given timeout, %0 if it did.
+ * the expected state. Returns %-ETIMEDOUT if the width was not reached
+ * within the given timeout, %0 if it did. Can be passed a mask of
+ * expected widths and succeeds if any of the widths is reached.
*/
-int tb_port_wait_for_link_width(struct tb_port *port, int width,
+int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
int ret;
+ /* Gen 4 link does not support single lane */
+ if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port))
+ return -EOPNOTSUPP;
+
do {
ret = tb_port_get_link_width(port);
if (ret < 0) {
@@ -1130,7 +1153,7 @@ int tb_port_wait_for_link_width(struct tb_port *port, int width,
*/
if (ret != -EACCES)
return ret;
- } else if (ret == width) {
+ } else if (ret & width_mask) {
return 0;
}
@@ -1183,135 +1206,6 @@ int tb_port_update_credits(struct tb_port *port)
return tb_port_do_update_credits(port->dual_link_port);
}
-static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
-{
- u32 phy;
- int ret;
-
- ret = tb_port_read(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
- if (ret)
- return ret;
-
- if (secondary)
- phy |= LANE_ADP_CS_1_PMS;
- else
- phy &= ~LANE_ADP_CS_1_PMS;
-
- return tb_port_write(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
-}
-
-static int tb_port_pm_secondary_enable(struct tb_port *port)
-{
- return __tb_port_pm_secondary_set(port, true);
-}
-
-static int tb_port_pm_secondary_disable(struct tb_port *port)
-{
- return __tb_port_pm_secondary_set(port, false);
-}
-
-/* Called for USB4 or Titan Ridge routers only */
-static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
-{
- u32 val, mask = 0;
- bool ret;
-
- /* Don't enable CLx in case of two single-lane links */
- if (!port->bonded && port->dual_link_port)
- return false;
-
- /* Don't enable CLx in case of inter-domain link */
- if (port->xdomain)
- return false;
-
- if (tb_switch_is_usb4(port->sw)) {
- if (!usb4_port_clx_supported(port))
- return false;
- } else if (!tb_lc_is_clx_supported(port)) {
- return false;
- }
-
- if (clx_mask & TB_CL1) {
- /* CL0s and CL1 are enabled and supported together */
- mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
- }
- if (clx_mask & TB_CL2)
- mask |= LANE_ADP_CS_0_CL2_SUPPORT;
-
- ret = tb_port_read(port, &val, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_0, 1);
- if (ret)
- return false;
-
- return !!(val & mask);
-}
-
-static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
-{
- u32 phy, mask;
- int ret;
-
- /* CL0s and CL1 are enabled and supported together */
- if (clx == TB_CL1)
- mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
- else
- /* For now we support only CL0s and CL1. Not CL2 */
- return -EOPNOTSUPP;
-
- ret = tb_port_read(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
- if (ret)
- return ret;
-
- if (enable)
- phy |= mask;
- else
- phy &= ~mask;
-
- return tb_port_write(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
-}
-
-static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
-{
- return __tb_port_clx_set(port, clx, false);
-}
-
-static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
-{
- return __tb_port_clx_set(port, clx, true);
-}
-
-/**
- * tb_port_is_clx_enabled() - Is given CL state enabled
- * @port: USB4 port to check
- * @clx_mask: Mask of CL states to check
- *
- * Returns true if any of the given CL states is enabled for @port.
- */
-bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
-{
- u32 val, mask = 0;
- int ret;
-
- if (!tb_port_clx_supported(port, clx_mask))
- return false;
-
- if (clx_mask & TB_CL1)
- mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
- if (clx_mask & TB_CL2)
- mask |= LANE_ADP_CS_1_CL2_ENABLE;
-
- ret = tb_port_read(port, &val, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
- if (ret)
- return false;
-
- return !!(val & mask);
-}
-
static int tb_port_start_lane_initialization(struct tb_port *port)
{
int ret;
@@ -1911,20 +1805,57 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
-static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
+ unsigned int width;
+
+ switch (sw->link_width) {
+ case TB_LINK_WIDTH_SINGLE:
+ case TB_LINK_WIDTH_ASYM_TX:
+ width = 1;
+ break;
+ case TB_LINK_WIDTH_DUAL:
+ width = 2;
+ break;
+ case TB_LINK_WIDTH_ASYM_RX:
+ width = 3;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
- return sysfs_emit(buf, "%u\n", sw->link_width);
+ return sysfs_emit(buf, "%u\n", width);
}
+static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
-/*
- * Currently link has same amount of lanes both directions (1 or 2) but
- * expose them separately to allow possible asymmetric links in the future.
- */
-static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
-static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
+static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ unsigned int width;
+
+ switch (sw->link_width) {
+ case TB_LINK_WIDTH_SINGLE:
+ case TB_LINK_WIDTH_ASYM_RX:
+ width = 1;
+ break;
+ case TB_LINK_WIDTH_DUAL:
+ width = 2;
+ break;
+ case TB_LINK_WIDTH_ASYM_TX:
+ width = 3;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return sysfs_emit(buf, "%u\n", width);
+}
+static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
static ssize_t nvm_authenticate_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -2189,8 +2120,9 @@ static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *en
const struct tb_switch *sw = tb_to_switch(dev);
const char *type;
- if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
- if (add_uevent_var(env, "USB4_VERSION=1.0"))
+ if (tb_switch_is_usb4(sw)) {
+ if (add_uevent_var(env, "USB4_VERSION=%u.0",
+ usb4_switch_version(sw)))
return -ENOMEM;
}
@@ -2498,9 +2430,13 @@ int tb_switch_configure(struct tb_switch *sw)
/*
* For USB4 devices, we need to program the CM version
* accordingly so that it knows to expose all the
- * additional capabilities.
+ * additional capabilities. Program it according to USB4
+ * version to avoid changing existing (v1) routers behaviour.
*/
- sw->config.cmuv = USB4_VERSION_1_0;
+ if (usb4_switch_version(sw) < 2)
+ sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
+ else
+ sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
sw->config.plug_events_delay = 0xa;
/* Enumerate the switch */
@@ -2530,6 +2466,22 @@ int tb_switch_configure(struct tb_switch *sw)
return tb_plug_events_active(sw, true);
}
+/**
+ * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
+ * @sw: Router to configure
+ *
+ * Needs to be called before any tunnels can be set up through the
+ * router. Can be called for any router.
+ *
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_switch_configuration_valid(struct tb_switch *sw)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_configuration_valid(sw);
+ return 0;
+}
+
static int tb_switch_set_uuid(struct tb_switch *sw)
{
bool uid = false;
@@ -2754,9 +2706,9 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw)
*/
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
- struct tb_switch *parent = tb_to_switch(sw->dev.parent);
struct tb_port *up, *down;
u64 route = tb_route(sw);
+ unsigned int width_mask;
int ret;
if (!route)
@@ -2766,10 +2718,10 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
return 0;
up = tb_upstream_port(sw);
- down = tb_port_at(route, parent);
+ down = tb_switch_downstream_port(sw);
- if (!tb_port_is_width_supported(up, 2) ||
- !tb_port_is_width_supported(down, 2))
+ if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) ||
+ !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
return 0;
ret = tb_port_lane_bonding_enable(up);
@@ -2785,7 +2737,11 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
return ret;
}
- ret = tb_port_wait_for_link_width(down, 2, 100);
+ /* Any of these widths means the link is bonded */
+ width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
+ TB_LINK_WIDTH_ASYM_RX;
+
+ ret = tb_port_wait_for_link_width(down, width_mask, 100);
if (ret) {
tb_port_warn(down, "timeout enabling lane bonding\n");
return ret;
@@ -2808,8 +2764,8 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
*/
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
- struct tb_switch *parent = tb_to_switch(sw->dev.parent);
struct tb_port *up, *down;
+ int ret;
if (!tb_route(sw))
return;
@@ -2818,7 +2774,7 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
if (!up->bonded)
return;
- down = tb_port_at(tb_route(sw), parent);
+ down = tb_switch_downstream_port(sw);
tb_port_lane_bonding_disable(up);
tb_port_lane_bonding_disable(down);
@@ -2827,7 +2783,8 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
* It is fine if we get other errors as the router might have
* been unplugged.
*/
- if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
+ ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
+ if (ret == -ETIMEDOUT)
tb_sw_warn(sw, "timeout disabling lane bonding\n");
tb_port_update_credits(down);
@@ -2994,6 +2951,10 @@ int tb_switch_add(struct tb_switch *sw)
if (ret)
return ret;
+ ret = tb_switch_clx_init(sw);
+ if (ret)
+ return ret;
+
ret = tb_switch_tmu_init(sw);
if (ret)
return ret;
@@ -3246,13 +3207,8 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
/*
* Actually only needed for Titan Ridge but for simplicity can be
* done for USB4 device too as CLx is re-enabled at resume.
- * CL0s and CL1 are enabled and supported together.
*/
- if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
- if (tb_switch_disable_clx(sw, TB_CL1))
- tb_sw_warn(sw, "failed to disable %s on upstream port\n",
- tb_switch_clx_name(TB_CL1));
- }
+ tb_switch_clx_disable(sw);
err = tb_plug_events_active(sw, false);
if (err)
@@ -3474,234 +3430,6 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
return NULL;
}
-static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
-{
- struct tb_switch *parent = tb_switch_parent(sw);
- struct tb_port *up, *down;
- int ret;
-
- if (!tb_route(sw))
- return 0;
-
- up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), parent);
- ret = tb_port_pm_secondary_enable(up);
- if (ret)
- return ret;
-
- return tb_port_pm_secondary_disable(down);
-}
-
-static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
-{
- struct tb_switch *parent = tb_switch_parent(sw);
- bool up_clx_support, down_clx_support;
- struct tb_port *up, *down;
- int ret;
-
- if (!tb_switch_is_clx_supported(sw))
- return 0;
-
- /*
- * Enable CLx for host router's downstream port as part of the
- * downstream router enabling procedure.
- */
- if (!tb_route(sw))
- return 0;
-
- /* Enable CLx only for first hop router (depth = 1) */
- if (tb_route(parent))
- return 0;
-
- ret = tb_switch_pm_secondary_resolve(sw);
- if (ret)
- return ret;
-
- up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), parent);
-
- up_clx_support = tb_port_clx_supported(up, clx);
- down_clx_support = tb_port_clx_supported(down, clx);
-
- tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
- up_clx_support ? "" : "not ");
- tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
- down_clx_support ? "" : "not ");
-
- if (!up_clx_support || !down_clx_support)
- return -EOPNOTSUPP;
-
- ret = tb_port_clx_enable(up, clx);
- if (ret)
- return ret;
-
- ret = tb_port_clx_enable(down, clx);
- if (ret) {
- tb_port_clx_disable(up, clx);
- return ret;
- }
-
- ret = tb_switch_mask_clx_objections(sw);
- if (ret) {
- tb_port_clx_disable(up, clx);
- tb_port_clx_disable(down, clx);
- return ret;
- }
-
- sw->clx = clx;
-
- tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
- return 0;
-}
-
-/**
- * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
- * @sw: Router to enable CLx for
- * @clx: The CLx state to enable
- *
- * Enable CLx state only for first hop router. That is the most common
- * use-case, that is intended for better thermal management, and so helps
- * to improve performance. CLx is enabled only if both sides of the link
- * support CLx, and if both sides of the link are not configured as two
- * single lane links and only if the link is not inter-domain link. The
- * complete set of conditions is described in CM Guide 1.0 section 8.1.
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
-{
- struct tb_switch *root_sw = sw->tb->root_switch;
-
- if (!clx_enabled)
- return 0;
-
- /*
- * CLx is not enabled and validated on Intel USB4 platforms before
- * Alder Lake.
- */
- if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
- return 0;
-
- switch (clx) {
- case TB_CL1:
- /* CL0s and CL1 are enabled and supported together */
- return __tb_switch_enable_clx(sw, clx);
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
-{
- struct tb_switch *parent = tb_switch_parent(sw);
- struct tb_port *up, *down;
- int ret;
-
- if (!tb_switch_is_clx_supported(sw))
- return 0;
-
- /*
- * Disable CLx for host router's downstream port as part of the
- * downstream router enabling procedure.
- */
- if (!tb_route(sw))
- return 0;
-
- /* Disable CLx only for first hop router (depth = 1) */
- if (tb_route(parent))
- return 0;
-
- up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), parent);
- ret = tb_port_clx_disable(up, clx);
- if (ret)
- return ret;
-
- ret = tb_port_clx_disable(down, clx);
- if (ret)
- return ret;
-
- sw->clx = TB_CLX_DISABLE;
-
- tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
- return 0;
-}
-
-/**
- * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
- * @sw: Router to disable CLx for
- * @clx: The CLx state to disable
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
-{
- if (!clx_enabled)
- return 0;
-
- switch (clx) {
- case TB_CL1:
- /* CL0s and CL1 are enabled and supported together */
- return __tb_switch_disable_clx(sw, clx);
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * tb_switch_mask_clx_objections() - Mask CLx objections for a router
- * @sw: Router to mask objections for
- *
- * Mask the objections coming from the second depth routers in order to
- * stop these objections from interfering with the CLx states of the first
- * depth link.
- */
-int tb_switch_mask_clx_objections(struct tb_switch *sw)
-{
- int up_port = sw->config.upstream_port_number;
- u32 offset, val[2], mask_obj, unmask_obj;
- int ret, i;
-
- /* Only Titan Ridge of pre-USB4 devices support CLx states */
- if (!tb_switch_is_titan_ridge(sw))
- return 0;
-
- if (!tb_route(sw))
- return 0;
-
- /*
- * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
- * Port A consists of lane adapters 1,2 and
- * Port B consists of lane adapters 3,4
- * If upstream port is A, (lanes are 1,2), we mask objections from
- * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
- */
- if (up_port == 1) {
- mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
- unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
- offset = TB_LOW_PWR_C1_CL1;
- } else {
- mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
- unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
- offset = TB_LOW_PWR_C3_CL1;
- }
-
- ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
- sw->cap_lp + offset, ARRAY_SIZE(val));
- if (ret)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(val); i++) {
- val[i] |= mask_obj;
- val[i] &= ~unmask_obj;
- }
-
- return tb_sw_write(sw, &val, TB_CFG_SWITCH,
- sw->cap_lp + offset, ARRAY_SIZE(val));
-}
-
/*
* Can be used for read/write a specified PCIe bridge for any Thunderbolt 3
* device. For now used only for Titan Ridge.
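
The bandwidth accounting in the tb.c changes that follow now distinguishes the two directions of an asymmetric Gen 4 link (three lanes one way, one lane the other), which is why the starting estimate is raised to 120 Gb/s. A worked sketch of the per-direction arithmetic used there, assuming a 40 Gb/s Gen 4 lane speed:

	/* Illustrative arithmetic matching tb_available_bandwidth() below */
	int link_speed = 40;			/* Gb/s per lane on a Gen 4 link */
	int up_bw = link_speed * 3 * 1000;	/* 3-lane direction: 120000 Mb/s */
	int down_bw = link_speed * 1 * 1000;	/* 1-lane direction: 40000 Mb/s */

	up_bw -= up_bw / 10;			/* 10% guard band: 108000 Mb/s */
	down_bw -= down_bw / 10;		/* 36000 Mb/s */
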
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index c1af712ca728..62b26b7998fd 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -131,7 +131,7 @@ tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
struct tb_port *out)
{
- if (usb4_dp_port_bw_mode_enabled(in)) {
+ if (usb4_dp_port_bandwidth_mode_enabled(in)) {
int index, i;
index = usb4_dp_port_group_id(in);
@@ -240,6 +240,147 @@ static void tb_discover_dp_resources(struct tb *tb)
}
}
+/* Enables CL states up to host router */
+static int tb_enable_clx(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ unsigned int clx = TB_CL0S | TB_CL1;
+ const struct tb_tunnel *tunnel;
+ int ret;
+
+ /*
+ * Currently only enable CLx for the first link. This is enough
+ * to allow the CPU to save energy at least on Intel hardware
+ * and makes it slightly simpler to implement. We may change
+ * this in the future to cover the whole topology if it turns
+ * out to be beneficial.
+ */
+ while (sw && sw->config.depth > 1)
+ sw = tb_switch_parent(sw);
+
+ if (!sw)
+ return 0;
+
+ if (sw->config.depth != 1)
+ return 0;
+
+ /*
+ * If we are re-enabling then check if there is an active DMA
+ * tunnel and in that case bail out.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_dma(tunnel)) {
+ if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
+ return 0;
+ }
+ }
+
+ /*
+ * Initially try with CL2. If that's not supported by the
+ * topology try with CL0s and CL1 and then give up.
+ */
+ ret = tb_switch_clx_enable(sw, clx | TB_CL2);
+ if (ret == -EOPNOTSUPP)
+ ret = tb_switch_clx_enable(sw, clx);
+ return ret == -EOPNOTSUPP ? 0 : ret;
+}
+
+/* Disables CL states up to the host router */
+static void tb_disable_clx(struct tb_switch *sw)
+{
+ do {
+ if (tb_switch_clx_disable(sw) < 0)
+ tb_sw_warn(sw, "failed to disable CL states\n");
+ sw = tb_switch_parent(sw);
+ } while (sw);
+}
+
+static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
+{
+ struct tb_switch *sw;
+
+ sw = tb_to_switch(dev);
+ if (!sw)
+ return 0;
+
+ if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
+ enum tb_switch_tmu_mode mode;
+ int ret;
+
+ if (tb_switch_clx_is_enabled(sw, TB_CL1))
+ mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
+ else
+ mode = TB_SWITCH_TMU_MODE_HIFI_BI;
+
+ ret = tb_switch_tmu_configure(sw, mode);
+ if (ret)
+ return ret;
+
+ return tb_switch_tmu_enable(sw);
+ }
+
+ return 0;
+}
+
+static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
+{
+ struct tb_switch *sw;
+
+ if (!tunnel)
+ return;
+
+ /*
+ * Once first DP tunnel is established we change the TMU
+ * accuracy of first depth child routers (and the host router)
+ * to the highest. This is needed for the DP tunneling to work
+ * but also allows CL0s.
+ *
+ * If both routers are v2 then we don't need to do anything as
+ * they are using enhanced TMU mode that allows all CLx.
+ */
+ sw = tunnel->tb->root_switch;
+ device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
+}
+
+static int tb_enable_tmu(struct tb_switch *sw)
+{
+ int ret;
+
+ /*
+ * If both routers at the end of the link are v2 we simply
+ * enable the enhanced uni-directional mode. That covers all
+ * the CL states. For v1 and before we need to use the normal
+ * rate to allow CL1 (when supported). Otherwise we keep the TMU
+ * running at the highest accuracy.
+ */
+ ret = tb_switch_tmu_configure(sw,
+ TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
+ if (ret == -EOPNOTSUPP) {
+ if (tb_switch_clx_is_enabled(sw, TB_CL1))
+ ret = tb_switch_tmu_configure(sw,
+ TB_SWITCH_TMU_MODE_LOWRES);
+ else
+ ret = tb_switch_tmu_configure(sw,
+ TB_SWITCH_TMU_MODE_HIFI_BI);
+ }
+ if (ret)
+ return ret;
+
+ /* If it is already enabled in correct mode, don't touch it */
+ if (tb_switch_tmu_is_enabled(sw))
+ return 0;
+
+ ret = tb_switch_tmu_disable(sw);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_post_time(sw);
+ if (ret)
+ return ret;
+
+ return tb_switch_tmu_enable(sw);
+}
+
static void tb_switch_discover_tunnels(struct tb_switch *sw,
struct list_head *list,
bool alloc_hopids)
@@ -253,13 +394,7 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
- /*
- * In case of DP tunnel exists, change host router's
- * 1st children TMU mode to HiFi for CL0s to work.
- */
- if (tunnel)
- tb_switch_enable_tmu_1st_child(tb->root_switch,
- TB_SWITCH_TMU_RATE_HIFI);
+ tb_increase_tmu_accuracy(tunnel);
break;
case TB_TYPE_PCIE_DOWN:
@@ -357,25 +492,6 @@ static void tb_scan_xdomain(struct tb_port *port)
}
}
-static int tb_enable_tmu(struct tb_switch *sw)
-{
- int ret;
-
- /* If it is already enabled in correct mode, don't touch it */
- if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
- return 0;
-
- ret = tb_switch_tmu_disable(sw);
- if (ret)
- return ret;
-
- ret = tb_switch_tmu_post_time(sw);
- if (ret)
- return ret;
-
- return tb_switch_tmu_enable(sw);
-}
-
/**
* tb_find_unused_port() - return the first inactive port on @sw
* @sw: Switch to find the port on
@@ -480,7 +596,8 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
usb3_consumed_down = 0;
}
- *available_up = *available_down = 40000;
+ /* Maximum possible bandwidth of an asymmetric Gen 4 link is 120 Gb/s */
+ *available_up = *available_down = 120000;
/* Find the minimum available bandwidth over all links */
tb_for_each_port_on_path(src_port, dst_port, port) {
@@ -491,18 +608,45 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
if (tb_is_upstream_port(port)) {
link_speed = port->sw->link_speed;
+ /*
+ * sw->link_width is from upstream perspective
+ * so we use the opposite for downstream of the
+ * host router.
+ */
+ if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
+ up_bw = link_speed * 3 * 1000;
+ down_bw = link_speed * 1 * 1000;
+ } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
+ up_bw = link_speed * 1 * 1000;
+ down_bw = link_speed * 3 * 1000;
+ } else {
+ up_bw = link_speed * port->sw->link_width * 1000;
+ down_bw = up_bw;
+ }
} else {
link_speed = tb_port_get_link_speed(port);
if (link_speed < 0)
return link_speed;
- }
- link_width = port->bonded ? 2 : 1;
+ link_width = tb_port_get_link_width(port);
+ if (link_width < 0)
+ return link_width;
+
+ if (link_width == TB_LINK_WIDTH_ASYM_TX) {
+ up_bw = link_speed * 1 * 1000;
+ down_bw = link_speed * 3 * 1000;
+ } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
+ up_bw = link_speed * 3 * 1000;
+ down_bw = link_speed * 1 * 1000;
+ } else {
+ up_bw = link_speed * link_width * 1000;
+ down_bw = up_bw;
+ }
+ }
- up_bw = link_speed * link_width * 1000; /* Mb/s */
/* Leave 10% guard band */
up_bw -= up_bw / 10;
- down_bw = up_bw;
+ down_bw -= down_bw / 10;
tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
down_bw);
@@ -628,7 +772,7 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
* Look up available down port. Since we are chaining it should
* be found right above this switch.
*/
- port = tb_port_at(tb_route(sw), parent);
+ port = tb_switch_downstream_port(sw);
down = tb_find_usb3_down(parent, port);
if (!down)
return 0;
@@ -739,7 +883,6 @@ static void tb_scan_port(struct tb_port *port)
struct tb_port *upstream_port;
bool discovery = false;
struct tb_switch *sw;
- int ret;
if (tb_is_upstream_port(port))
return;
@@ -838,28 +981,20 @@ static void tb_scan_port(struct tb_port *port)
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
- if (discovery) {
+ if (discovery)
tb_sw_dbg(sw, "discovery, not touching CL states\n");
- } else {
- ret = tb_switch_enable_clx(sw, TB_CL1);
- if (ret && ret != -EOPNOTSUPP)
- tb_sw_warn(sw, "failed to enable %s on upstream port\n",
- tb_switch_clx_name(TB_CL1));
- }
-
- if (tb_switch_is_clx_enabled(sw, TB_CL1))
- /*
- * To support highest CLx state, we set router's TMU to
- * Normal-Uni mode.
- */
- tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
- else
- /* If CLx disabled, configure router's TMU to HiFi-Bidir mode*/
- tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
+ else if (tb_enable_clx(sw))
+ tb_sw_warn(sw, "failed to enable CL states\n");
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to enable TMU\n");
+ /*
+ * Configuration valid needs to be set after the TMU has been
+ * enabled for the upstream port of the router so we do it here.
+ */
+ tb_switch_configuration_valid(sw);
+
/* Scan upstream retimers */
tb_retimer_scan(upstream_port, true);
@@ -1034,7 +1169,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
struct tb_tunnel *tunnel;
struct tb_port *out;
- if (!usb4_dp_port_bw_mode_enabled(in))
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
continue;
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
@@ -1082,7 +1217,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
else
estimated_bw = estimated_up;
- if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
+ if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
tb_port_warn(in, "failed to update estimated bandwidth\n");
}
@@ -1263,8 +1398,7 @@ static void tb_tunnel_dp(struct tb *tb)
* In case a DP tunnel exists, change the host router's 1st children
* TMU mode to HiFi for CL0s to work.
*/
- tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);
-
+ tb_increase_tmu_accuracy(tunnel);
return;
err_free:
@@ -1378,7 +1512,6 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
struct tb_port *up, *down, *port;
struct tb_cm *tcm = tb_priv(tb);
- struct tb_switch *parent_sw;
struct tb_tunnel *tunnel;
up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
@@ -1389,9 +1522,8 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
* Look up available down port. Since we are chaining it should
* be found right above this switch.
*/
- parent_sw = tb_to_switch(sw->dev.parent);
- port = tb_port_at(tb_route(sw), parent_sw);
- down = tb_find_pcie_down(parent_sw, port);
+ port = tb_switch_downstream_port(sw);
+ down = tb_find_pcie_down(tb_switch_parent(sw), port);
if (!down)
return 0;
@@ -1428,30 +1560,45 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
struct tb_port *nhi_port, *dst_port;
struct tb_tunnel *tunnel;
struct tb_switch *sw;
+ int ret;
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
mutex_lock(&tb->lock);
+
+ /*
+ * When tunneling DMA paths the link should not enter CL states
+ * so disable them now.
+ */
+ tb_disable_clx(sw);
+
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
transmit_ring, receive_path, receive_ring);
if (!tunnel) {
- mutex_unlock(&tb->lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_clx;
}
if (tb_tunnel_activate(tunnel)) {
tb_port_info(nhi_port,
"DMA tunnel activation failed, aborting\n");
- tb_tunnel_free(tunnel);
- mutex_unlock(&tb->lock);
- return -EIO;
+ ret = -EIO;
+ goto err_free;
}
list_add_tail(&tunnel->list, &tcm->tunnel_list);
mutex_unlock(&tb->lock);
return 0;
+
+err_free:
+ tb_tunnel_free(tunnel);
+err_clx:
+ tb_enable_clx(sw);
+ mutex_unlock(&tb->lock);
+
+ return ret;
}
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
@@ -1477,6 +1624,13 @@ static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
receive_path, receive_ring))
tb_deactivate_and_free_tunnel(tunnel);
}
+
+ /*
+ * Try to re-enable CL states now, it is OK if this fails
+ * because we may still have another DMA tunnel active through
+ * the same host router USB4 downstream port.
+ */
+ tb_enable_clx(sw);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
@@ -1758,12 +1912,12 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "handling bandwidth allocation request\n");
- if (!usb4_dp_port_bw_mode_enabled(in)) {
+ if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
tb_port_warn(in, "bandwidth allocation mode not enabled\n");
goto unlock;
}
- ret = usb4_dp_port_requested_bw(in);
+ ret = usb4_dp_port_requested_bandwidth(in);
if (ret < 0) {
if (ret == -ENODATA)
tb_port_dbg(in, "no bandwidth request active\n");
@@ -1830,17 +1984,26 @@ static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
static void tb_handle_notification(struct tb *tb, u64 route,
const struct cfg_error_pkg *error)
{
- if (tb_cfg_ack_notification(tb->ctl, route, error))
- tb_warn(tb, "could not ack notification on %llx\n", route);
switch (error->error) {
+ case TB_CFG_ERROR_PCIE_WAKE:
+ case TB_CFG_ERROR_DP_CON_CHANGE:
+ case TB_CFG_ERROR_DPTX_DISCOVERY:
+ if (tb_cfg_ack_notification(tb->ctl, route, error))
+ tb_warn(tb, "could not ack notification on %llx\n",
+ route);
+ break;
+
case TB_CFG_ERROR_DP_BW:
+ if (tb_cfg_ack_notification(tb->ctl, route, error))
+ tb_warn(tb, "could not ack notification on %llx\n",
+ route);
tb_queue_dp_bandwidth_request(tb, route, error->port);
break;
default:
- /* Ack is enough */
- return;
+ /* Ignore for now */
+ break;
}
}
@@ -1955,8 +2118,7 @@ static int tb_start(struct tb *tb)
* To support highest CLx state, we set host router's TMU to
* Normal mode.
*/
- tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
- false);
+ tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
@@ -1997,34 +2159,19 @@ static int tb_suspend_noirq(struct tb *tb)
static void tb_restore_children(struct tb_switch *sw)
{
struct tb_port *port;
- int ret;
/* No need to restore if the router is already unplugged */
if (sw->is_unplugged)
return;
- /*
- * CL0s and CL1 are enabled and supported together.
- * Silently ignore CLx re-enabling in case CLx is not supported.
- */
- ret = tb_switch_enable_clx(sw, TB_CL1);
- if (ret && ret != -EOPNOTSUPP)
- tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
- tb_switch_clx_name(TB_CL1));
-
- if (tb_switch_is_clx_enabled(sw, TB_CL1))
- /*
- * To support highest CLx state, we set router's TMU to
- * Normal-Uni mode.
- */
- tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
- else
- /* If CLx disabled, configure router's TMU to HiFi-Bidir mode*/
- tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
+ if (tb_enable_clx(sw))
+ tb_sw_warn(sw, "failed to re-enable CL states\n");
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to restore TMU configuration\n");
+ tb_switch_configuration_valid(sw);
+
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port) && !port->xdomain)
continue;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 275ff5219a3a..57a9b272cb94 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -19,10 +19,6 @@
#include "ctl.h"
#include "dma_port.h"
-#define NVM_MIN_SIZE SZ_32K
-#define NVM_MAX_SIZE SZ_512K
-#define NVM_DATA_DWORDS 16
-
/* Keep link controller awake during update */
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
/* Disable CLx if not supported */
@@ -77,51 +73,37 @@ enum tb_nvm_write_ops {
#define USB4_SWITCH_MAX_DEPTH 5
/**
- * enum tb_switch_tmu_rate - TMU refresh rate
- * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
- * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
- * transmission of the Delay Request TSNOS
- * (Time Sync Notification Ordered Set) on a Link
- * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
- * transmission of the Delay Request TSNOS on
- * a Link
+ * enum tb_switch_tmu_mode - TMU mode
+ * @TB_SWITCH_TMU_MODE_OFF: TMU is off
+ * @TB_SWITCH_TMU_MODE_LOWRES: Uni-directional, normal mode
+ * @TB_SWITCH_TMU_MODE_HIFI_UNI: Uni-directional, HiFi mode
+ * @TB_SWITCH_TMU_MODE_HIFI_BI: Bi-directional, HiFi mode
+ * @TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: Enhanced Uni-directional, MedRes mode
+ *
+ * Ordering is based on TMU accuracy level (highest last).
*/
-enum tb_switch_tmu_rate {
- TB_SWITCH_TMU_RATE_OFF = 0,
- TB_SWITCH_TMU_RATE_HIFI = 16,
- TB_SWITCH_TMU_RATE_NORMAL = 1000,
+enum tb_switch_tmu_mode {
+ TB_SWITCH_TMU_MODE_OFF,
+ TB_SWITCH_TMU_MODE_LOWRES,
+ TB_SWITCH_TMU_MODE_HIFI_UNI,
+ TB_SWITCH_TMU_MODE_HIFI_BI,
+ TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI,
};
/**
- * struct tb_switch_tmu - Structure holding switch TMU configuration
+ * struct tb_switch_tmu - Structure holding router TMU configuration
* @cap: Offset to the TMU capability (%0 if not found)
* @has_ucap: Does the switch support uni-directional mode
- * @rate: TMU refresh rate related to upstream switch. In case of root
- * switch this holds the domain rate. Reflects the HW setting.
- * @unidirectional: Is the TMU in uni-directional or bi-directional mode
- * related to upstream switch. Don't care for root switch.
- * Reflects the HW setting.
- * @unidirectional_request: Is the new TMU mode: uni-directional or bi-directional
- * that is requested to be set. Related to upstream switch.
- * Don't care for root switch.
- * @rate_request: TMU new refresh rate related to upstream switch that is
- * requested to be set. In case of root switch, this holds
- * the new domain rate that is requested to be set.
+ * @mode: TMU mode related to the upstream router. Reflects the HW
+ * setting. Don't care for host router.
+ * @mode_request: TMU mode requested to set. Related to upstream router.
+ * Don't care for host router.
*/
struct tb_switch_tmu {
int cap;
bool has_ucap;
- enum tb_switch_tmu_rate rate;
- bool unidirectional;
- bool unidirectional_request;
- enum tb_switch_tmu_rate rate_request;
-};
-
-enum tb_clx {
- TB_CLX_DISABLE,
- /* CL0s and CL1 are enabled and supported together */
- TB_CL1 = BIT(0),
- TB_CL2 = BIT(1),
+ enum tb_switch_tmu_mode mode;
+ enum tb_switch_tmu_mode mode_request;
};
/**
@@ -142,7 +124,7 @@ enum tb_clx {
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
- * @link_width: Width of the link (1 or 2)
+ * @link_width: Width of the upstream facing link
* @link_usb4: Upstream link is USB4
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
@@ -174,12 +156,17 @@ enum tb_clx {
* @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
* @max_pcie_credits: Router preferred number of buffers for PCIe
* @max_dma_credits: Router preferred number of buffers for DMA/P2P
- * @clx: CLx state on the upstream link of the router
+ * @clx: CLx states on the upstream link of the router
*
* When the switch is being added or removed to the domain (other
* switches) you need to have domain lock held.
*
* In USB4 terminology this structure represents a router.
+ *
+ * Note @link_width is not the same as whether the link is bonded or
+ * not. For Gen 4 links the link is also bonded when it is asymmetric.
+ * The correct way to find out whether the link is bonded is to look at
+ * the @bonded field of the upstream port.
*/
struct tb_switch {
struct device dev;
@@ -195,7 +182,7 @@ struct tb_switch {
const char *vendor_name;
const char *device_name;
unsigned int link_speed;
- unsigned int link_width;
+ enum tb_link_width link_width;
bool link_usb4;
unsigned int generation;
int cap_plug_events;
@@ -225,7 +212,7 @@ struct tb_switch {
unsigned int min_dp_main_credits;
unsigned int max_pcie_credits;
unsigned int max_dma_credits;
- enum tb_clx clx;
+ unsigned int clx;
};
/**
@@ -455,6 +442,11 @@ struct tb_path {
#define TB_WAKE_ON_PCIE BIT(4)
#define TB_WAKE_ON_DP BIT(5)
+/* CL states */
+#define TB_CL0S BIT(0)
+#define TB_CL1 BIT(1)
+#define TB_CL2 BIT(2)
+
/**
* struct tb_cm_ops - Connection manager specific operations vector
* @driver_ready: Called right after control channel is started. Used by
@@ -802,6 +794,7 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
struct device *parent, u64 route);
int tb_switch_configure(struct tb_switch *sw);
+int tb_switch_configuration_valid(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
@@ -857,6 +850,20 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
return tb_to_switch(sw->dev.parent);
}
+/**
+ * tb_switch_downstream_port() - Return downstream facing port of parent router
+ * @sw: Device router pointer
+ *
+ * Only call for device routers. Returns the downstream facing port of
+ * the parent router.
+ */
+static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
+{
+ if (WARN_ON(!tb_route(sw)))
+ return NULL;
+ return tb_port_at(tb_route(sw), tb_switch_parent(sw));
+}
+
static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
@@ -936,17 +943,6 @@ static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
}
/**
- * tb_switch_is_usb4() - Is the switch USB4 compliant
- * @sw: Switch to check
- *
- * Returns true if the @sw is USB4 compliant router, false otherwise.
- */
-static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
-{
- return sw->config.thunderbolt_version == USB4_VERSION_1_0;
-}
-
-/**
* tb_switch_is_icm() - Is the switch handled by ICM firmware
* @sw: Switch to check
*
@@ -973,68 +969,58 @@ int tb_switch_tmu_init(struct tb_switch *sw);
int tb_switch_tmu_post_time(struct tb_switch *sw);
int tb_switch_tmu_disable(struct tb_switch *sw);
int tb_switch_tmu_enable(struct tb_switch *sw);
-void tb_switch_tmu_configure(struct tb_switch *sw,
- enum tb_switch_tmu_rate rate,
- bool unidirectional);
-void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
- enum tb_switch_tmu_rate rate);
+int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode);
+
/**
- * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
- * @sw: Router whose TMU mode to check
- * @unidirectional: If uni-directional (bi-directional otherwise)
+ * tb_switch_tmu_is_configured() - Is given TMU mode configured
+ * @sw: Router whose mode to check
+ * @mode: Mode to check
*
- * Return true if hardware TMU configuration matches the one passed in
- * as parameter. That is HiFi/Normal and either uni-directional or bi-directional.
+ * Checks if the given router's TMU is configured to @mode. Note the
+ * router TMU might not yet be enabled in this mode.
*/
-static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw,
- bool unidirectional)
+static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw,
+ enum tb_switch_tmu_mode mode)
{
- return sw->tmu.rate == sw->tmu.rate_request &&
- sw->tmu.unidirectional == unidirectional;
+ return sw->tmu.mode_request == mode;
}
-static inline const char *tb_switch_clx_name(enum tb_clx clx)
+/**
+ * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
+ * @sw: Router whose TMU mode to check
+ *
+ * Return true if hardware TMU configuration matches the requested
+ * configuration (and is not %TB_SWITCH_TMU_MODE_OFF).
+ */
+static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
{
- switch (clx) {
- /* CL0s and CL1 are enabled and supported together */
- case TB_CL1:
- return "CL0s/CL1";
- default:
- return "unknown";
- }
+ return sw->tmu.mode != TB_SWITCH_TMU_MODE_OFF &&
+ sw->tmu.mode == sw->tmu.mode_request;
}
-int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx);
-int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx);
+bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx);
+
+int tb_switch_clx_init(struct tb_switch *sw);
+bool tb_switch_clx_is_supported(const struct tb_switch *sw);
+int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx);
+int tb_switch_clx_disable(struct tb_switch *sw);
/**
- * tb_switch_is_clx_enabled() - Checks if the CLx is enabled
+ * tb_switch_clx_is_enabled() - Checks if the CLx is enabled
* @sw: Router to check for the CLx
- * @clx: The CLx state to check for
+ * @clx: The CLx states to check for
*
* Checks if the specified CLx is enabled on the router upstream link.
+ * Returns true if any of the given states is enabled.
+ *
* Not applicable for a host router.
*/
-static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
- enum tb_clx clx)
+static inline bool tb_switch_clx_is_enabled(const struct tb_switch *sw,
+ unsigned int clx)
{
- return sw->clx == clx;
+ return sw->clx & clx;
}
-/**
- * tb_switch_is_clx_supported() - Is CLx supported on this type of router
- * @sw: The router to check CLx support for
- */
-static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
-{
- if (sw->quirks & QUIRK_NO_CLX)
- return false;
-
- return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
-}
-
-int tb_switch_mask_clx_objections(struct tb_switch *sw);
-
int tb_switch_pcie_l1_enable(struct tb_switch *sw);
int tb_switch_xhci_connect(struct tb_switch *sw);
@@ -1073,14 +1059,12 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
int tb_port_get_link_speed(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port);
-int tb_port_set_link_width(struct tb_port *port, unsigned int width);
-int tb_port_set_lane_bonding(struct tb_port *port, bool bonding);
+int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
-int tb_port_wait_for_link_width(struct tb_port *port, int width,
+int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
-bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@@ -1183,6 +1167,17 @@ static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
return tb_to_switch(xd->dev.parent);
}
+/**
+ * tb_xdomain_downstream_port() - Return downstream facing port of parent router
+ * @xd: Xdomain pointer
+ *
+ * Returns the downstream port the XDomain is connected to.
+ */
+static inline struct tb_port *tb_xdomain_downstream_port(struct tb_xdomain *xd)
+{
+ return tb_port_at(xd->route, tb_xdomain_parent(xd));
+}
+
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
size_t size);
int tb_retimer_scan(struct tb_port *port, bool add);
@@ -1200,7 +1195,31 @@ static inline struct tb_retimer *tb_to_retimer(struct device *dev)
return NULL;
}
+/**
+ * usb4_switch_version() - Returns USB4 version of the router
+ * @sw: Router to check
+ *
+ * Returns the major version of the USB4 router (%1 for v1, %2 for v2
+ * and so on). Can be called for a pre-USB4 router too, in which case
+ * it returns %0.
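+ * For example, a USB4 v1.0 router reports 0x20 here; bits 7:5 are then
+ * 1, so this returns %1 for it.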
+ */
+static inline unsigned int usb4_switch_version(const struct tb_switch *sw)
+{
+ return FIELD_GET(USB4_VERSION_MAJOR_MASK, sw->config.thunderbolt_version);
+}
+
+/**
+ * tb_switch_is_usb4() - Is the switch USB4 compliant
+ * @sw: Switch to check
+ *
+ * Returns true if the @sw is USB4 compliant router, false otherwise.
+ */
+static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
+{
+ return usb4_switch_version(sw) > 0;
+}
+
int usb4_switch_setup(struct tb_switch *sw);
+int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size);
@@ -1273,19 +1292,22 @@ int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw);
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id);
-bool usb4_dp_port_bw_mode_supported(struct tb_port *port);
-bool usb4_dp_port_bw_mode_enabled(struct tb_port *port);
-int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported);
+bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port);
+bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port);
+int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
+ bool supported);
int usb4_dp_port_group_id(struct tb_port *port);
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id);
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes);
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes);
int usb4_dp_port_granularity(struct tb_port *port);
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity);
-int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw);
-int usb4_dp_port_allocated_bw(struct tb_port *port);
-int usb4_dp_port_allocate_bw(struct tb_port *port, int bw);
-int usb4_dp_port_requested_bw(struct tb_port *port);
+int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw);
+int usb4_dp_port_allocated_bandwidth(struct tb_port *port);
+int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw);
+int usb4_dp_port_requested_bandwidth(struct tb_port *port);
+
+int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable);
static inline bool tb_is_usb4_port_device(const struct device *dev)
{
@@ -1303,6 +1325,11 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port);
void usb4_port_device_remove(struct usb4_port *usb4);
int usb4_port_device_resume(struct usb4_port *usb4);
+static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4)
+{
+ return usb4->offline;
+}
+
void tb_check_quirks(struct tb_switch *sw);
#ifdef CONFIG_ACPI
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 3234bff07899..cd750e4b3440 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -30,6 +30,13 @@ enum tb_cfg_error {
TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
TB_CFG_ERROR_LOCK = 15,
TB_CFG_ERROR_DP_BW = 32,
+ TB_CFG_ERROR_ROP_CMPLT = 33,
+ TB_CFG_ERROR_POP_CMPLT = 34,
+ TB_CFG_ERROR_PCIE_WAKE = 35,
+ TB_CFG_ERROR_DP_CON_CHANGE = 36,
+ TB_CFG_ERROR_DPTX_DISCOVERY = 37,
+ TB_CFG_ERROR_LINK_RECOVERY = 38,
+ TB_CFG_ERROR_ASYM_LINK = 39,
};
/* common header */
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 2636423748cd..cf9f2370878a 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -190,11 +190,14 @@ struct tb_regs_switch_header {
u32 thunderbolt_version:8;
} __packed;
-/* USB4 version 1.0 */
-#define USB4_VERSION_1_0 0x20
+/* Used with the router thunderbolt_version */
+#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
#define ROUTER_CS_4 0x04
+/* Used with the router cmuv field */
+#define ROUTER_CS_4_CMUV_V1 0x10
+#define ROUTER_CS_4_CMUV_V2 0x20
#define ROUTER_CS_5 0x05
#define ROUTER_CS_5_SLP BIT(0)
#define ROUTER_CS_5_WOP BIT(1)
@@ -249,11 +252,13 @@ enum usb4_switch_op {
#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
-#define TMU_RTR_CS_15 0xf
+#define TMU_RTR_CS_15 0x0f
#define TMU_RTR_CS_15_FREQ_AVG_MASK GENMASK(5, 0)
#define TMU_RTR_CS_15_DELAY_AVG_MASK GENMASK(11, 6)
#define TMU_RTR_CS_15_OFFSET_AVG_MASK GENMASK(17, 12)
#define TMU_RTR_CS_15_ERROR_AVG_MASK GENMASK(23, 18)
+#define TMU_RTR_CS_18 0x12
+#define TMU_RTR_CS_18_DELTA_AVG_CONST_MASK GENMASK(23, 16)
#define TMU_RTR_CS_22 0x16
#define TMU_RTR_CS_24 0x18
#define TMU_RTR_CS_25 0x19
@@ -319,6 +324,14 @@ struct tb_regs_port_header {
#define TMU_ADP_CS_3_UDM BIT(29)
#define TMU_ADP_CS_6 0x06
#define TMU_ADP_CS_6_DTS BIT(1)
+#define TMU_ADP_CS_8 0x08
+#define TMU_ADP_CS_8_REPL_TIMEOUT_MASK GENMASK(14, 0)
+#define TMU_ADP_CS_8_EUDM BIT(15)
+#define TMU_ADP_CS_8_REPL_THRESHOLD_MASK GENMASK(25, 16)
+#define TMU_ADP_CS_9 0x09
+#define TMU_ADP_CS_9_REPL_N_MASK GENMASK(7, 0)
+#define TMU_ADP_CS_9_DIRSWITCH_N_MASK GENMASK(15, 8)
+#define TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK GENMASK(31, 16)
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
@@ -346,6 +359,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN4 0x2
#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
#define LANE_ADP_CS_1_PMS BIT(30)
@@ -436,6 +450,9 @@ struct tb_regs_port_header {
#define DP_COMMON_CAP_1_LANE 0x0
#define DP_COMMON_CAP_2_LANES 0x1
#define DP_COMMON_CAP_4_LANES 0x2
+#define DP_COMMON_CAP_UHBR10 BIT(17)
+#define DP_COMMON_CAP_UHBR20 BIT(18)
+#define DP_COMMON_CAP_UHBR13_5 BIT(19)
#define DP_COMMON_CAP_LTTPR_NS BIT(27)
#define DP_COMMON_CAP_BW_MODE BIT(28)
#define DP_COMMON_CAP_DPRX_DONE BIT(31)
@@ -447,6 +464,8 @@ struct tb_regs_port_header {
/* PCIe adapter registers */
#define ADP_PCIE_CS_0 0x00
#define ADP_PCIE_CS_0_PE BIT(31)
+#define ADP_PCIE_CS_1 0x01
+#define ADP_PCIE_CS_1_EE BIT(0)
/* USB adapter registers */
#define ADP_USB3_CS_0 0x00
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index 24c06e7354cd..9475c6698c7d 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -170,6 +170,23 @@ static struct tb_switch *alloc_host_usb4(struct kunit *test)
return sw;
}
+static struct tb_switch *alloc_host_br(struct kunit *test)
+{
+ struct tb_switch *sw;
+
+ sw = alloc_host_usb4(test);
+ if (!sw)
+ return NULL;
+
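+ /* Add a third DP IN adapter (port 10) to the host router */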
+ sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN;
+ sw->ports[10].config.max_in_hop_id = 9;
+ sw->ports[10].config.max_out_hop_id = 9;
+ sw->ports[10].cap_adap = -1;
+ sw->ports[10].disabled = false;
+
+ return sw;
+}
+
static struct tb_switch *alloc_dev_default(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
@@ -1583,6 +1600,71 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
tb_tunnel_free(tunnel);
}
+static void tb_test_tunnel_3dp(struct kunit *test)
+{
+ struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
+ struct tb_port *in1, *in2, *in3, *out1, *out2, *out3;
+ struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
+
+ /*
+ * Create 3 DP tunnels from Host to Devices #2, #5 and #4.
+ *
+ * [Host]
+ * 3 |
+ * 1 |
+ * [Device #1]
+ * 3 / | 5 \ 7
+ * 1 / | \ 1
+ * [Device #2] | [Device #4]
+ * | 1
+ * [Device #3]
+ * | 5
+ * | 1
+ * [Device #5]
+ */
+ host = alloc_host_br(test);
+ dev1 = alloc_dev_default(test, host, 0x3, true);
+ dev2 = alloc_dev_default(test, dev1, 0x303, true);
+ dev3 = alloc_dev_default(test, dev1, 0x503, true);
+ dev4 = alloc_dev_default(test, dev1, 0x703, true);
+ dev5 = alloc_dev_default(test, dev3, 0x50503, true);
+
+ in1 = &host->ports[5];
+ in2 = &host->ports[6];
+ in3 = &host->ports[10];
+
+ out1 = &dev2->ports[13];
+ out2 = &dev5->ports[13];
+ out3 = &dev4->ports[14];
+
+ tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1);
+ KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
+ KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
+
+ tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2);
+ KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
+ KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
+
+ tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3);
+ KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
+ KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
+
+ tb_tunnel_free(tunnel3);
+ tb_tunnel_free(tunnel2);
+ tb_tunnel_free(tunnel1);
+}
+
static void tb_test_tunnel_usb3(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2;
@@ -2790,6 +2872,7 @@ static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_tunnel_dp_chain),
KUNIT_CASE(tb_test_tunnel_dp_tree),
KUNIT_CASE(tb_test_tunnel_dp_max_length),
+ KUNIT_CASE(tb_test_tunnel_3dp),
KUNIT_CASE(tb_test_tunnel_port_on_path),
KUNIT_CASE(tb_test_tunnel_usb3),
KUNIT_CASE(tb_test_tunnel_dma),
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 626aca3124b1..1269f417515b 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -11,23 +11,63 @@
#include "tb.h"
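+/*
+ * Time interval (in microseconds) between successive transmissions of
+ * the Delay Request TSNOS for each TMU mode (0 disables the Time Sync
+ * handshake).
+ */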
+static const unsigned int tmu_rates[] = {
+ [TB_SWITCH_TMU_MODE_OFF] = 0,
+ [TB_SWITCH_TMU_MODE_LOWRES] = 1000,
+ [TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
+ [TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
+ [TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
+};
+
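+/*
+ * Additional TMU parameters for each mode. The repl_* and dirswitch_n
+ * values are only used by the enhanced uni-directional mode.
+ */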
+static const struct {
+ unsigned int freq_meas_window;
+ unsigned int avg_const;
+ unsigned int delta_avg_const;
+ unsigned int repl_timeout;
+ unsigned int repl_threshold;
+ unsigned int repl_n;
+ unsigned int dirswitch_n;
+} tmu_params[] = {
+ [TB_SWITCH_TMU_MODE_OFF] = { },
+ [TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
+ [TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
+ [TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
+ [TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
+ 800, 4, 0, 3125, 25, 128, 255,
+ },
+};
+
+static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
+{
+ switch (mode) {
+ case TB_SWITCH_TMU_MODE_OFF:
+ return "off";
+ case TB_SWITCH_TMU_MODE_LOWRES:
+ return "uni-directional, LowRes";
+ case TB_SWITCH_TMU_MODE_HIFI_UNI:
+ return "uni-directional, HiFi";
+ case TB_SWITCH_TMU_MODE_HIFI_BI:
+ return "bi-directional, HiFi";
+ case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
+ return "enhanced uni-directional, MedRes";
+ default:
+ return "unknown";
+ }
+}
+
+static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
+{
+ return usb4_switch_version(sw) > 1;
+}
+
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
- enum tb_switch_tmu_rate rate)
+ enum tb_switch_tmu_mode mode)
{
- u32 freq_meas_wind[2] = { 30, 800 };
- u32 avg_const[2] = { 4, 8 };
u32 freq, avg, val;
int ret;
- if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
- freq = freq_meas_wind[0];
- avg = avg_const[0];
- } else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
- freq = freq_meas_wind[1];
- avg = avg_const[1];
- } else {
- return 0;
- }
+ freq = tmu_params[mode].freq_meas_window;
+ avg = tmu_params[mode].avg_const;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_0, 1);
@@ -56,37 +96,30 @@ static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);
- return tb_sw_write(sw, &val, TB_CFG_SWITCH,
- sw->tmu.cap + TMU_RTR_CS_15, 1);
-}
-
-static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
-{
- bool root_switch = !tb_route(sw);
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_15, 1);
+ if (ret)
+ return ret;
- switch (sw->tmu.rate) {
- case TB_SWITCH_TMU_RATE_OFF:
- return "off";
+ if (tb_switch_tmu_enhanced_is_supported(sw)) {
+ u32 delta_avg = tmu_params[mode].delta_avg_const;
- case TB_SWITCH_TMU_RATE_HIFI:
- /* Root switch does not have upstream directionality */
- if (root_switch)
- return "HiFi";
- if (sw->tmu.unidirectional)
- return "uni-directional, HiFi";
- return "bi-directional, HiFi";
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_18, 1);
+ if (ret)
+ return ret;
- case TB_SWITCH_TMU_RATE_NORMAL:
- if (root_switch)
- return "normal";
- return "uni-directional, normal";
+ val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
+ val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);
- default:
- return "unknown";
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_18, 1);
}
+
+ return ret;
}
-static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
+static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
{
int ret;
u32 val;
@@ -182,6 +215,103 @@ static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
return val & TMU_ADP_CS_3_UDM;
}
+static bool tb_port_tmu_is_enhanced(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_8, 1);
+ if (ret)
+ return false;
+
+ return val & TMU_ADP_CS_8_EUDM;
+}
+
+/* Can be called for non-v2 lane adapters too */
+static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
+{
+ int ret;
+ u32 val;
+
+ if (!tb_switch_tmu_enhanced_is_supported(port->sw))
+ return 0;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_8, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ val |= TMU_ADP_CS_8_EUDM;
+ else
+ val &= ~TMU_ADP_CS_8_EUDM;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_8, 1);
+}
+
+static int tb_port_set_tmu_mode_params(struct tb_port *port,
+ enum tb_switch_tmu_mode mode)
+{
+ u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
+ int ret;
+
+ repl_timeout = tmu_params[mode].repl_timeout;
+ repl_threshold = tmu_params[mode].repl_threshold;
+ repl_n = tmu_params[mode].repl_n;
+ dirswitch_n = tmu_params[mode].dirswitch_n;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_8, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
+ val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
+ val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
+ val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_8, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_9, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TMU_ADP_CS_9_REPL_N_MASK;
+ val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
+ val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
+ val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_9, 1);
+}
+
+/* Can be called for non-v2 lane adapters too */
+static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
+{
+ int ret;
+ u32 val;
+
+ if (!tb_switch_tmu_enhanced_is_supported(port->sw))
+ return 0;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_9, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
+ val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_9, 1);
+}
+
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;
@@ -224,6 +354,50 @@ static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
+static int tmu_mode_init(struct tb_switch *sw)
+{
+ bool enhanced, ucap;
+ int ret, rate;
+
+ ucap = tb_switch_tmu_ucap_is_supported(sw);
+ if (ucap)
+ tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
+ enhanced = tb_switch_tmu_enhanced_is_supported(sw);
+ if (enhanced)
+ tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");
+
+ ret = tb_switch_tmu_rate_read(sw);
+ if (ret < 0)
+ return ret;
+ rate = ret;
+
+ /* Off by default */
+ sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;
+
+ if (tb_route(sw)) {
+ struct tb_port *up = tb_upstream_port(sw);
+
+ if (enhanced && tb_port_tmu_is_enhanced(up)) {
+ sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
+ } else if (ucap && tb_port_tmu_is_unidirectional(up)) {
+ if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
+ sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
+ else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
+ sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
+ } else if (rate) {
+ sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
+ }
+ } else if (rate) {
+ sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
+ }
+
+ /* Update the initial request to match the current mode */
+ sw->tmu.mode_request = sw->tmu.mode;
+ sw->tmu.has_ucap = ucap;
+
+ return 0;
+}
+
/**
* tb_switch_tmu_init() - Initialize switch TMU structures
* @sw: Switch to initialize
@@ -252,27 +426,11 @@ int tb_switch_tmu_init(struct tb_switch *sw)
port->cap_tmu = cap;
}
- ret = tb_switch_tmu_rate_read(sw);
- if (ret < 0)
+ ret = tmu_mode_init(sw);
+ if (ret)
return ret;
- sw->tmu.rate = ret;
-
- sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
- if (sw->tmu.has_ucap) {
- tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
-
- if (tb_route(sw)) {
- struct tb_port *up = tb_upstream_port(sw);
-
- sw->tmu.unidirectional =
- tb_port_tmu_is_unidirectional(up);
- }
- } else {
- sw->tmu.unidirectional = false;
- }
-
- tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
+ tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
return 0;
}
@@ -308,7 +466,7 @@ int tb_switch_tmu_post_time(struct tb_switch *sw)
return ret;
for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
- tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
+ tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
gm_local_time[i]);
/* Convert to nanoseconds (drop fractional part) */
@@ -375,6 +533,23 @@ out:
return ret;
}
+static int disable_enhanced(struct tb_port *up, struct tb_port *down)
+{
+ int ret;
+
+ /*
+ * Router may already have been disconnected so ignore errors on the
+ * upstream port.
+ */
+ tb_port_tmu_rate_write(up, 0);
+ tb_port_tmu_enhanced_enable(up, false);
+
+ ret = tb_port_tmu_rate_write(down, 0);
+ if (ret)
+ return ret;
+ return tb_port_tmu_enhanced_enable(down, false);
+}
+
/**
* tb_switch_tmu_disable() - Disable TMU of a switch
* @sw: Switch whose TMU to disable
@@ -383,26 +558,15 @@ out:
*/
int tb_switch_tmu_disable(struct tb_switch *sw)
{
- /*
- * No need to disable TMU on devices that don't support CLx since
- * on these devices e.g. Alpine Ridge and earlier, the TMU mode
- * HiFi bi-directional is enabled by default and we don't change it.
- */
- if (!tb_switch_is_clx_supported(sw))
- return 0;
-
/* Already disabled? */
- if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
+ if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
return 0;
-
if (tb_route(sw)) {
- bool unidirectional = sw->tmu.unidirectional;
- struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *down, *up;
int ret;
- down = tb_port_at(tb_route(sw), parent);
+ down = tb_switch_downstream_port(sw);
up = tb_upstream_port(sw);
/*
* In case of uni-directional time sync, TMU handshake is
@@ -415,37 +579,49 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
* uni-directional mode and we don't want to change its TMU
* mode.
*/
- tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
tb_port_tmu_time_sync_disable(up);
ret = tb_port_tmu_time_sync_disable(down);
if (ret)
return ret;
- if (unidirectional) {
+ switch (sw->tmu.mode) {
+ case TB_SWITCH_TMU_MODE_LOWRES:
+ case TB_SWITCH_TMU_MODE_HIFI_UNI:
/* The switch may be unplugged so ignore any errors */
tb_port_tmu_unidirectional_disable(up);
ret = tb_port_tmu_unidirectional_disable(down);
if (ret)
return ret;
+ break;
+
+ case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
+ ret = disable_enhanced(up, down);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ break;
}
} else {
- tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
}
- sw->tmu.unidirectional = false;
- sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
+ sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;
tb_sw_dbg(sw, "TMU: disabled\n");
return 0;
}
-static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
+/* Called only when there is a failure enabling the requested mode */
+static void tb_switch_tmu_off(struct tb_switch *sw)
{
- struct tb_switch *parent = tb_switch_parent(sw);
+ unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
struct tb_port *down, *up;
- down = tb_port_at(tb_route(sw), parent);
+ down = tb_switch_downstream_port(sw);
up = tb_upstream_port(sw);
/*
* In case of any failure in one of the steps when setting
@@ -456,28 +632,38 @@ static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
*/
tb_port_tmu_time_sync_disable(down);
tb_port_tmu_time_sync_disable(up);
- if (unidirectional)
- tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
- else
- tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
- tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
+ switch (sw->tmu.mode_request) {
+ case TB_SWITCH_TMU_MODE_LOWRES:
+ case TB_SWITCH_TMU_MODE_HIFI_UNI:
+ tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
+ break;
+ case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
+ disable_enhanced(up, down);
+ break;
+ default:
+ break;
+ }
+
+ /* Always set the rate to 0 */
+ tb_switch_tmu_rate_write(sw, rate);
+
+ tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
tb_port_tmu_unidirectional_disable(down);
tb_port_tmu_unidirectional_disable(up);
}
/*
* This function is called when the previous TMU mode was
- * TB_SWITCH_TMU_RATE_OFF.
+ * TB_SWITCH_TMU_MODE_OFF.
*/
-static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
+static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
- struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *up, *down;
int ret;
up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), parent);
+ down = tb_switch_downstream_port(sw);
ret = tb_port_tmu_unidirectional_disable(up);
if (ret)
@@ -487,7 +673,7 @@ static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
if (ret)
goto out;
- ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
if (ret)
goto out;
@@ -502,12 +688,14 @@ static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
return 0;
out:
- __tb_switch_tmu_off(sw, false);
+ tb_switch_tmu_off(sw);
return ret;
}
-static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
+/* Only needed for Titan Ridge */
+static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
+ struct tb_port *up = tb_upstream_port(sw);
u32 val;
int ret;
@@ -518,36 +706,34 @@ static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
- return tb_sw_write(sw, &val, TB_CFG_SWITCH,
- sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
-}
-
-static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
-{
- struct tb_port *up = tb_upstream_port(sw);
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
+ if (ret)
+ return ret;
return tb_port_tmu_write(up, TMU_ADP_CS_6,
TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
- TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
+ TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
+ TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}
/*
* This function is called when the previous TMU mode was
- * TB_SWITCH_TMU_RATE_OFF.
+ * TB_SWITCH_TMU_MODE_OFF.
*/
-static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
+static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
- struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *up, *down;
int ret;
up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), parent);
- ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
+ down = tb_switch_downstream_port(sw);
+ ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
+ tmu_rates[sw->tmu.mode_request]);
if (ret)
return ret;
- ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
+ ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
if (ret)
return ret;
@@ -570,16 +756,65 @@ static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
return 0;
out:
- __tb_switch_tmu_off(sw, true);
+ tb_switch_tmu_off(sw);
+ return ret;
+}
+
+/*
+ * This function is called when the previous TMU mode was
+ * TB_SWITCH_TMU_RATE_OFF.
+ */
+static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
+{
+ unsigned int rate = tmu_rates[sw->tmu.mode_request];
+ struct tb_port *up, *down;
+ int ret;
+
+ /* Router specific parameters first */
+ ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
+ if (ret)
+ return ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_rate_write(up, rate);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_enhanced_enable(up, true);
+ if (ret)
+ goto out;
+
+ ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_rate_write(down, rate);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_enhanced_enable(down, true);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ tb_switch_tmu_off(sw);
return ret;
}
-static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
+static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
- struct tb_switch *parent = tb_switch_parent(sw);
+ unsigned int rate = tmu_rates[sw->tmu.mode];
struct tb_port *down, *up;
- down = tb_port_at(tb_route(sw), parent);
+ down = tb_switch_downstream_port(sw);
up = tb_upstream_port(sw);
/*
* In case of any failure in one of the steps when changing the mode,
@@ -587,42 +822,97 @@ static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
* In case of additional failures in the functions below,
* ignore them since the caller shall already report a failure.
*/
- tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
- if (sw->tmu.unidirectional_request)
- tb_switch_tmu_rate_write(parent, sw->tmu.rate);
- else
- tb_switch_tmu_rate_write(sw, sw->tmu.rate);
+ switch (sw->tmu.mode) {
+ case TB_SWITCH_TMU_MODE_LOWRES:
+ case TB_SWITCH_TMU_MODE_HIFI_UNI:
+ tb_port_tmu_set_unidirectional(down, true);
+ tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
+ break;
+
+ case TB_SWITCH_TMU_MODE_HIFI_BI:
+ tb_port_tmu_set_unidirectional(down, false);
+ tb_switch_tmu_rate_write(sw, rate);
+ break;
+
+ default:
+ break;
+ }
+
+ tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
+
+ switch (sw->tmu.mode) {
+ case TB_SWITCH_TMU_MODE_LOWRES:
+ case TB_SWITCH_TMU_MODE_HIFI_UNI:
+ tb_port_tmu_set_unidirectional(up, true);
+ break;
+
+ case TB_SWITCH_TMU_MODE_HIFI_BI:
+ tb_port_tmu_set_unidirectional(up, false);
+ break;
- tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
- tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
+ default:
+ break;
+ }
}
-static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
+static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
- struct tb_switch *parent = tb_switch_parent(sw);
+ unsigned int rate = tmu_rates[sw->tmu.mode_request];
struct tb_port *up, *down;
int ret;
up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), parent);
- ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
- if (ret)
- goto out;
+ down = tb_switch_downstream_port(sw);
- if (sw->tmu.unidirectional_request)
- ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
- else
- ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
- if (ret)
- return ret;
+ /* Program the upstream router downstream facing lane adapter */
+ switch (sw->tmu.mode_request) {
+ case TB_SWITCH_TMU_MODE_LOWRES:
+ case TB_SWITCH_TMU_MODE_HIFI_UNI:
+ ret = tb_port_tmu_set_unidirectional(down, true);
+ if (ret)
+ goto out;
+ ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
+ if (ret)
+ goto out;
+ break;
- ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
+ case TB_SWITCH_TMU_MODE_HIFI_BI:
+ ret = tb_port_tmu_set_unidirectional(down, false);
+ if (ret)
+ goto out;
+ ret = tb_switch_tmu_rate_write(sw, rate);
+ if (ret)
+ goto out;
+ break;
+
+ default:
+ /* Not allowed to change modes from other than above */
+ return -EINVAL;
+ }
+
+ ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
if (ret)
return ret;
- ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
- if (ret)
- goto out;
+ /* Program the new mode and the downstream router lane adapter */
+ switch (sw->tmu.mode_request) {
+ case TB_SWITCH_TMU_MODE_LOWRES:
+ case TB_SWITCH_TMU_MODE_HIFI_UNI:
+ ret = tb_port_tmu_set_unidirectional(up, true);
+ if (ret)
+ goto out;
+ break;
+
+ case TB_SWITCH_TMU_MODE_HIFI_BI:
+ ret = tb_port_tmu_set_unidirectional(up, false);
+ if (ret)
+ goto out;
+ break;
+
+ default:
+ /* Not allowed to change modes from other than above */
+ return -EINVAL;
+ }
ret = tb_port_tmu_time_sync_enable(down);
if (ret)
@@ -635,7 +925,7 @@ static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
return 0;
out:
- __tb_switch_tmu_change_mode_prev(sw);
+ tb_switch_tmu_change_mode_prev(sw);
return ret;
}
@@ -643,45 +933,21 @@ out:
* tb_switch_tmu_enable() - Enable TMU on a router
* @sw: Router whose TMU to enable
*
- * Enables TMU of a router to be in uni-directional Normal/HiFi
- * or bi-directional HiFi mode. Calling tb_switch_tmu_configure() is required
- * before calling this function, to select the mode Normal/HiFi and
- * directionality (uni-directional/bi-directional).
- * In HiFi mode all tunneling should work. In Normal mode, DP tunneling can't
- * work. Uni-directional mode is required for CLx (Link Low-Power) to work.
+ * Enables the TMU of a router in uni-directional LowRes/HiFi,
+ * bi-directional HiFi, or enhanced uni-directional MedRes mode.
+ * Calling tb_switch_tmu_configure() is required before calling this
+ * function.
*/
int tb_switch_tmu_enable(struct tb_switch *sw)
{
- bool unidirectional = sw->tmu.unidirectional_request;
int ret;
- if (unidirectional && !sw->tmu.has_ucap)
- return -EOPNOTSUPP;
-
- /*
- * No need to enable TMU on devices that don't support CLx since on
- * these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi
- * bi-directional is enabled by default.
- */
- if (!tb_switch_is_clx_supported(sw))
- return 0;
-
- if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
+ if (tb_switch_tmu_is_enabled(sw))
return 0;
- if (tb_switch_is_titan_ridge(sw) && unidirectional) {
- /*
- * Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are
- * enabled and supported together.
- */
- if (!tb_switch_is_clx_enabled(sw, TB_CL1))
- return -EOPNOTSUPP;
-
- ret = tb_switch_tmu_objection_mask(sw);
- if (ret)
- return ret;
-
- ret = tb_switch_tmu_unidirectional_enable(sw);
+ if (tb_switch_is_titan_ridge(sw) &&
+ (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
+ sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
+ ret = tb_switch_tmu_disable_objections(sw);
if (ret)
return ret;
}
@@ -696,19 +962,30 @@ int tb_switch_tmu_enable(struct tb_switch *sw)
* HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
* HiFi-Uni.
*/
- if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
- if (unidirectional)
- ret = __tb_switch_tmu_enable_unidirectional(sw);
- else
- ret = __tb_switch_tmu_enable_bidirectional(sw);
- if (ret)
- return ret;
- } else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
- ret = __tb_switch_tmu_change_mode(sw);
- if (ret)
- return ret;
+ if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
+ switch (sw->tmu.mode_request) {
+ case TB_SWITCH_TMU_MODE_LOWRES:
+ case TB_SWITCH_TMU_MODE_HIFI_UNI:
+ ret = tb_switch_tmu_enable_unidirectional(sw);
+ break;
+
+ case TB_SWITCH_TMU_MODE_HIFI_BI:
+ ret = tb_switch_tmu_enable_bidirectional(sw);
+ break;
+ case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
+ ret = tb_switch_tmu_enable_enhanced(sw);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ } else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
+ sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
+ sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
+ ret = tb_switch_tmu_change_mode(sw);
+ } else {
+ ret = -EINVAL;
}
- sw->tmu.unidirectional = unidirectional;
} else {
/*
* Host router port configurations are written as
@@ -716,58 +993,68 @@ int tb_switch_tmu_enable(struct tb_switch *sw)
* of the child node - see above.
* Here only the host router's rate configuration is written.
*/
- ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
- if (ret)
- return ret;
+ ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
}
- sw->tmu.rate = sw->tmu.rate_request;
+ if (ret) {
+ tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
+ tmu_mode_name(sw->tmu.mode_request), ret);
+ } else {
+ sw->tmu.mode = sw->tmu.mode_request;
+ tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
+ }
- tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
return tb_switch_tmu_set_time_disruption(sw, false);
}
/**
- * tb_switch_tmu_configure() - Configure the TMU rate and directionality
+ * tb_switch_tmu_configure() - Configure the TMU mode
* @sw: Router whose mode to change
- * @rate: Rate to configure Off/Normal/HiFi
- * @unidirectional: If uni-directional (bi-directional otherwise)
+ * @mode: Mode to configure
*
- * Selects the rate of the TMU and directionality (uni-directional or
- * bi-directional). Must be called before tb_switch_tmu_enable().
+ * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
+ * next called.
+ *
+ * Returns %0 on success and negative errno otherwise. Specifically
+ * returns %-EOPNOTSUPP if the requested mode is not possible (not
+ * supported by the router and/or topology).
*/
-void tb_switch_tmu_configure(struct tb_switch *sw,
- enum tb_switch_tmu_rate rate, bool unidirectional)
+int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
- sw->tmu.unidirectional_request = unidirectional;
- sw->tmu.rate_request = rate;
-}
+ switch (mode) {
+ case TB_SWITCH_TMU_MODE_OFF:
+ break;
-static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
-{
- if (tb_is_switch(dev)) {
- struct tb_switch *sw = tb_to_switch(dev);
+ case TB_SWITCH_TMU_MODE_LOWRES:
+ case TB_SWITCH_TMU_MODE_HIFI_UNI:
+ if (!sw->tmu.has_ucap)
+ return -EOPNOTSUPP;
+ break;
- tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
- tb_switch_is_clx_enabled(sw, TB_CL1));
- if (tb_switch_tmu_enable(sw))
- tb_sw_dbg(sw, "fail switching TMU mode for 1st depth router\n");
+ case TB_SWITCH_TMU_MODE_HIFI_BI:
+ break;
+
+ case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
+ const struct tb_switch *parent_sw = tb_switch_parent(sw);
+
+ if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
+ return -EOPNOTSUPP;
+ if (!tb_switch_tmu_enhanced_is_supported(sw))
+ return -EOPNOTSUPP;
+
+ break;
}
- return 0;
-}
+ default:
+ tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
+ return -EINVAL;
+ }
-/**
- * tb_switch_enable_tmu_1st_child - Configure and enable TMU for 1st chidren
- * @sw: The router to configure and enable it's children TMU
- * @rate: Rate of the TMU to configure the router's chidren to
- *
- * Configures and enables the TMU mode of 1st depth children of the specified
- * router to the specified rate.
- */
-void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
- enum tb_switch_tmu_rate rate)
-{
- device_for_each_child(&sw->dev, &rate,
- tb_switch_tmu_config_enable);
+ if (sw->tmu.mode_request != mode) {
+ tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
+ tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
+ sw->tmu.mode_request = mode;
+ }
+
+ return 0;
}
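
For illustration (a minimal sketch, not the kernel implementation; names and the hardware call are assumed), the request/commit split above can be modeled like this: tb_switch_tmu_configure() only validates and records the requested mode, while tb_switch_tmu_enable() programs the hardware and commits mode_request into mode only on success.

	#include <stdio.h>

	enum tmu_mode { TMU_OFF, TMU_LOWRES, TMU_HIFI_UNI, TMU_HIFI_BI };

	struct tmu {
		enum tmu_mode mode;		/* currently programmed mode */
		enum tmu_mode mode_request;	/* what the next enable() should set */
	};

	static int hw_program_mode(enum tmu_mode mode)
	{
		return 0;	/* pretend the register writes succeeded */
	}

	static int tmu_configure(struct tmu *tmu, enum tmu_mode mode)
	{
		tmu->mode_request = mode;	/* validation elided */
		return 0;
	}

	static int tmu_enable(struct tmu *tmu)
	{
		int ret = hw_program_mode(tmu->mode_request);

		/* Commit only when the hardware accepted the new mode */
		if (!ret)
			tmu->mode = tmu->mode_request;
		return ret;
	}

	int main(void)
	{
		struct tmu tmu = { TMU_OFF, TMU_OFF };

		tmu_configure(&tmu, TMU_HIFI_BI);
		tmu_enable(&tmu);
		printf("mode now %d\n", tmu.mode);
		return 0;
	}
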
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 4f222673d651..a6810fb36860 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>
+#include <linux/string_helpers.h>
#include "tunnel.h"
#include "tb.h"
@@ -41,9 +42,14 @@
* Number of credits we try to allocate for each DMA path if not limited
* by the host router baMaxHI.
*/
-#define TB_DMA_CREDITS 14U
+#define TB_DMA_CREDITS 14
/* Minimum number of credits for DMA path */
-#define TB_MIN_DMA_CREDITS 1U
+#define TB_MIN_DMA_CREDITS 1
+
+static unsigned int dma_credits = TB_DMA_CREDITS;
+module_param(dma_credits, uint, 0444);
+MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
+ __MODULE_STRING(TB_DMA_CREDITS) ")");
static bool bw_alloc_mode = true;
module_param(bw_alloc_mode, bool, 0444);
@@ -95,7 +101,7 @@ static unsigned int tb_available_credits(const struct tb_port *port,
pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
if (tb_acpi_is_xdomain_allowed()) {
- spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
+ spare = min_not_zero(sw->max_dma_credits, dma_credits);
/* Add some credits for potential second DMA tunnel */
spare += TB_MIN_DMA_CREDITS;
} else {
@@ -148,18 +154,49 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
return tunnel;
}
+static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
+{
+ int ret;
+
+ /* Only supported if both routers are at least USB4 v2 */
+ if (usb4_switch_version(tunnel->src_port->sw) < 2 ||
+ usb4_switch_version(tunnel->dst_port->sw) < 2)
+ return 0;
+
+ ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
+ if (ret)
+ return ret;
+
+ ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
+ if (ret)
+ return ret;
+
+ tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
+ str_enabled_disabled(enable));
+ return 0;
+}
+
static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
int res;
+ if (activate) {
+ res = tb_pci_set_ext_encapsulation(tunnel, activate);
+ if (res)
+ return res;
+ }
+
res = tb_pci_port_enable(tunnel->src_port, activate);
if (res)
return res;
- if (tb_port_is_pcie_up(tunnel->dst_port))
- return tb_pci_port_enable(tunnel->dst_port, activate);
+ if (tb_port_is_pcie_up(tunnel->dst_port)) {
+ res = tb_pci_port_enable(tunnel->dst_port, activate);
+ if (res)
+ return res;
+ }
- return 0;
+ return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
}
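
The ordering here matters: extended encapsulation is switched on before the PCIe adapters when activating, and switched off only after both adapters are down when deactivating, so the link never runs with mismatched settings. A standalone sketch of that ordering (assumed helper names, not the kernel code):

	#include <stdio.h>
	#include <stdbool.h>

	static int set_ext_encapsulation(bool enable)
	{
		printf("ext encapsulation %s\n", enable ? "on" : "off");
		return 0;
	}

	static int set_ports(bool enable)
	{
		printf("PCIe adapters %s\n", enable ? "enabled" : "disabled");
		return 0;
	}

	static int pci_activate(bool activate)
	{
		int ret;

		/* Enable encapsulation first on the way up */
		if (activate) {
			ret = set_ext_encapsulation(true);
			if (ret)
				return ret;
		}

		ret = set_ports(activate);
		if (ret)
			return ret;

		/* Disable encapsulation last on the way down */
		return activate ? 0 : set_ext_encapsulation(false);
	}

	int main(void)
	{
		pci_activate(true);
		pci_activate(false);
		return 0;
	}
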
static int tb_pci_init_credits(struct tb_path_hop *hop)
@@ -381,6 +418,10 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
return -ETIMEDOUT;
}
+/*
+ * Returns maximum possible rate from capability supporting only DP 2.0
+ * and below. Used when DP BW allocation mode is not enabled.
+ */
static inline u32 tb_dp_cap_get_rate(u32 val)
{
u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
@@ -399,6 +440,28 @@ static inline u32 tb_dp_cap_get_rate(u32 val)
}
}
+/*
+ * Returns maximum possible rate from capability supporting DP 2.1
+ * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
+ * mode is enabled.
+ */
+static inline u32 tb_dp_cap_get_rate_ext(u32 val)
+{
+ if (val & DP_COMMON_CAP_UHBR20)
+ return 20000;
+ else if (val & DP_COMMON_CAP_UHBR13_5)
+ return 13500;
+ else if (val & DP_COMMON_CAP_UHBR10)
+ return 10000;
+
+ return tb_dp_cap_get_rate(val);
+}
+
+static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
+{
+ return rate >= 10000;
+}
+
static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
val &= ~DP_COMMON_CAP_RATE_MASK;
@@ -461,7 +524,9 @@ static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
- /* Tunneling removes the DP 8b/10b encoding */
+ /* Tunneling removes the DP 8b/10b 128/132b encoding */
+ if (tb_dp_is_uhbr_rate(rate))
+ return rate * lanes * 128 / 132;
return rate * lanes * 8 / 10;
}
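
As a quick standalone check of the arithmetic above (illustrative only; the rates used are assumed example values), UHBR rates drop the 128b/132b line coding overhead while lower rates drop the 8b/10b overhead:

	#include <stdio.h>

	/* Mirrors the overhead split above: UHBR rates (>= 10000 Mb/s) use
	 * 128b/132b encoding, everything below uses 8b/10b. */
	static unsigned int dp_bandwidth(unsigned int rate, unsigned int lanes)
	{
		if (rate >= 10000)
			return rate * lanes * 128 / 132;
		return rate * lanes * 8 / 10;
	}

	int main(void)
	{
		/* UHBR20 x4: 20000 * 4 * 128 / 132 = 77575 Mb/s */
		printf("UHBR20 x4: %u Mb/s\n", dp_bandwidth(20000, 4));
		/* HBR3 x4: 8100 * 4 * 8 / 10 = 25920 Mb/s */
		printf("HBR3  x4: %u Mb/s\n", dp_bandwidth(8100, 4));
		return 0;
	}
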
@@ -604,7 +669,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
in->cap_adap + DP_REMOTE_CAP, 1);
}
-static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
+static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
{
int ret, estimated_bw, granularity, tmp;
struct tb_port *out = tunnel->dst_port;
@@ -616,7 +681,7 @@ static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
if (!bw_alloc_mode)
return 0;
- ret = usb4_dp_port_set_cm_bw_mode_supported(in, true);
+ ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
if (ret)
return ret;
@@ -654,6 +719,19 @@ static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
if (ret)
return ret;
+ /*
+ * Pick up the granularity that supports the maximum possible bandwidth.
+ * For that we use the UHBR rates too.
+ */
+ in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
+ out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
+ rate = min(in_rate, out_rate);
+ tmp = tb_dp_bandwidth(rate, lanes);
+
+ tb_port_dbg(in,
+ "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tmp);
+
for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
granularity *= 2)
;
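
The loop above picks the smallest granularity from 250, 500 and 1000 Mb/s for which the bandwidth value still fits the 8-bit register field. A standalone check with an assumed example value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tmp = 77575;	/* e.g. UHBR20 x4 from the sketch above */
		int granularity;

		/* Same loop as in the patch: stop at the first granularity for
		 * which tmp / granularity fits into 8 bits (<= 255). */
		for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
		     granularity *= 2)
			;

		/* Prints: granularity = 500 Mb/s (field value 155) */
		printf("granularity = %d Mb/s (field value %u)\n",
		       granularity, tmp / granularity);
		return 0;
	}
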
@@ -680,12 +758,12 @@ static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
- ret = usb4_dp_port_set_estimated_bw(in, estimated_bw);
+ ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
if (ret)
return ret;
/* Initial allocation should be 0 according the spec */
- ret = usb4_dp_port_allocate_bw(in, 0);
+ ret = usb4_dp_port_allocate_bandwidth(in, 0);
if (ret)
return ret;
@@ -707,7 +785,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
if (!tb_switch_is_usb4(sw))
return 0;
- if (!usb4_dp_port_bw_mode_supported(in))
+ if (!usb4_dp_port_bandwidth_mode_supported(in))
return 0;
tb_port_dbg(in, "bandwidth allocation mode supported\n");
@@ -716,17 +794,17 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
if (ret)
return ret;
- return tb_dp_bw_alloc_mode_enable(tunnel);
+ return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
- if (!usb4_dp_port_bw_mode_supported(in))
+ if (!usb4_dp_port_bandwidth_mode_supported(in))
return;
- if (usb4_dp_port_bw_mode_enabled(in)) {
- usb4_dp_port_set_cm_bw_mode_supported(in, false);
+ if (usb4_dp_port_bandwidth_mode_enabled(in)) {
+ usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
tb_port_dbg(in, "bandwidth allocation mode disabled\n");
}
}
@@ -769,15 +847,42 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
}
/* max_bw is rounded up to next granularity */
-static int tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw)
+static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
+ int *max_bw)
{
struct tb_port *in = tunnel->src_port;
int ret, rate, lanes, nrd_bw;
+ u32 cap;
- ret = usb4_dp_port_nrd(in, &rate, &lanes);
+ /*
+ * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
+ * read parameter values, so we can use this to determine
+ * the maximum possible bandwidth over this link.
+ *
+ * See USB4 v2 spec 1.0 10.4.4.5.
+ */
+ ret = tb_port_read(in, &cap, TB_CFG_PORT,
+ in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
+ rate = tb_dp_cap_get_rate_ext(cap);
+ if (tb_dp_is_uhbr_rate(rate)) {
+ /*
+ * When UHBR is used there is no reduction in lanes so
+ * we can use this directly.
+ */
+ lanes = tb_dp_cap_get_lanes(cap);
+ } else {
+ /*
+ * If UHBR is not supported then check the
+ * non-reduced rate and lanes.
+ */
+ ret = usb4_dp_port_nrd(in, &rate, &lanes);
+ if (ret)
+ return ret;
+ }
+
nrd_bw = tb_dp_bandwidth(rate, lanes);
if (max_bw) {
@@ -790,26 +895,27 @@ static int tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw)
return nrd_bw;
}
-static int tb_dp_bw_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
- int *consumed_up, int *consumed_down)
+static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
+ int *consumed_up,
+ int *consumed_down)
{
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
int ret, allocated_bw, max_bw;
- if (!usb4_dp_port_bw_mode_enabled(in))
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
if (!tunnel->bw_mode)
return -EOPNOTSUPP;
/* Read what was allocated previously if any */
- ret = usb4_dp_port_allocated_bw(in);
+ ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
- ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (allocated_bw == max_bw)
@@ -839,15 +945,15 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
* If we have already set the allocated bandwidth then use that.
* Otherwise we read it from the DPRX.
*/
- if (usb4_dp_port_bw_mode_enabled(in) && tunnel->bw_mode) {
+ if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
int ret, allocated_bw, max_bw;
- ret = usb4_dp_port_allocated_bw(in);
+ ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
- ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (allocated_bw == max_bw)
@@ -874,23 +980,23 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
struct tb_port *in = tunnel->src_port;
int max_bw, ret, tmp;
- if (!usb4_dp_port_bw_mode_enabled(in))
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
- ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (in->sw->config.depth < out->sw->config.depth) {
tmp = min(*alloc_down, max_bw);
- ret = usb4_dp_port_allocate_bw(in, tmp);
+ ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
tmp = min(*alloc_up, max_bw);
- ret = usb4_dp_port_allocate_bw(in, tmp);
+ ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = 0;
@@ -900,6 +1006,9 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
/* Now we can use BW mode registers to figure out the bandwidth */
/* TODO: need to handle discovery too */
tunnel->bw_mode = true;
+
+ tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n",
+ tmp);
return 0;
}
@@ -974,23 +1083,20 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
struct tb_port *in = tunnel->src_port;
- u32 rate, lanes;
int ret;
- /*
- * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX read
- * parameter values so this so we can use this to determine the
- * maximum possible bandwidth over this link.
- */
- ret = tb_dp_read_cap(tunnel, DP_LOCAL_CAP, &rate, &lanes);
- if (ret)
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ return -EOPNOTSUPP;
+
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
+ if (ret < 0)
return ret;
if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
*max_up = 0;
- *max_down = tb_dp_bandwidth(rate, lanes);
+ *max_down = ret;
} else {
- *max_up = tb_dp_bandwidth(rate, lanes);
+ *max_up = ret;
*max_down = 0;
}
@@ -1011,8 +1117,8 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
* mode is enabled first and then read the bandwidth
* through those registers.
*/
- ret = tb_dp_bw_mode_consumed_bandwidth(tunnel, consumed_up,
- consumed_down);
+ ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
if (ret < 0) {
if (ret != -EOPNOTSUPP)
return ret;
@@ -1132,6 +1238,47 @@ static int tb_dp_init_video_path(struct tb_path *path)
return 0;
}
+static void tb_dp_dump(struct tb_tunnel *tunnel)
+{
+ struct tb_port *in, *out;
+ u32 dp_cap, rate, lanes;
+
+ in = tunnel->src_port;
+ out = tunnel->dst_port;
+
+ if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
+ in->cap_adap + DP_LOCAL_CAP, 1))
+ return;
+
+ rate = tb_dp_cap_get_rate(dp_cap);
+ lanes = tb_dp_cap_get_lanes(dp_cap);
+
+ tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
+
+ out = tunnel->dst_port;
+
+ if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
+ out->cap_adap + DP_LOCAL_CAP, 1))
+ return;
+
+ rate = tb_dp_cap_get_rate(dp_cap);
+ lanes = tb_dp_cap_get_lanes(dp_cap);
+
+ tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
+
+ if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
+ in->cap_adap + DP_REMOTE_CAP, 1))
+ return;
+
+ rate = tb_dp_cap_get_rate(dp_cap);
+ lanes = tb_dp_cap_get_lanes(dp_cap);
+
+ tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
+}
+
/**
* tb_tunnel_discover_dp() - Discover existing Display Port tunnels
* @tb: Pointer to the domain structure
@@ -1209,6 +1356,8 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
goto err_deactivate;
}
+ tb_dp_dump(tunnel);
+
tb_tunnel_dbg(tunnel, "discovered\n");
return tunnel;
@@ -1452,6 +1601,10 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_path *path;
int credits;
+ /* Ring 0 is reserved for control channel */
+ if (WARN_ON(!receive_ring || !transmit_ring))
+ return NULL;
+
if (receive_ring > 0)
npaths++;
if (transmit_ring > 0)
@@ -1468,7 +1621,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tunnel->dst_port = dst;
tunnel->deinit = tb_dma_deinit;
- credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);
+ credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
if (receive_ring > 0) {
path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 485b6e430686..05ddb224c464 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -15,6 +15,7 @@
#include "tb.h"
#define USB4_DATA_RETRIES 3
+#define USB4_DATA_DWORDS 16
enum usb4_sb_target {
USB4_SB_TARGET_ROUTER,
@@ -112,7 +113,7 @@ static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
{
const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
- if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
+ if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
return -EINVAL;
/*
@@ -231,11 +232,14 @@ static bool link_is_usb4(struct tb_port *port)
* is not available for some reason (like that there is Thunderbolt 3
* switch upstream) then the internal xHCI controller is enabled
* instead.
+ *
+ * This does not set the configuration valid bit of the router. To do
+ * that call usb4_switch_configuration_valid().
*/
int usb4_switch_setup(struct tb_switch *sw)
{
- struct tb_port *downstream_port;
- struct tb_switch *parent;
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *down;
bool tbt3, xhci;
u32 val = 0;
int ret;
@@ -249,9 +253,8 @@ int usb4_switch_setup(struct tb_switch *sw)
if (ret)
return ret;
- parent = tb_switch_parent(sw);
- downstream_port = tb_port_at(tb_route(sw), parent);
- sw->link_usb4 = link_is_usb4(downstream_port);
+ down = tb_switch_downstream_port(sw);
+ sw->link_usb4 = link_is_usb4(down);
tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");
xhci = val & ROUTER_CS_6_HCI;
@@ -288,7 +291,33 @@ int usb4_switch_setup(struct tb_switch *sw)
/* TBT3 supported by the CM */
val |= ROUTER_CS_5_C3S;
- /* Tunneling configuration is ready now */
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+}
+
+/**
+ * usb4_switch_configuration_valid() - Set tunneling configuration to be valid
+ * @sw: USB4 router
+ *
+ * Sets configuration valid bit for the router. Must be called before
+ * any tunnels can be set through the router and after
+ * usb4_switch_setup() has been called. Can be called to host and device
+ * routers (does nothing for the latter).
+ *
+ * Returns %0 on success and negative errno otherwise.
+ */
+int usb4_switch_configuration_valid(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ if (!tb_route(sw))
+ return 0;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
val |= ROUTER_CS_5_CV;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
@@ -703,7 +732,7 @@ int usb4_switch_credits_init(struct tb_switch *sw)
int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
int ret, length, i, nports;
const struct tb_port *port;
- u32 data[NVM_DATA_DWORDS];
+ u32 data[USB4_DATA_DWORDS];
u32 metadata = 0;
u8 status = 0;
@@ -1199,7 +1228,7 @@ static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
- if (dwords > NVM_DATA_DWORDS)
+ if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
@@ -1209,7 +1238,7 @@ static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
static int usb4_port_write_data(struct tb_port *port, const void *data,
size_t dwords)
{
- if (dwords > NVM_DATA_DWORDS)
+ if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
@@ -1845,7 +1874,7 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
int ret;
metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
- if (dwords < NVM_DATA_DWORDS)
+ if (dwords < USB4_DATA_DWORDS)
metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
@@ -2265,13 +2294,14 @@ int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
}
/**
- * usb4_dp_port_bw_mode_supported() - Is the bandwidth allocation mode supported
+ * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
+ * supported
* @port: DP IN adapter to check
*
* Can be called to any DP IN adapter. Returns true if the adapter
* supports USB4 bandwidth allocation mode, false otherwise.
*/
-bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
+bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
{
int ret;
u32 val;
@@ -2288,13 +2318,14 @@ bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
}
/**
- * usb4_dp_port_bw_mode_enabled() - Is the bandwidth allocation mode enabled
+ * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode
+ * enabled
* @port: DP IN adapter to check
*
* Can be called to any DP IN adapter. Returns true if the bandwidth
* allocation mode has been enabled, false otherwise.
*/
-bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
+bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
{
int ret;
u32 val;
@@ -2311,7 +2342,8 @@ bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
}
/**
- * usb4_dp_port_set_cm_bw_mode_supported() - Set/clear CM support for bandwidth allocation mode
+ * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for
+ * bandwidth allocation mode
* @port: DP IN adapter
* @supported: Does the CM support bandwidth allocation mode
*
@@ -2320,7 +2352,8 @@ bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
* otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
* does not support this.
*/
-int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported)
+int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
+ bool supported)
{
u32 val;
int ret;
@@ -2594,7 +2627,7 @@ int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
}
/**
- * usb4_dp_port_set_estimated_bw() - Set estimated bandwidth
+ * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth
* @port: DP IN adapter
* @bw: Estimated bandwidth in Mb/s.
*
@@ -2604,7 +2637,7 @@ int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
* and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
* the adapter does not support this.
*/
-int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
+int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
@@ -2630,14 +2663,14 @@ int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
}
/**
- * usb4_dp_port_allocated_bw() - Return allocated bandwidth
+ * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
* @port: DP IN adapter
*
* Reads and returns allocated bandwidth for @port in Mb/s (taking into
* account the programmed granularity). Returns negative errno in case
* of error.
*/
-int usb4_dp_port_allocated_bw(struct tb_port *port)
+int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
u32 val, granularity;
int ret;
@@ -2723,7 +2756,7 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
}
/**
- * usb4_dp_port_allocate_bw() - Set allocated bandwidth
+ * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
* @port: DP IN adapter
* @bw: New allocated bandwidth in Mb/s
*
@@ -2731,7 +2764,7 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
* driver). Takes into account the programmed granularity. Returns %0 in
* success and negative errno in case of error.
*/
-int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
+int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
@@ -2765,7 +2798,7 @@ int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
}
/**
- * usb4_dp_port_requested_bw() - Read requested bandwidth
+ * usb4_dp_port_requested_bandwidth() - Read requested bandwidth
* @port: DP IN adapter
*
* Reads the DPCD (graphics driver) requested bandwidth and returns it
@@ -2774,7 +2807,7 @@ int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
* the adapter does not support bandwidth allocation mode, and %ENODATA
* if there is no active bandwidth request from the graphics driver.
*/
-int usb4_dp_port_requested_bw(struct tb_port *port)
+int usb4_dp_port_requested_bandwidth(struct tb_port *port)
{
u32 val, granularity;
int ret;
@@ -2797,3 +2830,34 @@ int usb4_dp_port_requested_bw(struct tb_port *port)
return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
}
+
+/**
+ * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation
+ * @port: PCIe adapter
+ * @enable: Enable/disable extended encapsulation
+ *
+ * Enables or disables extended encapsulation used in PCIe tunneling. Caller
+ * needs to make sure both adapters support this before enabling. Returns %0 on
+ * success and negative errno otherwise.
+ */
+int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
+{
+ u32 val;
+ int ret;
+
+ if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port))
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ val |= ADP_PCIE_CS_1_EE;
+ else
+ val &= ~ADP_PCIE_CS_1_EE;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_1, 1);
+}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index e2b54887d331..5b5566862318 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -537,9 +537,8 @@ static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
struct tb_xdomain *xd, u8 sequence)
{
- struct tb_switch *sw = tb_to_switch(xd->dev.parent);
struct tb_xdp_link_state_status_response res;
- struct tb_port *port = tb_port_at(xd->route, sw);
+ struct tb_port *port = tb_xdomain_downstream_port(xd);
u32 val[2];
int ret;
@@ -1137,7 +1136,7 @@ static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
struct tb_port *port;
int ret;
- port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+ port = tb_xdomain_downstream_port(xd);
ret = tb_port_get_link_speed(port);
if (ret < 0)
@@ -1251,8 +1250,7 @@ static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
unsigned int width)
{
- struct tb_switch *sw = tb_to_switch(xd->dev.parent);
- struct tb_port *port = tb_port_at(xd->route, sw);
+ struct tb_port *port = tb_xdomain_downstream_port(xd);
struct tb *tb = xd->tb;
u8 tlw, tls;
u32 val;
@@ -1292,13 +1290,16 @@ static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
+ unsigned int width, width_mask;
struct tb_port *port;
- int ret, width;
+ int ret;
if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
- width = 1;
+ width = TB_LINK_WIDTH_SINGLE;
+ width_mask = width;
} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
- width = 2;
+ width = TB_LINK_WIDTH_DUAL;
+ width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
} else {
if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev,
@@ -1309,7 +1310,7 @@ static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
return -ETIMEDOUT;
}
- port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+ port = tb_xdomain_downstream_port(xd);
/*
* We can't use tb_xdomain_lane_bonding_enable() here because it
@@ -1330,15 +1331,16 @@ static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
return ret;
}
- ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
+ ret = tb_port_wait_for_link_width(port, width_mask,
+ XDOMAIN_BONDING_TIMEOUT);
if (ret) {
dev_warn(&xd->dev, "error waiting for link width to become %d\n",
- width);
+ width_mask);
return ret;
}
- port->bonded = width == 2;
- port->dual_link_port->bonded = width == 2;
+ port->bonded = width > TB_LINK_WIDTH_SINGLE;
+ port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;
tb_port_update_credits(port);
tb_xdomain_update_link_attributes(xd);
@@ -1425,7 +1427,7 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
if (xd->bonding_possible) {
struct tb_port *port;
- port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+ port = tb_xdomain_downstream_port(xd);
if (!port->bonded)
tb_port_disable(port->dual_link_port);
}
@@ -1737,16 +1739,57 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
-static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ unsigned int width;
- return sysfs_emit(buf, "%u\n", xd->link_width);
+ switch (xd->link_width) {
+ case TB_LINK_WIDTH_SINGLE:
+ case TB_LINK_WIDTH_ASYM_RX:
+ width = 1;
+ break;
+ case TB_LINK_WIDTH_DUAL:
+ width = 2;
+ break;
+ case TB_LINK_WIDTH_ASYM_TX:
+ width = 3;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return sysfs_emit(buf, "%u\n", width);
}
+static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
+
+static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ unsigned int width;
-static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
-static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
+ switch (xd->link_width) {
+ case TB_LINK_WIDTH_SINGLE:
+ case TB_LINK_WIDTH_ASYM_TX:
+ width = 1;
+ break;
+ case TB_LINK_WIDTH_DUAL:
+ width = 2;
+ break;
+ case TB_LINK_WIDTH_ASYM_RX:
+ width = 3;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return sysfs_emit(buf, "%u\n", width);
+}
+static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
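
The two attributes above decode the same link_width value from opposite directions: an asymmetric link has three lanes in one direction and one in the other. A small sketch of the mapping (the enum values are assumptions mirroring the kernel's tb_link_width bits):

	#include <stdio.h>

	enum tb_link_width {
		TB_LINK_WIDTH_SINGLE	= 0x1,
		TB_LINK_WIDTH_DUAL	= 0x2,
		TB_LINK_WIDTH_ASYM_TX	= 0x4,	/* 3 TX lanes, 1 RX lane */
		TB_LINK_WIDTH_ASYM_RX	= 0x8,	/* 1 TX lane, 3 RX lanes */
	};

	static unsigned int tx_lanes(enum tb_link_width w)
	{
		switch (w) {
		case TB_LINK_WIDTH_SINGLE:
		case TB_LINK_WIDTH_ASYM_RX:
			return 1;
		case TB_LINK_WIDTH_DUAL:
			return 2;
		case TB_LINK_WIDTH_ASYM_TX:
			return 3;
		}
		return 0;
	}

	static unsigned int rx_lanes(enum tb_link_width w)
	{
		switch (w) {
		case TB_LINK_WIDTH_SINGLE:
		case TB_LINK_WIDTH_ASYM_TX:
			return 1;
		case TB_LINK_WIDTH_DUAL:
			return 2;
		case TB_LINK_WIDTH_ASYM_RX:
			return 3;
		}
		return 0;
	}

	int main(void)
	{
		printf("asym TX link: %u TX / %u RX lanes\n",
		       tx_lanes(TB_LINK_WIDTH_ASYM_TX),
		       rx_lanes(TB_LINK_WIDTH_ASYM_TX));
		return 0;
	}
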
static struct attribute *xdomain_attrs[] = {
&dev_attr_device.attr,
@@ -1976,10 +2019,11 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
*/
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
+ unsigned int width_mask;
struct tb_port *port;
int ret;
- port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+ port = tb_xdomain_downstream_port(xd);
if (!port->dual_link_port)
return -ENODEV;
@@ -1999,7 +2043,12 @@ int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
return ret;
}
- ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
+ /* Any of these widths means the link is bonded */
+ width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
+ TB_LINK_WIDTH_ASYM_RX;
+
+ ret = tb_port_wait_for_link_width(port, width_mask,
+ XDOMAIN_BONDING_TIMEOUT);
if (ret) {
tb_port_warn(port, "failed to enable lane bonding\n");
return ret;
@@ -2024,10 +2073,13 @@ void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
struct tb_port *port;
- port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+ port = tb_xdomain_downstream_port(xd);
if (port->dual_link_port) {
+ int ret;
+
tb_port_lane_bonding_disable(port);
- if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
+ ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100);
+ if (ret == -ETIMEDOUT)
tb_port_warn(port, "timeout disabling lane bonding\n");
tb_port_disable(port->dual_link_port);
tb_port_update_credits(port);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 1c9e5d2ea7de..552e8a741562 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -203,8 +203,8 @@ static void n_tty_kick_worker(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
/* Did the input worker stop? Restart it */
- if (unlikely(ldata->no_room)) {
- ldata->no_room = 0;
+ if (unlikely(READ_ONCE(ldata->no_room))) {
+ WRITE_ONCE(ldata->no_room, 0);
WARN_RATELIMIT(tty->port->itty == NULL,
"scheduling with invalid itty\n");
@@ -1697,7 +1697,7 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
if (overflow && room < 0)
ldata->read_head--;
room = overflow;
- ldata->no_room = flow && !room;
+ WRITE_ONCE(ldata->no_room, flow && !room);
} else
overflow = 0;
@@ -1728,6 +1728,17 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
} else
n_tty_check_throttle(tty);
+ if (unlikely(ldata->no_room)) {
+ /*
+ * The barrier here ensures that chars_in_buffer() reads the latest
+ * read_tail and that read_tail is not loaded
+ * before ldata->no_room is set.
+ */
+ smp_mb();
+ if (!chars_in_buffer(tty))
+ n_tty_kick_worker(tty);
+ }
+
up_read(&tty->termios_rwsem);
return rcvd;
@@ -2281,8 +2292,14 @@ more_to_be_read:
if (time)
timeout = time;
}
- if (old_tail != ldata->read_tail)
+ if (old_tail != ldata->read_tail) {
+ /*
+ * Make sure no_room is not read in n_tty_kick_worker()
+ * before setting ldata->read_tail in copy_from_read_buf().
+ */
+ smp_mb();
n_tty_kick_worker(tty);
+ }
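
The two smp_mb() calls (here and in n_tty_read() below) form a store/barrier/load pairing: each side publishes its own state before checking the other side's, so at least one of them observes the condition and the worker kick cannot be lost. A heavily simplified user-space sketch of the pattern, with C11 fences standing in for smp_mb() (assumed names; not the tty code):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int no_room;	/* ~ ldata->no_room */
	static atomic_int consumed;	/* ~ read_tail advanced by the reader */

	static void receive_path(void)	/* ~ n_tty_receive_buf_common() */
	{
		atomic_store_explicit(&no_room, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
		if (atomic_load_explicit(&consumed, memory_order_relaxed))
			puts("receive side kicks the worker");
	}

	static void read_path(void)	/* ~ n_tty_read() */
	{
		atomic_store_explicit(&consumed, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
		if (atomic_load_explicit(&no_room, memory_order_relaxed))
			puts("read side kicks the worker");
	}

	int main(void)
	{
		receive_path();
		read_path();
		return 0;
	}
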
up_read(&tty->termios_rwsem);
remove_wait_queue(&tty->read_wait, &wait);
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 1e8fe44a7099..1aa3e55c8b47 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -91,7 +91,6 @@ struct serial8250_config {
#define UART_BUG_TXEN BIT(1) /* UART has buggy TX IIR status */
#define UART_BUG_NOMSR BIT(2) /* UART has buggy MSR status bits (Au1x00) */
#define UART_BUG_THRE BIT(3) /* UART has buggy THRE reassertion */
-#define UART_BUG_PARITY BIT(4) /* UART mishandles parity if FIFO enabled */
#define UART_BUG_TXRACE BIT(5) /* UART Tx fails to set remote DR */
@@ -167,18 +166,21 @@ static unsigned int __maybe_unused serial_icr_read(struct uart_8250_port *up,
void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p);
-static inline int serial_dl_read(struct uart_8250_port *up)
+static inline u32 serial_dl_read(struct uart_8250_port *up)
{
return up->dl_read(up);
}
-static inline void serial_dl_write(struct uart_8250_port *up, int value)
+static inline void serial_dl_write(struct uart_8250_port *up, u32 value)
{
up->dl_write(up, value);
}
static inline bool serial8250_set_THRI(struct uart_8250_port *up)
{
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
if (up->ier & UART_IER_THRI)
return false;
up->ier |= UART_IER_THRI;
@@ -188,6 +190,9 @@ static inline bool serial8250_set_THRI(struct uart_8250_port *up)
static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
{
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
if (!(up->ier & UART_IER_THRI))
return false;
up->ier &= ~UART_IER_THRI;
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 9d2a7856784f..4a9e71b2dbbc 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -275,6 +275,9 @@ static void __aspeed_vuart_set_throttle(struct uart_8250_port *up,
{
unsigned char irqs = UART_IER_RLSI | UART_IER_RDI;
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
up->ier &= ~irqs;
if (!throttle)
up->ier |= irqs;
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index af0e1c070187..d4b05d7ad9e8 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -605,9 +605,13 @@ static int brcmuart_startup(struct uart_port *port)
/*
* Disable the Receive Data Interrupt because the DMA engine
* will handle this.
+ *
+ * Synchronize UART_IER access against the console.
*/
+ spin_lock_irq(&port->lock);
up->ier &= ~UART_IER_RDI;
serial_port_out(port, UART_IER, up->ier);
+ spin_unlock_irq(&port->lock);
priv->tx_running = false;
priv->dma.rx_dma = NULL;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 13bf535eedcd..914e0e6251bf 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -488,6 +488,34 @@ static inline void serial8250_apply_quirks(struct uart_8250_port *up)
up->port.quirks |= skip_txen_test ? UPQ_NO_TXEN_TEST : 0;
}
+static struct uart_8250_port *serial8250_setup_port(int index)
+{
+ struct uart_8250_port *up;
+
+ if (index >= UART_NR)
+ return NULL;
+
+ up = &serial8250_ports[index];
+ up->port.line = index;
+
+ serial8250_init_port(up);
+ if (!base_ops)
+ base_ops = up->port.ops;
+ up->port.ops = &univ8250_port_ops;
+
+ timer_setup(&up->timer, serial8250_timeout, 0);
+
+ up->ops = &univ8250_driver_ops;
+
+ if (IS_ENABLED(CONFIG_ALPHA_JENSEN) ||
+ (IS_ENABLED(CONFIG_ALPHA_GENERIC) && alpha_jensen()))
+ up->port.set_mctrl = alpha_jensen_set_mctrl;
+
+ serial8250_set_defaults(up);
+
+ return up;
+}
+
static void __init serial8250_isa_init_ports(void)
{
struct uart_8250_port *up;
@@ -501,26 +529,13 @@ static void __init serial8250_isa_init_ports(void)
if (nr_uarts > UART_NR)
nr_uarts = UART_NR;
- for (i = 0; i < nr_uarts; i++) {
- struct uart_8250_port *up = &serial8250_ports[i];
- struct uart_port *port = &up->port;
-
- port->line = i;
- serial8250_init_port(up);
- if (!base_ops)
- base_ops = port->ops;
- port->ops = &univ8250_port_ops;
-
- timer_setup(&up->timer, serial8250_timeout, 0);
-
- up->ops = &univ8250_driver_ops;
-
- if (IS_ENABLED(CONFIG_ALPHA_JENSEN) ||
- (IS_ENABLED(CONFIG_ALPHA_GENERIC) && alpha_jensen()))
- port->set_mctrl = alpha_jensen_set_mctrl;
-
- serial8250_set_defaults(up);
- }
+ /*
+ * Set up initial isa ports based on the nr_uarts module param, or else
+ * default to CONFIG_SERIAL_8250_RUNTIME_UARTS. Note that we do not
+ * need to increase nr_uarts when setting up the initial isa ports.
+ */
+ for (i = 0; i < nr_uarts; i++)
+ serial8250_setup_port(i);
/* chain base port ops to support Remote Supervisor Adapter */
univ8250_port_ops = *base_ops;
@@ -586,16 +601,29 @@ static void univ8250_console_write(struct console *co, const char *s,
static int univ8250_console_setup(struct console *co, char *options)
{
+ struct uart_8250_port *up;
struct uart_port *port;
- int retval;
+ int retval, i;
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
- if (co->index >= nr_uarts)
+ if (co->index >= UART_NR)
co->index = 0;
+
+ /*
+ * If the console is past the initial isa ports, init more ports up to
+ * co->index as needed and increment nr_uarts accordingly.
+ */
+ for (i = nr_uarts; i <= co->index; i++) {
+ up = serial8250_setup_port(i);
+ if (!up)
+ return -ENODEV;
+ nr_uarts++;
+ }
+
port = &serial8250_ports[co->index].port;
/* link port to console */
port->cons = co;
@@ -822,12 +850,16 @@ static int serial8250_probe(struct platform_device *dev)
uart.port.iotype = p->iotype;
uart.port.flags = p->flags;
uart.port.mapbase = p->mapbase;
+ uart.port.mapsize = p->mapsize;
uart.port.hub6 = p->hub6;
uart.port.has_sysrq = p->has_sysrq;
uart.port.private_data = p->private_data;
uart.port.type = p->type;
+ uart.bugs = p->bugs;
uart.port.serial_in = p->serial_in;
uart.port.serial_out = p->serial_out;
+ uart.dl_read = p->dl_read;
+ uart.dl_write = p->dl_write;
uart.port.handle_irq = p->handle_irq;
uart.port.handle_break = p->handle_break;
uart.port.set_termios = p->set_termios;
@@ -990,12 +1022,24 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
mutex_lock(&serial_mutex);
uart = serial8250_find_match_or_unused(&up->port);
- if (uart && uart->port.type != PORT_8250_CIR) {
+ if (!uart) {
+ /*
+ * If the port is past the initial isa ports, initialize a new
+ * port and increment nr_uarts accordingly.
+ */
+ uart = serial8250_setup_port(nr_uarts);
+ if (!uart)
+ goto unlock;
+ nr_uarts++;
+ }
+
+ if (uart->port.type != PORT_8250_CIR) {
struct mctrl_gpios *gpios;
if (uart->port.dev)
uart_remove_one_port(&serial8250_reg, &uart->port);
+ uart->port.ctrl_id = up->port.ctrl_id;
uart->port.iobase = up->port.iobase;
uart->port.membase = up->port.membase;
uart->port.irq = up->port.irq;
@@ -1120,6 +1164,7 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
}
}
+unlock:
mutex_unlock(&serial_mutex);
return ret;
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 0ebde0ab8167..4299a8bd83d9 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -36,7 +36,6 @@
static unsigned int serial8250_early_in(struct uart_port *port, int offset)
{
- int reg_offset = offset;
offset <<= port->regshift;
switch (port->iotype) {
@@ -50,8 +49,6 @@ static unsigned int serial8250_early_in(struct uart_port *port, int offset)
return ioread32be(port->membase + offset);
case UPIO_PORT:
return inb(port->iobase + offset);
- case UPIO_AU:
- return port->serial_in(port, reg_offset);
default:
return 0;
}
@@ -59,7 +56,6 @@ static unsigned int serial8250_early_in(struct uart_port *port, int offset)
static void serial8250_early_out(struct uart_port *port, int offset, int value)
{
- int reg_offset = offset;
offset <<= port->regshift;
switch (port->iotype) {
@@ -78,9 +74,6 @@ static void serial8250_early_out(struct uart_port *port, int offset, int value)
case UPIO_PORT:
outb(value, port->iobase + offset);
break;
- case UPIO_AU:
- port->serial_out(port, reg_offset, value);
- break;
}
}
@@ -199,17 +192,3 @@ OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
#endif
-
-#ifdef CONFIG_SERIAL_8250_RT288X
-
-static int __init early_au_setup(struct earlycon_device *dev, const char *opt)
-{
- dev->port.serial_in = au_serial_in;
- dev->port.serial_out = au_serial_out;
- dev->port.iotype = UPIO_AU;
- dev->con->write = early_serial8250_write;
- return 0;
-}
-OF_EARLYCON_DECLARE(palmchip, "ralink,rt2880-uart", early_au_setup);
-
-#endif
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index 25a9ecf26be6..ef5019e944ea 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -139,12 +139,12 @@ static void serial8250_em_serial_out(struct uart_port *p, int offset, int value)
}
}
-static int serial8250_em_serial_dl_read(struct uart_8250_port *up)
+static u32 serial8250_em_serial_dl_read(struct uart_8250_port *up)
{
return serial_in(up, UART_DLL_EM) | serial_in(up, UART_DLM_EM) << 8;
}
-static void serial8250_em_serial_dl_write(struct uart_8250_port *up, int value)
+static void serial8250_em_serial_dl_write(struct uart_8250_port *up, u32 value)
{
serial_out(up, UART_DLL_EM, value & 0xff);
serial_out(up, UART_DLM_EM, value >> 8 & 0xff);
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index b406cba10b0e..077c3ba3539e 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -198,8 +198,12 @@ static int xr17v35x_startup(struct uart_port *port)
/*
* Make sure all interrupts are masked until initialization is
* complete and the FIFOs are cleared
+ *
+ * Synchronize UART_IER access against the console.
*/
+ spin_lock_irq(&port->lock);
serial_port_out(port, UART_IER, 0);
+ spin_unlock_irq(&port->lock);
return serial8250_do_startup(port);
}
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 8adfaa183f77..6af4e1c1210a 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -38,7 +38,19 @@ int fsl8250_handle_irq(struct uart_port *port)
return 0;
}
- /* This is the WAR; if last event was BRK, then read and return */
+ /*
+ * For a single break the hardware reports LSR.BI for each character
+ * time. This is described in the MPC8313E chip errata as "General17".
+ * A typical break has a duration of 0.3s, with a 115200n8 configuration
+ * that (theoretically) corresponds to ~3500 interrupts in these 0.3s.
+ * In practice it's less (around 500) because of hardware
+ * and software latencies. The workaround recommended by the vendor is
+ * to read the RX register (to clear LSR.DR and thus prevent a FIFO
+ * aging interrupt). To prevent the irq from retriggering, LSR must not be
+ * read. (This would clear LSR.BI, hardware would reassert the BI event
+ * immediately and interrupt the CPU again. The hardware clears LSR.BI
+ * when the next valid char is read.)
+ */
if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
up->lsr_saved_flags &= ~UART_LSR_BI;
port->serial_in(port, UART_RX);
@@ -172,3 +184,6 @@ static struct platform_driver fsl8250_platform_driver = {
module_platform_driver(fsl8250_platform_driver);
#endif
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Handling of Freescale specific 8250 variants");
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index fb1d5ec0940e..74da5676ce67 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -222,11 +222,17 @@ static void mtk8250_shutdown(struct uart_port *port)
static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
{
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask));
}
static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask)
{
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
serial_out(up, UART_IER, serial_in(up, UART_IER) | mask);
}
@@ -235,6 +241,9 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
struct uart_port *port = &up->port;
int lcr = serial_in(up, UART_LCR);
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&port->lock);
+
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, lcr);
@@ -422,12 +431,7 @@ static int __maybe_unused mtk8250_runtime_suspend(struct device *dev)
while
(serial_in(up, MTK_UART_DEBUG0));
- if (data->clk_count == 0U) {
- dev_dbg(dev, "%s clock count is 0\n", __func__);
- } else {
- clk_disable_unprepare(data->bus_clk);
- data->clk_count--;
- }
+ clk_disable_unprepare(data->bus_clk);
return 0;
}
@@ -435,19 +439,8 @@ static int __maybe_unused mtk8250_runtime_suspend(struct device *dev)
static int __maybe_unused mtk8250_runtime_resume(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
- int err;
- if (data->clk_count > 0U) {
- dev_dbg(dev, "%s clock count is %d\n", __func__,
- data->clk_count);
- } else {
- err = clk_prepare_enable(data->bus_clk);
- if (err) {
- dev_warn(dev, "Can't enable bus clock\n");
- return err;
- }
- data->clk_count++;
- }
+ clk_prepare_enable(data->bus_clk);
return 0;
}
@@ -456,14 +449,12 @@ static void
mtk8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
{
if (!state)
- if (!mtk8250_runtime_resume(port->dev))
- pm_runtime_get_sync(port->dev);
+ pm_runtime_get_sync(port->dev);
serial8250_do_pm(port, state, old);
if (state)
- if (!pm_runtime_put_sync_suspend(port->dev))
- mtk8250_runtime_suspend(port->dev);
+ pm_runtime_put_sync_suspend(port->dev);
}
#ifdef CONFIG_SERIAL_8250_DMA
@@ -495,7 +486,7 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
return 0;
}
- data->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ data->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
if (IS_ERR(data->bus_clk))
return PTR_ERR(data->bus_clk);
@@ -578,25 +569,16 @@ static int mtk8250_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- pm_runtime_enable(&pdev->dev);
- err = mtk8250_runtime_resume(&pdev->dev);
- if (err)
- goto err_pm_disable;
-
data->line = serial8250_register_8250_port(&uart);
- if (data->line < 0) {
- err = data->line;
- goto err_pm_disable;
- }
+ if (data->line < 0)
+ return data->line;
data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1);
- return 0;
-
-err_pm_disable:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
- return err;
+ return 0;
}
static int mtk8250_remove(struct platform_device *pdev)
@@ -610,9 +592,6 @@ static int mtk8250_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- mtk8250_runtime_suspend(&pdev->dev);
-
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 1b461fba15a3..51329625c48a 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -171,11 +171,13 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
switch (type) {
case PORT_RT2880:
- port->iotype = UPIO_AU;
+ ret = rt288x_setup(port);
+ if (ret)
+ goto err_unprepare;
break;
}
- if (IS_ENABLED(CONFIG_SERIAL_8250_FSL) &&
+ if (IS_REACHABLE(CONFIG_SERIAL_8250_FSL) &&
(of_device_is_compatible(np, "fsl,ns16550") ||
of_device_is_compatible(np, "fsl,16550-FIFO64"))) {
port->handle_irq = fsl8250_handle_irq;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 734f092ef839..d48a82f1634e 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -32,6 +32,7 @@
#include "8250.h"
#define DEFAULT_CLK_SPEED 48000000
+#define OMAP_UART_REGSHIFT 2
#define UART_ERRATA_i202_MDR1_ACCESS (1 << 0)
#define OMAP_UART_WER_HAS_TX_WAKEUP (1 << 1)
@@ -115,6 +116,7 @@
#define UART_OMAP_RX_LVL 0x19
struct omap8250_priv {
+ void __iomem *membase;
int line;
u8 habit;
u8 mdr1;
@@ -159,9 +161,9 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p);
static inline void omap_8250_rx_dma_flush(struct uart_8250_port *p) { }
#endif
-static u32 uart_read(struct uart_8250_port *up, u32 reg)
+static u32 uart_read(struct omap8250_priv *priv, u32 reg)
{
- return readl(up->port.membase + (reg << up->port.regshift));
+ return readl(priv->membase + (reg << OMAP_UART_REGSHIFT));
}
/*
@@ -302,6 +304,9 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
struct uart_8250_dma *dma = up->dma;
u8 mcr = serial8250_in_MCR(up);
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
if (dma && dma->tx_running) {
/*
* TCSANOW requests the change to occur immediately however if
@@ -523,6 +528,10 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
u8 efr;
pm_runtime_get_sync(port->dev);
+
+ /* Synchronize UART_IER access against the console. */
+ spin_lock_irq(&port->lock);
+
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
@@ -533,6 +542,8 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
+ spin_unlock_irq(&port->lock);
+
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
}
@@ -548,7 +559,7 @@ static void omap_serial_fill_features_erratas(struct uart_8250_port *up,
u32 mvr, scheme;
u16 revision, major, minor;
- mvr = uart_read(up, UART_OMAP_MVER);
+ mvr = uart_read(priv, UART_OMAP_MVER);
/* Check revision register scheme */
scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT;
@@ -616,9 +627,9 @@ static int omap_8250_dma_handle_irq(struct uart_port *port);
static irqreturn_t omap8250_irq(int irq, void *dev_id)
{
- struct uart_port *port = dev_id;
- struct omap8250_priv *priv = port->private_data;
- struct uart_8250_port *up = up_to_u8250p(port);
+ struct omap8250_priv *priv = dev_id;
+ struct uart_8250_port *up = serial8250_get_port(priv->line);
+ struct uart_port *port = &up->port;
unsigned int iir, lsr;
int ret;
@@ -649,6 +660,8 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
if ((lsr & UART_LSR_OE) && up->overrun_backoff_time_ms > 0) {
unsigned long delay;
+ /* Synchronize UART_IER access against the console. */
+ spin_lock(&port->lock);
up->ier = port->serial_in(port, UART_IER);
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
port->ops->stop_rx(port);
@@ -658,6 +671,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
*/
cancel_delayed_work(&up->overrun_backoff);
}
+ spin_unlock(&port->lock);
delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
schedule_delayed_work(&up->overrun_backoff, delay);
@@ -672,6 +686,7 @@ static int omap_8250_startup(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = port->private_data;
+ struct uart_8250_dma *dma = &priv->omap8250_dma;
int ret;
if (priv->wakeirq) {
@@ -690,25 +705,23 @@ static int omap_8250_startup(struct uart_port *port)
up->msr_saved_flags = 0;
/* Disable DMA for console UART */
- if (uart_console(port))
- up->dma = NULL;
-
- if (up->dma) {
+ if (dma->fn && !uart_console(port)) {
+ up->dma = &priv->omap8250_dma;
ret = serial8250_request_dma(up);
if (ret) {
dev_warn_ratelimited(port->dev,
"failed to request DMA\n");
up->dma = NULL;
}
+ } else {
+ up->dma = NULL;
}
- ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED,
- dev_name(port->dev), port);
- if (ret < 0)
- goto err;
-
+ /* Synchronize UART_IER access against the console. */
+ spin_lock_irq(&port->lock);
up->ier = UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
+ spin_unlock_irq(&port->lock);
#ifdef CONFIG_PM
up->capabilities |= UART_CAP_RPM;
@@ -720,17 +733,17 @@ static int omap_8250_startup(struct uart_port *port)
priv->wer |= OMAP_UART_TX_WAKEUP_EN;
serial_out(up, UART_OMAP_WER, priv->wer);
- if (up->dma && !(priv->habit & UART_HAS_EFR2))
+ if (up->dma && !(priv->habit & UART_HAS_EFR2)) {
+ spin_lock_irq(&port->lock);
up->dma->rx_dma(up);
+ spin_unlock_irq(&port->lock);
+ }
+
+ enable_irq(up->port.irq);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
return 0;
-err:
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
- dev_pm_clear_wake_irq(port->dev);
- return ret;
}
static void omap_8250_shutdown(struct uart_port *port)
@@ -748,11 +761,16 @@ static void omap_8250_shutdown(struct uart_port *port)
if (priv->habit & UART_HAS_EFR2)
serial_out(up, UART_OMAP_EFR2, 0x0);
+ /* Synchronize UART_IER access against the console. */
+ spin_lock_irq(&port->lock);
up->ier = 0;
serial_out(up, UART_IER, 0);
+ spin_unlock_irq(&port->lock);
+ disable_irq_nosync(up->port.irq);
+ dev_pm_clear_wake_irq(port->dev);
- if (up->dma)
- serial8250_release_dma(up);
+ serial8250_release_dma(up);
+ up->dma = NULL;
/*
* Disable break condition and FIFOs
@@ -763,8 +781,6 @@ static void omap_8250_shutdown(struct uart_port *port)
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
- free_irq(port->irq, port);
- dev_pm_clear_wake_irq(port->dev);
}
static void omap_8250_throttle(struct uart_port *port)
@@ -791,6 +807,7 @@ static void omap_8250_unthrottle(struct uart_port *port)
pm_runtime_get_sync(port->dev);
+ /* Synchronize UART_IER access against the console. */
spin_lock_irqsave(&port->lock, flags);
priv->throttled = false;
if (up->dma)
@@ -941,6 +958,7 @@ static void __dma_rx_complete(void *param)
struct dma_tx_state state;
unsigned long flags;
+ /* Synchronize UART_IER access against the console. */
spin_lock_irqsave(&p->port.lock, flags);
/*
@@ -998,6 +1016,9 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
unsigned long flags;
u32 reg;
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&p->port.lock);
+
if (priv->rx_dma_broken)
return -EINVAL;
@@ -1212,6 +1233,9 @@ static u16 omap_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir, u16 status
static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir,
u16 status)
{
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
/*
* Queue a new transfer if FIFO has data.
*/
@@ -1394,7 +1418,7 @@ static int omap8250_probe(struct platform_device *pdev)
UPF_HARD_FLOW;
up.port.private_data = priv;
- up.port.regshift = 2;
+ up.port.regshift = OMAP_UART_REGSHIFT;
up.port.fifosize = 64;
up.tx_loadsz = 64;
up.capabilities = UART_CAP_FIFO;
@@ -1444,8 +1468,6 @@ static int omap8250_probe(struct platform_device *pdev)
&up.overrun_backoff_time_ms) != 0)
up.overrun_backoff_time_ms = 0;
- priv->wakeirq = irq_of_parse_and_map(np, 1);
-
pdata = of_device_get_match_data(&pdev->dev);
if (pdata)
priv->habit |= pdata->habit;
@@ -1457,6 +1479,8 @@ static int omap8250_probe(struct platform_device *pdev)
DEFAULT_CLK_SPEED);
}
+ priv->membase = membase;
+ priv->line = -ENODEV;
priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
priv->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
cpu_latency_qos_add_request(&priv->pm_qos_request, priv->latency);
@@ -1464,6 +1488,8 @@ static int omap8250_probe(struct platform_device *pdev)
spin_lock_init(&priv->rx_dma_lock);
+ platform_set_drvdata(pdev, priv);
+
device_init_wakeup(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -1498,64 +1524,77 @@ static int omap8250_probe(struct platform_device *pdev)
ret = of_property_count_strings(np, "dma-names");
if (ret == 2) {
struct omap8250_dma_params *dma_params = NULL;
+ struct uart_8250_dma *dma = &priv->omap8250_dma;
- up.dma = &priv->omap8250_dma;
- up.dma->fn = the_no_dma_filter_fn;
- up.dma->tx_dma = omap_8250_tx_dma;
- up.dma->rx_dma = omap_8250_rx_dma;
+ dma->fn = the_no_dma_filter_fn;
+ dma->tx_dma = omap_8250_tx_dma;
+ dma->rx_dma = omap_8250_rx_dma;
if (pdata)
dma_params = pdata->dma_params;
if (dma_params) {
- up.dma->rx_size = dma_params->rx_size;
- up.dma->rxconf.src_maxburst = dma_params->rx_trigger;
- up.dma->txconf.dst_maxburst = dma_params->tx_trigger;
+ dma->rx_size = dma_params->rx_size;
+ dma->rxconf.src_maxburst = dma_params->rx_trigger;
+ dma->txconf.dst_maxburst = dma_params->tx_trigger;
priv->rx_trigger = dma_params->rx_trigger;
priv->tx_trigger = dma_params->tx_trigger;
} else {
- up.dma->rx_size = RX_TRIGGER;
- up.dma->rxconf.src_maxburst = RX_TRIGGER;
- up.dma->txconf.dst_maxburst = TX_TRIGGER;
+ dma->rx_size = RX_TRIGGER;
+ dma->rxconf.src_maxburst = RX_TRIGGER;
+ dma->txconf.dst_maxburst = TX_TRIGGER;
}
}
#endif
+
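+ /*
+ * Request the IRQ up front but keep it disabled (IRQ_NOAUTOEN); it is
+ * presumably only enabled once the port is actually started, so no
+ * interrupt can fire before the 8250 port below has been registered.
+ */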
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, irq, omap8250_irq, 0,
+ dev_name(&pdev->dev), priv);
+ if (ret < 0)
+ return ret;
+
+ priv->wakeirq = irq_of_parse_and_map(np, 1);
+
ret = serial8250_register_8250_port(&up);
if (ret < 0) {
dev_err(&pdev->dev, "unable to register 8250 port\n");
goto err;
}
priv->line = ret;
- platform_set_drvdata(pdev, priv);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err:
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ flush_work(&priv->qos_work);
pm_runtime_disable(&pdev->dev);
+ cpu_latency_qos_remove_request(&priv->pm_qos_request);
return ret;
}
static int omap8250_remove(struct platform_device *pdev)
{
struct omap8250_priv *priv = platform_get_drvdata(pdev);
+ struct uart_8250_port *up;
int err;
err = pm_runtime_resume_and_get(&pdev->dev);
if (err)
return err;
+ up = serial8250_get_port(priv->line);
+ omap_8250_shutdown(&up->port);
+ serial8250_unregister_port(priv->line);
+ priv->line = -ENODEV;
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
flush_work(&priv->qos_work);
pm_runtime_disable(&pdev->dev);
- serial8250_unregister_port(priv->line);
cpu_latency_qos_remove_request(&priv->pm_qos_request);
device_init_wakeup(&pdev->dev, false);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int omap8250_prepare(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
@@ -1579,33 +1618,38 @@ static int omap8250_suspend(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
+ int err;
serial8250_suspend_port(priv->line);
- pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
if (!device_may_wakeup(dev))
priv->wer = 0;
serial_out(up, UART_OMAP_WER, priv->wer);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
-
+ err = pm_runtime_force_suspend(dev);
flush_work(&priv->qos_work);
- return 0;
+
+ return err;
}
static int omap8250_resume(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
+ int err;
+ err = pm_runtime_force_resume(dev);
+ if (err)
+ return err;
serial8250_resume_port(priv->line);
+ /* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
}
-#else
-#define omap8250_prepare NULL
-#define omap8250_complete NULL
-#endif
-#ifdef CONFIG_PM
static int omap8250_lost_context(struct uart_8250_port *up)
{
u32 val;
@@ -1621,11 +1665,15 @@ static int omap8250_lost_context(struct uart_8250_port *up)
return 0;
}
+static void uart_write(struct omap8250_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->membase + (reg << OMAP_UART_REGSHIFT));
+}
+
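+ /*
+ * Minimal sketch of the matching read accessor uart_write() pairs with
+ * (assumed to be introduced elsewhere in this patch; it is not shown in
+ * this hunk):
+ *
+ *     static u32 uart_read(struct omap8250_priv *priv, u32 reg)
+ *     {
+ *             return readl(priv->membase + (reg << OMAP_UART_REGSHIFT));
+ *     }
+ *
+ * The soft reset below uses these priv-based accessors so the UART can be
+ * touched even before serial8250_register_8250_port() has run, i.e. while
+ * priv->line is still -ENODEV.
+ */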
/* TODO: in future, this should happen via API in drivers/reset/ */
static int omap8250_soft_reset(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
- struct uart_8250_port *up = serial8250_get_port(priv->line);
int timeout = 100;
int sysc;
int syss;
@@ -1639,20 +1687,20 @@ static int omap8250_soft_reset(struct device *dev)
* needing omap8250_soft_reset() quirk. Do it in two writes as
* recommended in the comment for omap8250_update_scr().
*/
- serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
- serial_out(up, UART_OMAP_SCR,
+ uart_write(priv, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
+ uart_write(priv, UART_OMAP_SCR,
OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
- sysc = serial_in(up, UART_OMAP_SYSC);
+ sysc = uart_read(priv, UART_OMAP_SYSC);
/* softreset the UART */
sysc |= OMAP_UART_SYSC_SOFTRESET;
- serial_out(up, UART_OMAP_SYSC, sysc);
+ uart_write(priv, UART_OMAP_SYSC, sysc);
/* By experiments, 1us enough for reset complete on AM335x */
do {
udelay(1);
- syss = serial_in(up, UART_OMAP_SYSS);
+ syss = uart_read(priv, UART_OMAP_SYSS);
} while (--timeout && !(syss & OMAP_UART_SYSS_RESETDONE));
if (!timeout) {
@@ -1666,13 +1714,10 @@ static int omap8250_soft_reset(struct device *dev)
static int omap8250_runtime_suspend(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
- struct uart_8250_port *up;
-
- /* In case runtime-pm tries this before we are setup */
- if (!priv)
- return 0;
+ struct uart_8250_port *up = NULL;
- up = serial8250_get_port(priv->line);
+ if (priv->line >= 0)
+ up = serial8250_get_port(priv->line);
/*
* When using 'no_console_suspend', the console UART must not be
* suspended. Since driver suspend is managed by runtime suspend,
@@ -1680,7 +1725,7 @@ static int omap8250_runtime_suspend(struct device *dev)
* active during suspend.
*/
if (priv->is_suspending && !console_suspend_enabled) {
- if (uart_console(&up->port))
+ if (up && uart_console(&up->port))
return -EBUSY;
}
@@ -1691,13 +1736,15 @@ static int omap8250_runtime_suspend(struct device *dev)
if (ret)
return ret;
- /* Restore to UART mode after reset (for wakeup) */
- omap8250_update_mdr1(up, priv);
- /* Restore wakeup enable register */
- serial_out(up, UART_OMAP_WER, priv->wer);
+ if (up) {
+ /* Restore to UART mode after reset (for wakeup) */
+ omap8250_update_mdr1(up, priv);
+ /* Restore wakeup enable register */
+ serial_out(up, UART_OMAP_WER, priv->wer);
+ }
}
- if (up->dma && up->dma->rxchan)
+ if (up && up->dma && up->dma->rxchan)
omap_8250_rx_dma_flush(up);
priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
@@ -1709,25 +1756,27 @@ static int omap8250_runtime_suspend(struct device *dev)
static int omap8250_runtime_resume(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
- struct uart_8250_port *up;
-
- /* In case runtime-pm tries this before we are setup */
- if (!priv)
- return 0;
+ struct uart_8250_port *up = NULL;
- up = serial8250_get_port(priv->line);
+ if (priv->line >= 0)
+ up = serial8250_get_port(priv->line);
- if (omap8250_lost_context(up))
+ if (up && omap8250_lost_context(up)) {
+ spin_lock_irq(&up->port.lock);
omap8250_restore_regs(up);
+ spin_unlock_irq(&up->port.lock);
+ }
- if (up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2))
+ if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) {
+ spin_lock_irq(&up->port.lock);
omap_8250_rx_dma(up);
+ spin_unlock_irq(&up->port.lock);
+ }
priv->latency = priv->calc_latency;
schedule_work(&priv->qos_work);
return 0;
}
-#endif
#ifdef CONFIG_SERIAL_8250_OMAP_TTYO_FIXUP
static int __init omap8250_console_fixup(void)
@@ -1770,17 +1819,17 @@ console_initcall(omap8250_console_fixup);
#endif
static const struct dev_pm_ops omap8250_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume)
- SET_RUNTIME_PM_OPS(omap8250_runtime_suspend,
+ SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume)
+ RUNTIME_PM_OPS(omap8250_runtime_suspend,
omap8250_runtime_resume, NULL)
- .prepare = omap8250_prepare,
- .complete = omap8250_complete,
+ .prepare = pm_sleep_ptr(omap8250_prepare),
+ .complete = pm_sleep_ptr(omap8250_complete),
};
static struct platform_driver omap8250_platform_driver = {
.driver = {
.name = "omap8250",
- .pm = &omap8250_dev_pm_ops,
+ .pm = pm_ptr(&omap8250_dev_pm_ops),
.of_match_table = omap8250_dt_ids,
},
.probe = omap8250_probe,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e80c4f6551a1..d2d547b5da95 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1232,14 +1232,6 @@ static int pci_oxsemi_tornado_setup(struct serial_private *priv,
return pci_default_setup(priv, board, up, idx);
}
-static int pci_asix_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
-{
- port->bugs |= UART_BUG_PARITY;
- return pci_default_setup(priv, board, port, idx);
-}
-
#define QPCR_TEST_FOR1 0x3F
#define QPCR_TEST_GET1 0x00
#define QPCR_TEST_FOR2 0x40
@@ -1955,7 +1947,6 @@ pci_moxa_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173
#define PCI_VENDOR_ID_AGESTAR 0x5372
#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
-#define PCI_VENDOR_ID_ASIX 0x9710
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
@@ -2601,16 +2592,6 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = pci_wch_ch38x_setup,
},
/*
- * ASIX devices with FIFO bug
- */
- {
- .vendor = PCI_VENDOR_ID_ASIX,
- .device = PCI_ANY_ID,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_asix_setup,
- },
- /*
* Broadcom TruManage (NetXtreme)
*/
{
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index c153ba3a018a..16aeb1420137 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -325,7 +325,7 @@ static const struct serial8250_config uart_config[] = {
};
/* Uart divisor latch read */
-static int default_serial_dl_read(struct uart_8250_port *up)
+static u32 default_serial_dl_read(struct uart_8250_port *up)
{
/* Assign these in pieces to truncate any bits above 7. */
unsigned char dll = serial_in(up, UART_DLL);
@@ -335,72 +335,12 @@ static int default_serial_dl_read(struct uart_8250_port *up)
}
/* Uart divisor latch write */
-static void default_serial_dl_write(struct uart_8250_port *up, int value)
+static void default_serial_dl_write(struct uart_8250_port *up, u32 value)
{
serial_out(up, UART_DLL, value & 0xff);
serial_out(up, UART_DLM, value >> 8 & 0xff);
}
-#ifdef CONFIG_SERIAL_8250_RT288X
-
-#define UART_REG_UNMAPPED -1
-
-/* Au1x00/RT288x UART hardware has a weird register layout */
-static const s8 au_io_in_map[8] = {
- [UART_RX] = 0,
- [UART_IER] = 2,
- [UART_IIR] = 3,
- [UART_LCR] = 5,
- [UART_MCR] = 6,
- [UART_LSR] = 7,
- [UART_MSR] = 8,
- [UART_SCR] = UART_REG_UNMAPPED,
-};
-
-static const s8 au_io_out_map[8] = {
- [UART_TX] = 1,
- [UART_IER] = 2,
- [UART_FCR] = 4,
- [UART_LCR] = 5,
- [UART_MCR] = 6,
- [UART_LSR] = UART_REG_UNMAPPED,
- [UART_MSR] = UART_REG_UNMAPPED,
- [UART_SCR] = UART_REG_UNMAPPED,
-};
-
-unsigned int au_serial_in(struct uart_port *p, int offset)
-{
- if (offset >= ARRAY_SIZE(au_io_in_map))
- return UINT_MAX;
- offset = au_io_in_map[offset];
- if (offset == UART_REG_UNMAPPED)
- return UINT_MAX;
- return __raw_readl(p->membase + (offset << p->regshift));
-}
-
-void au_serial_out(struct uart_port *p, int offset, int value)
-{
- if (offset >= ARRAY_SIZE(au_io_out_map))
- return;
- offset = au_io_out_map[offset];
- if (offset == UART_REG_UNMAPPED)
- return;
- __raw_writel(value, p->membase + (offset << p->regshift));
-}
-
-/* Au1x00 haven't got a standard divisor latch */
-static int au_serial_dl_read(struct uart_8250_port *up)
-{
- return __raw_readl(up->port.membase + 0x28);
-}
-
-static void au_serial_dl_write(struct uart_8250_port *up, int value)
-{
- __raw_writel(value, up->port.membase + 0x28);
-}
-
-#endif
-
static unsigned int hub6_serial_in(struct uart_port *p, int offset)
{
offset = offset << p->regshift;
@@ -510,15 +450,6 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = mem32be_serial_out;
break;
-#ifdef CONFIG_SERIAL_8250_RT288X
- case UPIO_AU:
- p->serial_in = au_serial_in;
- p->serial_out = au_serial_out;
- up->dl_read = au_serial_dl_read;
- up->dl_write = au_serial_dl_write;
- break;
-#endif
-
default:
p->serial_in = io_serial_in;
p->serial_out = io_serial_out;
@@ -608,6 +539,9 @@ EXPORT_SYMBOL_GPL(serial8250_rpm_put);
*/
static int serial8250_em485_init(struct uart_8250_port *p)
{
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&p->port.lock);
+
if (p->em485)
goto deassert_rts;
@@ -746,6 +680,8 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
serial8250_rpm_get(p);
if (p->capabilities & UART_CAP_SLEEP) {
+ /* Synchronize UART_IER access against the console. */
+ spin_lock_irq(&p->port.lock);
if (p->capabilities & UART_CAP_EFR) {
lcr = serial_in(p, UART_LCR);
efr = serial_in(p, UART_EFR);
@@ -759,6 +695,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
serial_out(p, UART_EFR, efr);
serial_out(p, UART_LCR, lcr);
}
+ spin_unlock_irq(&p->port.lock);
}
serial8250_rpm_put(p);
@@ -766,6 +703,9 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
static void serial8250_clear_IER(struct uart_8250_port *up)
{
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
if (up->capabilities & UART_CAP_UUE)
serial_out(up, UART_IER, UART_IER_UUE);
else
@@ -848,7 +788,7 @@ static void disable_rsa(struct uart_8250_port *up)
static int size_fifo(struct uart_8250_port *up)
{
unsigned char old_fcr, old_mcr, old_lcr;
- unsigned short old_dl;
+ u32 old_dl;
int count;
old_lcr = serial_in(up, UART_LCR);
@@ -1038,6 +978,9 @@ static void autoconfig_16550a(struct uart_8250_port *up)
unsigned char status1, status2;
unsigned int iersave;
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
up->port.type = PORT_16550A;
up->capabilities |= UART_CAP_FIFO;
@@ -1221,6 +1164,8 @@ static void autoconfig(struct uart_8250_port *up)
/*
* We really do need global IRQs disabled here - we're going to
* be frobbing the chips IRQ enable register to see if it exists.
+ *
+ * Synchronize UART_IER access against the console.
*/
spin_lock_irqsave(&port->lock, flags);
@@ -1393,7 +1338,10 @@ static void autoconfig_irq(struct uart_8250_port *up)
/* forget possible initially masked and pending IRQ */
probe_irq_off(probe_irq_on());
save_mcr = serial8250_in_MCR(up);
+ /* Synchronize UART_IER access against the console. */
+ spin_lock_irq(&port->lock);
save_ier = serial_in(up, UART_IER);
+ spin_unlock_irq(&port->lock);
serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
irqs = probe_irq_on();
@@ -1405,7 +1353,10 @@ static void autoconfig_irq(struct uart_8250_port *up)
serial8250_out_MCR(up,
UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
}
+ /* Synchronize UART_IER access against the console. */
+ spin_lock_irq(&port->lock);
serial_out(up, UART_IER, UART_IER_ALL_INTR);
+ spin_unlock_irq(&port->lock);
serial_in(up, UART_LSR);
serial_in(up, UART_RX);
serial_in(up, UART_IIR);
@@ -1415,7 +1366,10 @@ static void autoconfig_irq(struct uart_8250_port *up)
irq = probe_irq_off(irqs);
serial8250_out_MCR(up, save_mcr);
+ /* Synchronize UART_IER access against the console. */
+ spin_lock_irq(&port->lock);
serial_out(up, UART_IER, save_ier);
+ spin_unlock_irq(&port->lock);
if (port->flags & UPF_FOURPORT)
outb_p(save_ICP, ICP);
@@ -1430,6 +1384,9 @@ static void serial8250_stop_rx(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&port->lock);
+
serial8250_rpm_get(up);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
@@ -1449,6 +1406,9 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
{
unsigned char mcr = serial8250_in_MCR(p);
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&p->port.lock);
+
if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
mcr |= UART_MCR_RTS;
else
@@ -1498,6 +1458,9 @@ static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
{
struct uart_8250_em485 *em485 = p->em485;
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&p->port.lock);
+
stop_delay += (u64)p->port.rs485.delay_rts_after_send * NSEC_PER_MSEC;
/*
@@ -1677,6 +1640,9 @@ static void serial8250_start_tx(struct uart_port *port)
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&port->lock);
+
if (!port->x_char && uart_circ_empty(&port->state->xmit))
return;
@@ -1704,6 +1670,9 @@ static void serial8250_disable_ms(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&port->lock);
+
/* no MSR capabilities */
if (up->bugs & UART_BUG_NOMSR)
return;
@@ -1718,6 +1687,9 @@ static void serial8250_enable_ms(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&port->lock);
+
/* no MSR capabilities */
if (up->bugs & UART_BUG_NOMSR)
return;
@@ -2174,6 +2146,14 @@ static void serial8250_put_poll_char(struct uart_port *port,
unsigned int ier;
struct uart_8250_port *up = up_to_u8250p(port);
+ /*
+ * Normally the port is locked to synchronize UART_IER access
+ * against the console. However, this function is only used by
+ * KDB/KGDB, where it may not be possible to acquire the port
+ * lock because all other CPUs are quiesced. The quiescence
+ * should allow safe lockless usage here.
+ */
+
serial8250_rpm_get(up);
/*
* First save the IER then disable the interrupts
@@ -2219,7 +2199,12 @@ int serial8250_do_startup(struct uart_port *port)
serial8250_rpm_get(up);
if (port->type == PORT_16C950) {
- /* Wake up and initialize UART */
+ /*
+ * Wake up and initialize UART
+ *
+ * Synchronize UART_IER access against the console.
+ */
+ spin_lock_irqsave(&port->lock, flags);
up->acr = 0;
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
@@ -2229,12 +2214,19 @@ int serial8250_do_startup(struct uart_port *port)
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
serial_port_out(port, UART_LCR, 0);
+ spin_unlock_irqrestore(&port->lock, flags);
}
if (port->type == PORT_DA830) {
- /* Reset the port */
+ /*
+ * Reset the port
+ *
+ * Synchronize UART_IER access against the console.
+ */
+ spin_lock_irqsave(&port->lock, flags);
serial_port_out(port, UART_IER, 0);
serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
+ spin_unlock_irqrestore(&port->lock, flags);
mdelay(10);
/* Enable Tx, Rx and free run mode */
@@ -2345,6 +2337,8 @@ int serial8250_do_startup(struct uart_port *port)
* this interrupt whenever the transmitter is idle and
* the interrupt is enabled. Delays are necessary to
* allow register changes to become visible.
+ *
+ * Synchronize UART_IER access against the console.
*/
spin_lock_irqsave(&port->lock, flags);
@@ -2495,6 +2489,8 @@ void serial8250_do_shutdown(struct uart_port *port)
serial8250_rpm_get(up);
/*
* Disable interrupts from this port
+ *
+ * Synchronize UART_IER access against the console.
*/
spin_lock_irqsave(&port->lock, flags);
up->ier = 0;
@@ -2636,11 +2632,8 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
if (c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
- if (c_cflag & PARENB) {
+ if (c_cflag & PARENB)
cval |= UART_LCR_PARITY;
- if (up->bugs & UART_BUG_PARITY)
- up->fifo_bug = true;
- }
if (!(c_cflag & PARODD))
cval |= UART_LCR_EPAR;
if (c_cflag & CMSPAR)
@@ -2794,6 +2787,8 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
+ *
+ * Synchronize UART_IER access against the console.
*/
serial8250_rpm_get(up);
spin_lock_irqsave(&port->lock, flags);
@@ -2801,8 +2796,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
up->lcr = cval; /* Save computed LCR */
if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
- /* NOTE: If fifo_bug is not set, a user can set RX_trigger. */
- if ((baud < 2400 && !up->dma) || up->fifo_bug) {
+ if (baud < 2400 && !up->dma) {
up->fcr &= ~UART_FCR_TRIGGER_MASK;
up->fcr |= UART_FCR_TRIGGER_1;
}
@@ -2969,11 +2963,6 @@ static unsigned int serial8250_port_size(struct uart_8250_port *pt)
{
if (pt->port.mapsize)
return pt->port.mapsize;
- if (pt->port.iotype == UPIO_AU) {
- if (pt->port.type == PORT_RT2880)
- return 0x100;
- return 0x1000;
- }
if (is_omap1_8250(pt))
return 0x16 << pt->port.regshift;
@@ -3138,8 +3127,7 @@ static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
struct uart_8250_port *up = up_to_u8250p(uport);
int rxtrig;
- if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1 ||
- up->fifo_bug)
+ if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
return -EINVAL;
rxtrig = bytes_to_fcr_rxtrig(up, bytes);
@@ -3223,10 +3211,6 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (flags & UART_CONFIG_TYPE)
autoconfig(up);
- /* if access method is AU, it is a 16550 with a quirk */
- if (port->type == PORT_16550A && port->iotype == UPIO_AU)
- up->bugs |= UART_BUG_NOMSR;
-
/* HW bugs may trigger IRQ while IIR == NO_INT */
if (port->type == PORT_TEGRA)
up->bugs |= UART_BUG_NOMSR;
@@ -3293,6 +3277,7 @@ void serial8250_init_port(struct uart_8250_port *up)
struct uart_port *port = &up->port;
spin_lock_init(&port->lock);
+ port->ctrl_id = 0;
port->ops = &serial8250_pops;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 795e55142d4c..28b341f602c6 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -60,7 +60,7 @@ static const struct of_device_id serial_pxa_dt_ids[] = {
MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids);
/* Uart divisor latch write */
-static void serial_pxa_dl_write(struct uart_8250_port *up, int value)
+static void serial_pxa_dl_write(struct uart_8250_port *up, u32 value)
{
unsigned int dll;
diff --git a/drivers/tty/serial/8250/8250_rt288x.c b/drivers/tty/serial/8250/8250_rt288x.c
new file mode 100644
index 000000000000..6415ca8d3adf
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_rt288x.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RT288x/Au1xxx driver
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+
+#include "8250.h"
+
+#define RT288X_DL 0x28
+
+/* Au1x00/RT288x UART hardware has a weird register layout */
+static const u8 au_io_in_map[7] = {
+ [UART_RX] = 0,
+ [UART_IER] = 2,
+ [UART_IIR] = 3,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = 7,
+ [UART_MSR] = 8,
+};
+
+static const u8 au_io_out_map[5] = {
+ [UART_TX] = 1,
+ [UART_IER] = 2,
+ [UART_FCR] = 4,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+};
+
+static unsigned int au_serial_in(struct uart_port *p, int offset)
+{
+ if (offset >= ARRAY_SIZE(au_io_in_map))
+ return UINT_MAX;
+ offset = au_io_in_map[offset];
+
+ return __raw_readl(p->membase + (offset << p->regshift));
+}
+
+static void au_serial_out(struct uart_port *p, int offset, int value)
+{
+ if (offset >= ARRAY_SIZE(au_io_out_map))
+ return;
+ offset = au_io_out_map[offset];
+
+ __raw_writel(value, p->membase + (offset << p->regshift));
+}
+
+/* Au1x00 haven't got a standard divisor latch */
+static u32 au_serial_dl_read(struct uart_8250_port *up)
+{
+ return __raw_readl(up->port.membase + RT288X_DL);
+}
+
+static void au_serial_dl_write(struct uart_8250_port *up, u32 value)
+{
+ __raw_writel(value, up->port.membase + RT288X_DL);
+}
+
+int au_platform_setup(struct plat_serial8250_port *p)
+{
+ p->iotype = UPIO_AU;
+
+ p->serial_in = au_serial_in;
+ p->serial_out = au_serial_out;
+ p->dl_read = au_serial_dl_read;
+ p->dl_write = au_serial_dl_write;
+
+ p->mapsize = 0x1000;
+
+ p->bugs |= UART_BUG_NOMSR;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(au_platform_setup);
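+/*
+ * Hypothetical usage sketch (the names and numbers below are illustrative
+ * only, not taken from this patch): an Alchemy board file would run this
+ * hook on its plat_serial8250_port entries before registering the
+ * "serial8250" platform device, for example:
+ *
+ *     static struct plat_serial8250_port au1x00_uart_data[] = {
+ *             { .mapbase = 0x11100000, .irq = 23, .uartclk = 48000000,
+ *               .flags = UPF_BOOT_AUTOCONF, },
+ *             { },
+ *     };
+ *
+ *     au_platform_setup(&au1x00_uart_data[0]);
+ */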
+
+int rt288x_setup(struct uart_port *p)
+{
+ struct uart_8250_port *up = up_to_u8250p(p);
+
+ p->iotype = UPIO_AU;
+
+ p->serial_in = au_serial_in;
+ p->serial_out = au_serial_out;
+ up->dl_read = au_serial_dl_read;
+ up->dl_write = au_serial_dl_write;
+
+ p->mapsize = 0x100;
+
+ up->bugs |= UART_BUG_NOMSR;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt288x_setup);
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static void au_putc(struct uart_port *port, unsigned char c)
+{
+ unsigned int status;
+
+ au_serial_out(port, UART_TX, c);
+
+ for (;;) {
+ status = au_serial_in(port, UART_LSR);
+ if (uart_lsr_tx_empty(status))
+ break;
+ cpu_relax();
+ }
+}
+
+static void au_early_serial8250_write(struct console *console,
+ const char *s, unsigned int count)
+{
+ struct earlycon_device *device = console->data;
+ struct uart_port *port = &device->port;
+
+ uart_console_write(port, s, count, au_putc);
+}
+
+static int __init early_au_setup(struct earlycon_device *dev, const char *opt)
+{
+ rt288x_setup(&dev->port);
+ dev->con->write = au_early_serial8250_write;
+
+ return 0;
+}
+OF_EARLYCON_DECLARE(palmchip, "ralink,rt2880-uart", early_au_setup);
+#endif
+
+MODULE_DESCRIPTION("RT288x/Au1xxx UART driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index a2978abab0db..a405155264b1 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -145,12 +145,12 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
* The divisor latch register exists at different address.
* Override dl_read/write callbacks.
*/
-static int uniphier_serial_dl_read(struct uart_8250_port *up)
+static u32 uniphier_serial_dl_read(struct uart_8250_port *up)
{
return readl(up->port.membase + UNIPHIER_UART_DLR);
}
-static void uniphier_serial_dl_write(struct uart_8250_port *up, int value)
+static void uniphier_serial_dl_write(struct uart_8250_port *up, u32 value)
{
writel(value, up->port.membase + UNIPHIER_UART_DLR);
}
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 5313aa31930f..ee17cf5c44c6 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -71,14 +71,16 @@ config SERIAL_8250_16550A_VARIANTS
console, you can say N here.
config SERIAL_8250_FINTEK
- bool "Support for Fintek F81216A LPC to 4 UART RS485 API"
+ bool "Support for Fintek variants"
depends on SERIAL_8250
help
- Selecting this option will add support for the RS485 capabilities
- of the Fintek F81216A LPC to 4 UART.
+ Selecting this option will add support for the RS232 and RS485
+ capabilities of the Fintek F81216A LPC to 4 UART as well as
+ similar variants.
If this option is not selected the device will be configured as a
- standard 16550A serial port.
+ standard 16550A serial port; however, the device may not function
+ correctly without this option enabled.
If unsure, say N.
@@ -377,9 +379,9 @@ config SERIAL_8250_BCM2835AUX
If unsure, say N.
config SERIAL_8250_FSL
- bool "Freescale 16550 UART support" if COMPILE_TEST && !(PPC || ARM || ARM64)
- depends on SERIAL_8250_CONSOLE
- default PPC || ARM || ARM64
+ tristate "Freescale 16550 UART support" if COMPILE_TEST && !(PPC || ARM || ARM64)
+ depends on SERIAL_8250
+ default SERIAL_8250 if PPC || ARM || ARM64
help
Selecting this option enables a workaround for a break-detection
erratum for Freescale 16550 UARTs in the 8250 driver. It also
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 4fc2fc1f41b6..628b75be312e 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o
obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o
obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
+obj-$(CONFIG_SERIAL_8250_RT288X) += 8250_rt288x.o
obj-$(CONFIG_SERIAL_8250_LPC18XX) += 8250_lpc18xx.o
obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 71a7a3e724ca..bdc568a4ab66 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1555,6 +1555,29 @@ config SERIAL_SUNPLUS_CONSOLE
you can alter that using a kernel command line option such as
"console=ttySUPx".
+config SERIAL_NUVOTON_MA35D1
+ tristate "Nuvoton MA35D1 family UART support"
+ depends on ARCH_MA35 || COMPILE_TEST
+ select SERIAL_CORE
+ help
+ This driver supports Nuvoton MA35D1 family UART ports. If you would
+ like to use them, you must answer Y or M to this option. Note that
+ for use as console, it must be included in kernel and not as a
+ for use as a console, it must be built into the kernel and not as a
+ module. If you enable this option, MA35D1 serial ports in the system
+
+config SERIAL_NUVOTON_MA35D1_CONSOLE
+ bool "Console on a Nuvoton MA35D1 family UART port"
+ depends on SERIAL_NUVOTON_MA35D1=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Select this option if you'd like to use UART port 0 of the
+ Nuvoton MA35D1 family as a console.
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default,
+ but you can alter that using a kernel command line option such as
+ "console=ttyNVTx".
+
endmenu
config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index cd9afd9e3018..d4123469583d 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -3,7 +3,8 @@
# Makefile for the kernel serial device drivers.
#
-obj-$(CONFIG_SERIAL_CORE) += serial_core.o
+obj-$(CONFIG_SERIAL_CORE) += serial_base.o
+serial_base-y := serial_core.o serial_base_bus.o serial_ctrl.o serial_port.o
obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
obj-$(CONFIG_SERIAL_EARLYCON_SEMIHOST) += earlycon-semihost.o
@@ -21,7 +22,7 @@ obj-$(CONFIG_SERIAL_SUNSAB) += sunsab.o
obj-$(CONFIG_SERIAL_21285) += 21285.o
# Now bring in any enabled 8250/16450/16550 type drivers.
-obj-$(CONFIG_SERIAL_8250) += 8250/
+obj-y += 8250/
obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
@@ -93,3 +94,4 @@ obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
+obj-$(CONFIG_SERIAL_NUVOTON_MA35D1) += ma35d1_serial.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index d8c2f3455eeb..c5c3f4674459 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2166,6 +2166,13 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* ----------^----------^----------^----------^-----
*/
pl011_write_lcr_h(uap, lcr_h);
+
+ /*
+ * Receive was disabled by pl011_disable_uart during shutdown.
+ * Receive must be re-enabled if a tty_driver returned by
+ * tty_find_polling_driver() is to be used after a port shutdown.
+ */
+ old_cr |= UART011_CR_RXE;
pl011_write(old_cr, uap, REG_CR);
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 9cd7479b03c0..3467a875641a 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -868,11 +868,11 @@ static void atmel_complete_tx_dma(void *arg)
dmaengine_terminate_all(chan);
uart_xmit_advance(port, atmel_port->tx_len);
- spin_lock_irq(&atmel_port->lock_tx);
+ spin_lock(&atmel_port->lock_tx);
async_tx_ack(atmel_port->desc_tx);
atmel_port->cookie_tx = -EINVAL;
atmel_port->desc_tx = NULL;
- spin_unlock_irq(&atmel_port->lock_tx);
+ spin_unlock(&atmel_port->lock_tx);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -3006,14 +3006,13 @@ static int atmel_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- int ret = 0;
tasklet_kill(&atmel_port->tasklet_rx);
tasklet_kill(&atmel_port->tasklet_tx);
device_init_wakeup(&pdev->dev, 0);
- ret = uart_remove_one_port(&atmel_uart, port);
+ uart_remove_one_port(&atmel_uart, port);
kfree(atmel_port->rx_ring.buf);
@@ -3023,7 +3022,7 @@ static int atmel_serial_remove(struct platform_device *pdev)
pdev->dev.of_node = NULL;
- return ret;
+ return 0;
}
static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index e190dce58f46..e49bc4019b50 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -514,7 +514,9 @@ static int uart_clps711x_remove(struct platform_device *pdev)
{
struct clps711x_port *s = platform_get_drvdata(pdev);
- return uart_remove_one_port(&clps711x_uart, &s->port);
+ uart_remove_one_port(&clps711x_uart, &s->port);
+
+ return 0;
}
static const struct of_device_id __maybe_unused clps711x_uart_dt_ids[] = {
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 349e7da643f0..66afa9bea6bf 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1431,7 +1431,10 @@ static int cpm_uart_probe(struct platform_device *ofdev)
static int cpm_uart_remove(struct platform_device *ofdev)
{
struct uart_cpm_port *pinfo = platform_get_drvdata(ofdev);
- return uart_remove_one_port(&cpm_reg, &pinfo->port);
+
+ uart_remove_one_port(&cpm_reg, &pinfo->port);
+
+ return 0;
}
static const struct of_device_id cpm_uart_match[] = {
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 7fd30fcc10c6..4d80fae20177 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -238,6 +238,7 @@
/* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */
#define DMA_RX_TIMEOUT (10)
+#define DMA_RX_IDLE_CHARS 8
#define UART_AUTOSUSPEND_TIMEOUT 3000
#define DRIVER_NAME "fsl-lpuart"
@@ -282,6 +283,7 @@ struct lpuart_port {
struct scatterlist rx_sgl, tx_sgl[2];
struct circ_buf rx_ring;
int rx_dma_rng_buf_len;
+ int last_residue;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
bool is_cs7; /* Set to true when character size is 7 */
@@ -331,7 +333,7 @@ static struct lpuart_soc_data imx8qxp_data = {
.devtype = IMX8QXP_LPUART,
.iotype = UPIO_MEM32,
.reg_off = IMX_REG_OFF,
- .rx_watermark = 31,
+ .rx_watermark = 7, /* A lower watermark is ideal for low baud rates. */
};
static struct lpuart_soc_data imxrt1050_data = {
.devtype = IMXRT1050_LPUART,
@@ -1255,6 +1257,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
sport->port.icount.rx += copied;
}
+ sport->last_residue = state.residue;
+
exit:
dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
DMA_FROM_DEVICE);
@@ -1272,11 +1276,43 @@ static void lpuart_dma_rx_complete(void *arg)
lpuart_copy_rx_to_tty(sport);
}
+/*
+ * Timer function to simulate the hardware EOP (End Of Package) event.
+ * The timer callback checks for new RX data and copies it to the TTY
+ * buffer. If no new data has been received since the last interval, the
+ * EOP condition is met and the DMA transfer is completed by copying the
+ * data. Otherwise, the timer is simply restarted.
+ */
static void lpuart_timer_func(struct timer_list *t)
{
struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
+ enum dma_status dmastat;
+ struct dma_chan *chan = sport->dma_rx_chan;
+ struct circ_buf *ring = &sport->rx_ring;
+ struct dma_tx_state state;
+ unsigned long flags;
+ int count;
- lpuart_copy_rx_to_tty(sport);
+ dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
+ if (dmastat == DMA_ERROR) {
+ dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
+ return;
+ }
+
+ ring->head = sport->rx_sgl.length - state.residue;
+ count = CIRC_CNT(ring->head, ring->tail, sport->rx_sgl.length);
+
+ /* Copy only if data is pending and the line has gone idle */
+ if ((count != 0) && (sport->last_residue == state.residue))
+ lpuart_copy_rx_to_tty(sport);
+ else
+ mod_timer(&sport->lpuart_timer,
+ jiffies + sport->dma_rx_timeout);
+
+ if (spin_trylock_irqsave(&sport->port.lock, flags)) {
+ sport->last_residue = state.residue;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
}
static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
@@ -1297,9 +1333,20 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
*/
sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud / bits / 1000) * 2;
sport->rx_dma_rng_buf_len = (1 << fls(sport->rx_dma_rng_buf_len));
+ sport->rx_dma_rng_buf_len = max_t(int,
+ sport->rxfifo_size * 2,
+ sport->rx_dma_rng_buf_len);
+ /*
+ * Keep this condition check in case rxfifo_size is unavailable
+ * for some SoCs.
+ */
if (sport->rx_dma_rng_buf_len < 16)
sport->rx_dma_rng_buf_len = 16;
+ sport->last_residue = 0;
+ sport->dma_rx_timeout = max(nsecs_to_jiffies(
+ sport->port.frame_time * DMA_RX_IDLE_CHARS), 1UL);
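+ /*
+ * Rough worked example, assuming 115200 baud 8N1, HZ=1000 and a 32-byte
+ * RX FIFO: the ring estimate above is (10 * 115200 / 10 / 1000) * 2 = 230,
+ * rounded up to the next power of two = 256 bytes, already larger than
+ * rxfifo_size * 2 = 64. One frame takes ~86.8 us, so frame_time *
+ * DMA_RX_IDLE_CHARS is ~694 us, which truncates to 0 jiffies at HZ=1000;
+ * hence the max(..., 1UL) clamp above.
+ */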
+
ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
if (!ring->buf)
return -ENOMEM;
@@ -1689,12 +1736,13 @@ static void lpuart_rx_dma_startup(struct lpuart_port *sport)
if (!sport->dma_rx_chan)
goto err;
+ /* set default Rx DMA timeout */
+ sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
+
ret = lpuart_start_rx_dma(sport);
if (ret)
goto err;
- /* set Rx DMA timeout */
- sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
if (!sport->dma_rx_timeout)
sport->dma_rx_timeout = 1;
@@ -2676,6 +2724,7 @@ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imxrt1050-lpuart", lpuart32_imx_early_console_setup);
EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index c5e17569c3ad..7341d060f85c 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -369,6 +369,16 @@ static void imx_uart_soft_reset(struct imx_port *sport)
sport->idle_counter = 0;
}
+static void imx_uart_disable_loopback_rs485(struct imx_port *sport)
+{
+ unsigned int uts;
+
+ /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
+ uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
+ uts &= ~UTS_LOOP;
+ imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
+}
+
/* called with port.lock taken and irqs off */
static void imx_uart_start_rx(struct uart_port *port)
{
@@ -390,6 +400,7 @@ static void imx_uart_start_rx(struct uart_port *port)
/* Write UCR2 first as it includes RXEN */
imx_uart_writel(sport, ucr2, UCR2);
imx_uart_writel(sport, ucr1, UCR1);
+ imx_uart_disable_loopback_rs485(sport);
}
/* called with port.lock taken and irqs off */
@@ -1422,7 +1433,7 @@ static int imx_uart_startup(struct uart_port *port)
int retval;
unsigned long flags;
int dma_is_inited = 0;
- u32 ucr1, ucr2, ucr3, ucr4, uts;
+ u32 ucr1, ucr2, ucr3, ucr4;
retval = clk_prepare_enable(sport->clk_per);
if (retval)
@@ -1521,10 +1532,7 @@ static int imx_uart_startup(struct uart_port *port)
imx_uart_writel(sport, ucr2, UCR2);
}
- /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
- uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
- uts &= ~UTS_LOOP;
- imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
+ imx_uart_disable_loopback_rs485(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -2467,7 +2475,9 @@ static int imx_uart_remove(struct platform_device *pdev)
{
struct imx_port *sport = platform_get_drvdata(pdev);
- return uart_remove_one_port(&imx_uart_uart_driver, &sport->port);
+ uart_remove_one_port(&imx_uart_uart_driver, &sport->port);
+
+ return 0;
}
static void imx_uart_restore_context(struct imx_port *sport)
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index f1387f1024db..bcaa479608d8 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -890,7 +890,9 @@ static int lqasc_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
- return uart_remove_one_port(&lqasc_reg, port);
+ uart_remove_one_port(&lqasc_reg, port);
+
+ return 0;
}
static const struct ltq_soc_data soc_data_lantiq = {
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
new file mode 100644
index 000000000000..2604b4d9fb78
--- /dev/null
+++ b/drivers/tty/serial/ma35d1_serial.c
@@ -0,0 +1,821 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MA35D1 serial driver
+ * Copyright (C) 2023 Nuvoton Technology Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty_flip.h>
+#include <linux/units.h>
+
+#define MA35_UART_NR 17
+
+#define MA35_RBR_REG 0x00
+#define MA35_THR_REG 0x00
+#define MA35_IER_REG 0x04
+#define MA35_FCR_REG 0x08
+#define MA35_LCR_REG 0x0C
+#define MA35_MCR_REG 0x10
+#define MA35_MSR_REG 0x14
+#define MA35_FSR_REG 0x18
+#define MA35_ISR_REG 0x1C
+#define MA35_TOR_REG 0x20
+#define MA35_BAUD_REG 0x24
+#define MA35_ALTCTL_REG 0x2C
+#define MA35_FUN_SEL_REG 0x30
+#define MA35_WKCTL_REG 0x40
+#define MA35_WKSTS_REG 0x44
+
+/* MA35_IER_REG - Interrupt Enable Register */
+#define MA35_IER_RDA_IEN BIT(0) /* RBR Available Interrupt Enable */
+#define MA35_IER_THRE_IEN BIT(1) /* THR Empty Interrupt Enable */
+#define MA35_IER_RLS_IEN BIT(2) /* RX Line Status Interrupt Enable */
+#define MA35_IER_RTO_IEN BIT(4) /* RX Time-out Interrupt Enable */
+#define MA35_IER_BUFERR_IEN BIT(5) /* Buffer Error Interrupt Enable */
+#define MA35_IER_TIME_OUT_EN BIT(11) /* RX Buffer Time-out Counter Enable */
+#define MA35_IER_AUTO_RTS BIT(12) /* nRTS Auto-flow Control Enable */
+#define MA35_IER_AUTO_CTS BIT(13) /* nCTS Auto-flow Control Enable */
+
+/* MA35_FCR_REG - FIFO Control Register */
+#define MA35_FCR_RFR BIT(1) /* RX Field Software Reset */
+#define MA35_FCR_TFR BIT(2) /* TX Field Software Reset */
+#define MA35_FCR_RFITL_MASK GENMASK(7, 4) /* RX FIFO Interrupt Trigger Level */
+#define MA35_FCR_RFITL_1BYTE FIELD_PREP(MA35_FCR_RFITL_MASK, 0)
+#define MA35_FCR_RFITL_4BYTES FIELD_PREP(MA35_FCR_RFITL_MASK, 1)
+#define MA35_FCR_RFITL_8BYTES FIELD_PREP(MA35_FCR_RFITL_MASK, 2)
+#define MA35_FCR_RFITL_14BYTES FIELD_PREP(MA35_FCR_RFITL_MASK, 3)
+#define MA35_FCR_RFITL_30BYTES FIELD_PREP(MA35_FCR_RFITL_MASK, 4)
+#define MA35_FCR_RTSTL_MASK GENMASK(19, 16) /* nRTS Trigger Level */
+#define MA35_FCR_RTSTL_1BYTE FIELD_PREP(MA35_FCR_RTSTL_MASK, 0)
+#define MA35_FCR_RTSTL_4BYTES FIELD_PREP(MA35_FCR_RTSTL_MASK, 1)
+#define MA35_FCR_RTSTL_8BYTES FIELD_PREP(MA35_FCR_RTSTL_MASK, 2)
+#define MA35_FCR_RTSTL_14BYTES FIELD_PREP(MA35_FCR_RTSTL_MASK, 3)
+#define MA35_FCR_RTSTL_30BYTES FIELD_PREP(MA35_FCR_RTSTL_MASK, 4)
+
+/* MA35_LCR_REG - Line Control Register */
+#define MA35_LCR_NSB BIT(2) /* Number of "STOP Bit" */
+#define MA35_LCR_PBE BIT(3) /* Parity Bit Enable */
+#define MA35_LCR_EPE BIT(4) /* Even Parity Enable */
+#define MA35_LCR_SPE BIT(5) /* Stick Parity Enable */
+#define MA35_LCR_BREAK BIT(6) /* Break Control */
+#define MA35_LCR_WLS_MASK GENMASK(1, 0) /* Word Length Selection */
+#define MA35_LCR_WLS_5BITS FIELD_PREP(MA35_LCR_WLS_MASK, 0)
+#define MA35_LCR_WLS_6BITS FIELD_PREP(MA35_LCR_WLS_MASK, 1)
+#define MA35_LCR_WLS_7BITS FIELD_PREP(MA35_LCR_WLS_MASK, 2)
+#define MA35_LCR_WLS_8BITS FIELD_PREP(MA35_LCR_WLS_MASK, 3)
+
+/* MA35_MCR_REG - Modem Control Register */
+#define MA35_MCR_RTS_CTRL BIT(1) /* nRTS Signal Control */
+#define MA35_MCR_RTSACTLV BIT(9) /* nRTS Pin Active Level */
+#define MA35_MCR_RTSSTS BIT(13) /* nRTS Pin Status (Read Only) */
+
+/* MA35_MSR_REG - Modem Status Register */
+#define MA35_MSR_CTSDETF BIT(0) /* Detect nCTS State Change Flag */
+#define MA35_MSR_CTSSTS BIT(4) /* nCTS Pin Status (Read Only) */
+#define MA35_MSR_CTSACTLV BIT(8) /* nCTS Pin Active Level */
+
+/* MA35_FSR_REG - FIFO Status Register */
+#define MA35_FSR_RX_OVER_IF BIT(0) /* RX Overflow Error Interrupt Flag */
+#define MA35_FSR_PEF BIT(4) /* Parity Error Flag */
+#define MA35_FSR_FEF BIT(5) /* Framing Error Flag */
+#define MA35_FSR_BIF BIT(6) /* Break Interrupt Flag */
+#define MA35_FSR_RX_EMPTY BIT(14) /* Receiver FIFO Empty (Read Only) */
+#define MA35_FSR_RX_FULL BIT(15) /* Receiver FIFO Full (Read Only) */
+#define MA35_FSR_TX_EMPTY BIT(22) /* Transmitter FIFO Empty (Read Only) */
+#define MA35_FSR_TX_FULL BIT(23) /* Transmitter FIFO Full (Read Only) */
+#define MA35_FSR_TX_OVER_IF BIT(24) /* TX Overflow Error Interrupt Flag */
+#define MA35_FSR_TE_FLAG BIT(28) /* Transmitter Empty Flag (Read Only) */
+#define MA35_FSR_RXPTR_MSK GENMASK(13, 8) /* RX FIFO Pointer mask */
+#define MA35_FSR_TXPTR_MSK GENMASK(21, 16) /* TX FIFO Pointer mask */
+
+/* MA35_ISR_REG - Interrupt Status Register */
+#define MA35_ISR_RDA_IF BIT(0) /* RBR Available Interrupt Flag */
+#define MA35_ISR_THRE_IF BIT(1) /* THR Empty Interrupt Flag */
+#define MA35_ISR_RLSIF BIT(2) /* Receive Line Interrupt Flag */
+#define MA35_ISR_MODEMIF BIT(3) /* MODEM Interrupt Flag */
+#define MA35_ISR_RXTO_IF BIT(4) /* RX Time-out Interrupt Flag */
+#define MA35_ISR_BUFEIF BIT(5) /* Buffer Error Interrupt Flag */
+#define MA35_ISR_WK_IF BIT(6) /* UART Wake-up Interrupt Flag */
+#define MA35_ISR_RDAINT BIT(8) /* RBR Available Interrupt Indicator */
+#define MA35_ISR_THRE_INT BIT(9) /* THR Empty Interrupt Indicator */
+#define MA35_ISR_ALL 0xFFFFFFFF
+
+/* MA35_BAUD_REG - Baud Rate Divider Register */
+#define MA35_BAUD_MODE_MASK GENMASK(29, 28)
+#define MA35_BAUD_MODE0 FIELD_PREP(MA35_BAUD_MODE_MASK, 0)
+#define MA35_BAUD_MODE1 FIELD_PREP(MA35_BAUD_MODE_MASK, 2)
+#define MA35_BAUD_MODE2 FIELD_PREP(MA35_BAUD_MODE_MASK, 3)
+#define MA35_BAUD_MASK GENMASK(15, 0)
+
+/* MA35_ALTCTL_REG - Alternate Control/Status Register */
+#define MA35_ALTCTL_RS485AUD BIT(10) /* RS-485 Auto Direction Function */
+
+/* MA35_FUN_SEL_REG - Function Select Register */
+#define MA35_FUN_SEL_MASK GENMASK(2, 0)
+#define MA35_FUN_SEL_UART FIELD_PREP(MA35_FUN_SEL_MASK, 0)
+#define MA35_FUN_SEL_RS485 FIELD_PREP(MA35_FUN_SEL_MASK, 3)
+
+/* The constraints for the MA35D1 UART baud rate divider */
+#define MA35_BAUD_DIV_MAX 0xFFFF
+#define MA35_BAUD_DIV_MIN 11
+
+/* UART FIFO depth */
+#define MA35_UART_FIFO_DEPTH 32
+/* UART console clock */
+#define MA35_UART_CONSOLE_CLK (24 * HZ_PER_MHZ)
+/* UART register ioremap size */
+#define MA35_UART_REG_SIZE 0x100
+/* Rx Timeout */
+#define MA35_UART_RX_TOUT 0x40
+
+#define MA35_IER_CONFIG (MA35_IER_RTO_IEN | MA35_IER_RDA_IEN | \
+ MA35_IER_TIME_OUT_EN | MA35_IER_BUFERR_IEN)
+
+#define MA35_ISR_IF_CHECK (MA35_ISR_RDA_IF | MA35_ISR_RXTO_IF | \
+ MA35_ISR_THRE_INT | MA35_ISR_BUFEIF)
+
+#define MA35_FSR_TX_BOTH_EMPTY (MA35_FSR_TE_FLAG | MA35_FSR_TX_EMPTY)
+
+static struct uart_driver ma35d1serial_reg;
+
+struct uart_ma35d1_port {
+ struct uart_port port;
+ struct clk *clk;
+ u16 capabilities; /* port capabilities */
+ u8 ier;
+ u8 lcr;
+ u8 mcr;
+ u32 baud_rate;
+ u32 console_baud_rate;
+ u32 console_line;
+ u32 console_int;
+};
+
+static struct uart_ma35d1_port ma35d1serial_ports[MA35_UART_NR];
+
+static struct uart_ma35d1_port *to_ma35d1_uart_port(struct uart_port *uart)
+{
+ return container_of(uart, struct uart_ma35d1_port, port);
+}
+
+static u32 serial_in(struct uart_ma35d1_port *p, u32 offset)
+{
+ return readl_relaxed(p->port.membase + offset);
+}
+
+static void serial_out(struct uart_ma35d1_port *p, u32 offset, u32 value)
+{
+ writel_relaxed(value, p->port.membase + offset);
+}
+
+static void __stop_tx(struct uart_ma35d1_port *p)
+{
+ u32 ier;
+
+ ier = serial_in(p, MA35_IER_REG);
+ if (ier & MA35_IER_THRE_IEN)
+ serial_out(p, MA35_IER_REG, ier & ~MA35_IER_THRE_IEN);
+}
+
+static void ma35d1serial_stop_tx(struct uart_port *port)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+
+ __stop_tx(up);
+}
+
+static void transmit_chars(struct uart_ma35d1_port *up)
+{
+ u32 count;
+ u8 ch;
+
+ if (uart_tx_stopped(&up->port)) {
+ ma35d1serial_stop_tx(&up->port);
+ return;
+ }
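+ /*
+ * Free TX FIFO space: the FIFO is MA35_UART_FIFO_DEPTH (32) bytes deep
+ * and FSR.TXPTR reflects the current TX FIFO fill level, so at most
+ * (depth - TXPTR) characters can be queued in this pass.
+ */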
+ count = MA35_UART_FIFO_DEPTH - FIELD_GET(MA35_FSR_TXPTR_MSK,
+ serial_in(up, MA35_FSR_REG));
+ uart_port_tx_limited(&up->port, ch, count,
+ !(serial_in(up, MA35_FSR_REG) & MA35_FSR_TX_FULL),
+ serial_out(up, MA35_THR_REG, ch),
+ ({}));
+}
+
+static void ma35d1serial_start_tx(struct uart_port *port)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+ u32 ier;
+
+ ier = serial_in(up, MA35_IER_REG);
+ serial_out(up, MA35_IER_REG, ier & ~MA35_IER_THRE_IEN);
+ transmit_chars(up);
+ serial_out(up, MA35_IER_REG, ier | MA35_IER_THRE_IEN);
+}
+
+static void ma35d1serial_stop_rx(struct uart_port *port)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+ u32 ier;
+
+ ier = serial_in(up, MA35_IER_REG);
+ ier &= ~MA35_IER_RDA_IEN;
+ serial_out(up, MA35_IER_REG, ier);
+}
+
+static void receive_chars(struct uart_ma35d1_port *up)
+{
+ int max_count = 256;
+ u8 ch, flag;
+ u32 fsr;
+
+ fsr = serial_in(up, MA35_FSR_REG);
+ do {
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(fsr & (MA35_FSR_BIF | MA35_FSR_FEF |
+ MA35_FSR_PEF | MA35_FSR_RX_OVER_IF))) {
+ if (fsr & MA35_FSR_BIF) {
+ up->port.icount.brk++;
+ if (uart_handle_break(&up->port))
+ continue;
+ }
+ if (fsr & MA35_FSR_FEF)
+ up->port.icount.frame++;
+ if (fsr & MA35_FSR_PEF)
+ up->port.icount.parity++;
+ if (fsr & MA35_FSR_RX_OVER_IF)
+ up->port.icount.overrun++;
+
+ serial_out(up, MA35_FSR_REG,
+ fsr & (MA35_FSR_BIF | MA35_FSR_FEF |
+ MA35_FSR_PEF | MA35_FSR_RX_OVER_IF));
+ if (fsr & MA35_FSR_BIF)
+ flag = TTY_BREAK;
+ else if (fsr & MA35_FSR_PEF)
+ flag = TTY_PARITY;
+ else if (fsr & MA35_FSR_FEF)
+ flag = TTY_FRAME;
+ }
+
+ ch = serial_in(up, MA35_RBR_REG);
+ if (uart_handle_sysrq_char(&up->port, ch))
+ continue;
+
+ spin_lock(&up->port.lock);
+ uart_insert_char(&up->port, fsr, MA35_FSR_RX_OVER_IF, ch, flag);
+ spin_unlock(&up->port.lock);
+
+ fsr = serial_in(up, MA35_FSR_REG);
+ } while (!(fsr & MA35_FSR_RX_EMPTY) && (max_count-- > 0));
+
+ spin_lock(&up->port.lock);
+ tty_flip_buffer_push(&up->port.state->port);
+ spin_unlock(&up->port.lock);
+}
+
+static irqreturn_t ma35d1serial_interrupt(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+ u32 isr, fsr;
+
+ isr = serial_in(up, MA35_ISR_REG);
+ fsr = serial_in(up, MA35_FSR_REG);
+
+ if (!(isr & MA35_ISR_IF_CHECK))
+ return IRQ_NONE;
+
+ if (isr & (MA35_ISR_RDA_IF | MA35_ISR_RXTO_IF))
+ receive_chars(up);
+ if (isr & MA35_ISR_THRE_INT)
+ transmit_chars(up);
+ if (fsr & MA35_FSR_TX_OVER_IF)
+ serial_out(up, MA35_FSR_REG, MA35_FSR_TX_OVER_IF);
+
+ return IRQ_HANDLED;
+}
+
+static u32 ma35d1serial_tx_empty(struct uart_port *port)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+ u32 fsr;
+
+ fsr = serial_in(up, MA35_FSR_REG);
+ if ((fsr & MA35_FSR_TX_BOTH_EMPTY) == MA35_FSR_TX_BOTH_EMPTY)
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static u32 ma35d1serial_get_mctrl(struct uart_port *port)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+ u32 status;
+ u32 ret = 0;
+
+ status = serial_in(up, MA35_MSR_REG);
+ if (!(status & MA35_MSR_CTSSTS))
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void ma35d1serial_set_mctrl(struct uart_port *port, u32 mctrl)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+ u32 mcr, msr, ier;
+
+ mcr = serial_in(up, MA35_MCR_REG);
+ mcr &= ~MA35_MCR_RTS_CTRL;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= MA35_MCR_RTSACTLV;
+ else
+ mcr &= ~MA35_MCR_RTSACTLV;
+
+ if (up->mcr & UART_MCR_AFE) {
+ ier = serial_in(up, MA35_IER_REG);
+ ier |= MA35_IER_AUTO_RTS | MA35_IER_AUTO_CTS;
+ serial_out(up, MA35_IER_REG, ier);
+ up->port.flags |= UPF_HARD_FLOW;
+ } else {
+ ier = serial_in(up, MA35_IER_REG);
+ ier &= ~(MA35_IER_AUTO_RTS | MA35_IER_AUTO_CTS);
+ serial_out(up, MA35_IER_REG, ier);
+ up->port.flags &= ~UPF_HARD_FLOW;
+ }
+
+ msr = serial_in(up, MA35_MSR_REG);
+ msr |= MA35_MSR_CTSACTLV;
+ serial_out(up, MA35_MSR_REG, msr);
+ serial_out(up, MA35_MCR_REG, mcr);
+}
+
+static void ma35d1serial_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+ unsigned long flags;
+ u32 lcr;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ lcr = serial_in(up, MA35_LCR_REG);
+ if (break_state != 0)
+ lcr |= MA35_LCR_BREAK;
+ else
+ lcr &= ~MA35_LCR_BREAK;
+ serial_out(up, MA35_LCR_REG, lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static int ma35d1serial_startup(struct uart_port *port)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+ u32 fcr;
+ int retval;
+
+ /* Reset FIFO */
+ serial_out(up, MA35_FCR_REG, MA35_FCR_TFR | MA35_FCR_RFR);
+
+ /* Clear pending interrupts */
+ serial_out(up, MA35_ISR_REG, MA35_ISR_ALL);
+
+ retval = request_irq(port->irq, ma35d1serial_interrupt, 0,
+ dev_name(port->dev), port);
+ if (retval) {
+ dev_err(up->port.dev, "request irq failed.\n");
+ return retval;
+ }
+
+ fcr = serial_in(up, MA35_FCR_REG);
+ fcr |= MA35_FCR_RFITL_4BYTES | MA35_FCR_RTSTL_8BYTES;
+ serial_out(up, MA35_FCR_REG, fcr);
+ serial_out(up, MA35_LCR_REG, MA35_LCR_WLS_8BITS);
+ serial_out(up, MA35_TOR_REG, MA35_UART_RX_TOUT);
+ serial_out(up, MA35_IER_REG, MA35_IER_CONFIG);
+ return 0;
+}
+
+static void ma35d1serial_shutdown(struct uart_port *port)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+
+ serial_out(up, MA35_IER_REG, 0);
+ free_irq(port->irq, port);
+}
+
+static void ma35d1serial_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ const struct ktermios *old)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+ unsigned long flags;
+ u32 baud, quot;
+ u32 lcr = 0;
+
+ lcr = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag));
+
+ if (termios->c_cflag & CSTOPB)
+ lcr |= MA35_LCR_NSB;
+ if (termios->c_cflag & PARENB)
+ lcr |= MA35_LCR_PBE;
+ if (!(termios->c_cflag & PARODD))
+ lcr |= MA35_LCR_EPE;
+ if (termios->c_cflag & CMSPAR)
+ lcr |= MA35_LCR_SPE;
+
+ baud = uart_get_baud_rate(port, termios, old,
+ port->uartclk / MA35_BAUD_DIV_MAX,
+ port->uartclk / MA35_BAUD_DIV_MIN);
+
+ /* MA35D1 UART baud rate equation: baudrate = UART_CLK / (quot + 2) */
+ quot = (port->uartclk / baud) - 2;
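+ /*
+ * Worked example, assuming the 24 MHz console clock: at 115200 baud,
+ * quot = 24000000 / 115200 - 2 = 208 - 2 = 206, giving an actual rate of
+ * 24000000 / (206 + 2) ~= 115385 baud, roughly 0.16% fast.
+ */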
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ up->port.read_status_mask = MA35_FSR_RX_OVER_IF;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= MA35_FSR_FEF | MA35_FSR_PEF;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= MA35_FSR_BIF;
+
+ /* Characters to ignore */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= MA35_FSR_FEF | MA35_FSR_PEF;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= MA35_FSR_BIF;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= MA35_FSR_RX_OVER_IF;
+ }
+ if (termios->c_cflag & CRTSCTS)
+ up->mcr |= UART_MCR_AFE;
+ else
+ up->mcr &= ~UART_MCR_AFE;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ ma35d1serial_set_mctrl(&up->port, up->port.mctrl);
+
+ serial_out(up, MA35_BAUD_REG, MA35_BAUD_MODE2 | FIELD_PREP(MA35_BAUD_MASK, quot));
+
+ serial_out(up, MA35_LCR_REG, lcr);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static const char *ma35d1serial_type(struct uart_port *port)
+{
+ return "ma35d1-uart";
+}
+
+static void ma35d1serial_config_port(struct uart_port *port, int flags)
+{
+ /*
+ * Driver core for serial ports forces a non-zero value for port type.
+ * Write an arbitrary value here to accommodate the serial core driver,
+ * as the ID part of the UAPI is redundant.
+ */
+ port->type = 1;
+}
+
+static int ma35d1serial_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if (port->type != PORT_UNKNOWN && ser->type != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct uart_ops ma35d1serial_ops = {
+ .tx_empty = ma35d1serial_tx_empty,
+ .set_mctrl = ma35d1serial_set_mctrl,
+ .get_mctrl = ma35d1serial_get_mctrl,
+ .stop_tx = ma35d1serial_stop_tx,
+ .start_tx = ma35d1serial_start_tx,
+ .stop_rx = ma35d1serial_stop_rx,
+ .break_ctl = ma35d1serial_break_ctl,
+ .startup = ma35d1serial_startup,
+ .shutdown = ma35d1serial_shutdown,
+ .set_termios = ma35d1serial_set_termios,
+ .type = ma35d1serial_type,
+ .config_port = ma35d1serial_config_port,
+ .verify_port = ma35d1serial_verify_port,
+};
+
+static const struct of_device_id ma35d1_serial_of_match[] = {
+ { .compatible = "nuvoton,ma35d1-uart" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ma35d1_serial_of_match);
+
+#ifdef CONFIG_SERIAL_NUVOTON_MA35D1_CONSOLE
+
+static struct device_node *ma35d1serial_uart_nodes[MA35_UART_NR];
+
+static void wait_for_xmitr(struct uart_ma35d1_port *up)
+{
+ unsigned int reg = 0;
+
+ read_poll_timeout_atomic(serial_in, reg, reg & MA35_FSR_TX_EMPTY,
+ 1, 10000, false,
+ up, MA35_FSR_REG);
+}
+
+static void ma35d1serial_console_putchar(struct uart_port *port, unsigned char ch)
+{
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+
+ wait_for_xmitr(up);
+ serial_out(up, MA35_THR_REG, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void ma35d1serial_console_write(struct console *co, const char *s, u32 count)
+{
+ struct uart_ma35d1_port *up = &ma35d1serial_ports[co->index];
+ unsigned long flags;
+ int locked = 1;
+ u32 ier;
+
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = serial_in(up, MA35_IER_REG);
+ serial_out(up, MA35_IER_REG, 0);
+
+ uart_console_write(&up->port, s, count, ma35d1serial_console_putchar);
+
+ wait_for_xmitr(up);
+ serial_out(up, MA35_IER_REG, ier);
+
+ if (locked)
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static int __init ma35d1serial_console_setup(struct console *co, char *options)
+{
+ struct device_node *np;
+ struct uart_ma35d1_port *p;
+ u32 val32[4];
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if ((co->index < 0) || (co->index >= MA35_UART_NR)) {
+ pr_debug("Console Port%x out of range\n", co->index);
+ return -EINVAL;
+ }
+
+ np = ma35d1serial_uart_nodes[co->index];
+ p = &ma35d1serial_ports[co->index];
+ if (!np || !p)
+ return -ENODEV;
+
+ if (of_property_read_u32_array(np, "reg", val32, ARRAY_SIZE(val32)) != 0)
+ return -EINVAL;
+
+ p->port.iobase = val32[1];
+ p->port.membase = ioremap(p->port.iobase, MA35_UART_REG_SIZE);
+ if (!p->port.membase)
+ return -ENOMEM;
+
+ p->port.ops = &ma35d1serial_ops;
+ p->port.line = 0;
+ p->port.uartclk = MA35_UART_CONSOLE_CLK;
+
+ port = &ma35d1serial_ports[co->index].port;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct console ma35d1serial_console = {
+ .name = "ttyNVT",
+ .write = ma35d1serial_console_write,
+ .device = uart_console_device,
+ .setup = ma35d1serial_console_setup,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .index = -1,
+ .data = &ma35d1serial_reg,
+};
+
+static void ma35d1serial_console_init_port(void)
+{
+ u32 i = 0;
+ struct device_node *np;
+
+ for_each_matching_node(np, ma35d1_serial_of_match) {
+ if (ma35d1serial_uart_nodes[i] == NULL) {
+ of_node_get(np);
+ ma35d1serial_uart_nodes[i] = np;
+ i++;
+ if (i == MA35_UART_NR)
+ break;
+ }
+ }
+}
+
+static int __init ma35d1serial_console_init(void)
+{
+ ma35d1serial_console_init_port();
+ register_console(&ma35d1serial_console);
+ return 0;
+}
+console_initcall(ma35d1serial_console_init);
+
+#define MA35D1SERIAL_CONSOLE (&ma35d1serial_console)
+#else
+#define MA35D1SERIAL_CONSOLE NULL
+#endif
+
+static struct uart_driver ma35d1serial_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "ttyNVT",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .cons = MA35D1SERIAL_CONSOLE,
+ .nr = MA35_UART_NR,
+};
+
+/*
+ * Register a serial port attached to a platform device. The port is
+ * registered with UPF_BOOT_AUTOCONF set.
+ */
+static int ma35d1serial_probe(struct platform_device *pdev)
+{
+ struct resource *res_mem;
+ struct uart_ma35d1_port *up;
+ int ret = 0;
+
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret);
+ return ret;
+ }
+ }
+ up = &ma35d1serial_ports[ret];
+ up->port.line = ret;
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem)
+ return -ENODEV;
+
+ up->port.iobase = res_mem->start;
+ up->port.membase = ioremap(up->port.iobase, MA35_UART_REG_SIZE);
+ up->port.ops = &ma35d1serial_ops;
+
+ spin_lock_init(&up->port.lock);
+
+ up->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(up->clk)) {
+ ret = PTR_ERR(up->clk);
+ dev_err(&pdev->dev, "failed to get core clk: %d\n", ret);
+ goto err_iounmap;
+ }
+
+ ret = clk_prepare_enable(up->clk);
+ if (ret)
+ goto err_iounmap;
+
+ if (up->port.line != 0)
+ up->port.uartclk = clk_get_rate(up->clk);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_clk_disable;
+
+ up->port.irq = ret;
+ up->port.dev = &pdev->dev;
+ up->port.flags = UPF_BOOT_AUTOCONF;
+
+ platform_set_drvdata(pdev, up);
+
+ ret = uart_add_one_port(&ma35d1serial_reg, &up->port);
+ if (ret < 0)
+ goto err_free_irq;
+
+ return 0;
+
+err_free_irq:
+ free_irq(up->port.irq, &up->port);
+
+err_clk_disable:
+ clk_disable_unprepare(up->clk);
+
+err_iounmap:
+ iounmap(up->port.membase);
+ return ret;
+}
+
+/*
+ * Remove serial ports registered against a platform device.
+ */
+static int ma35d1serial_remove(struct platform_device *dev)
+{
+ struct uart_port *port = platform_get_drvdata(dev);
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+
+ uart_remove_one_port(&ma35d1serial_reg, port);
+ clk_disable_unprepare(up->clk);
+ return 0;
+}
+
+static int ma35d1serial_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct uart_port *port = platform_get_drvdata(dev);
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+
+ uart_suspend_port(&ma35d1serial_reg, &up->port);
+ if (up->port.line == 0) {
+ up->console_baud_rate = serial_in(up, MA35_BAUD_REG);
+ up->console_line = serial_in(up, MA35_LCR_REG);
+ up->console_int = serial_in(up, MA35_IER_REG);
+ }
+ return 0;
+}
+
+static int ma35d1serial_resume(struct platform_device *dev)
+{
+ struct uart_port *port = platform_get_drvdata(dev);
+ struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
+
+ if (up->port.line == 0) {
+ serial_out(up, MA35_BAUD_REG, up->console_baud_rate);
+ serial_out(up, MA35_LCR_REG, up->console_line);
+ serial_out(up, MA35_IER_REG, up->console_int);
+ }
+ uart_resume_port(&ma35d1serial_reg, &up->port);
+ return 0;
+}
+
+static struct platform_driver ma35d1serial_driver = {
+ .probe = ma35d1serial_probe,
+ .remove = ma35d1serial_remove,
+ .suspend = ma35d1serial_suspend,
+ .resume = ma35d1serial_resume,
+ .driver = {
+ .name = "ma35d1-uart",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ma35d1_serial_of_match),
+ },
+};
+
+static int __init ma35d1serial_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&ma35d1serial_reg);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&ma35d1serial_driver);
+ if (ret)
+ uart_unregister_driver(&ma35d1serial_reg);
+
+ return ret;
+}
+
+static void __exit ma35d1serial_exit(void)
+{
+ platform_driver_unregister(&ma35d1serial_driver);
+ uart_unregister_driver(&ma35d1serial_reg);
+}
+
+module_init(ma35d1serial_init);
+module_exit(ma35d1serial_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MA35D1 serial driver");
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 9fee722058f4..997e39449766 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1636,7 +1636,7 @@ static struct i2c_driver max310x_i2c_driver = {
.of_match_table = max310x_dt_ids,
.pm = &max310x_pm_ops,
},
- .probe_new = max310x_i2c_probe,
+ .probe = max310x_i2c_probe,
.remove = max310x_i2c_remove,
};
#endif
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 8582479f0211..444c74eeab7d 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1053,6 +1053,11 @@ static int setup_fifos(struct qcom_geni_serial_port *port)
(port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
if (port->rx_buf && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) {
+ /*
+ * Use krealloc rather than krealloc_array because rx_buf is
+ * accessed as 1 byte entries as well as 4 byte entries so it's
+ * not necessarily an array.
+ */
port->rx_buf = devm_krealloc(uport->dev, port->rx_buf,
port->rx_fifo_depth * sizeof(u32),
GFP_KERNEL);
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 2a7520ad3abd..b29e9dfd81a6 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -24,6 +24,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
@@ -1459,8 +1460,12 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
continue;
rate = clk_get_rate(clk);
- if (!rate)
+ if (!rate) {
+ dev_err(ourport->port.dev,
+ "Failed to get clock rate for %s.\n", clkname);
+ clk_put(clk);
continue;
+ }
if (ourport->info->has_divslot) {
unsigned long div = rate / req_baud;
@@ -1481,15 +1486,21 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
}
quot--;
- calc_deviation = req_baud - baud;
- if (calc_deviation < 0)
- calc_deviation = -calc_deviation;
+ calc_deviation = abs(req_baud - baud);
if (calc_deviation < deviation) {
+ /*
+ * If we find a better clk, release the previous one, if
+ * any.
+ */
+ if (!IS_ERR(*best_clk))
+ clk_put(*best_clk);
*best_clk = clk;
best_quot = quot;
*clk_num = cnt;
deviation = calc_deviation;
+ } else {
+ clk_put(clk);
}
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index abad091baeea..2e7e7c409cf2 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1709,7 +1709,7 @@ static struct i2c_driver sc16is7xx_i2c_uart_driver = {
.name = SC16IS7XX_NAME,
.of_match_table = sc16is7xx_dt_ids,
},
- .probe_new = sc16is7xx_i2c_probe,
+ .probe = sc16is7xx_i2c_probe,
.remove = sc16is7xx_i2c_remove,
.id_table = sc16is7xx_i2c_id_table,
};
diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
new file mode 100644
index 000000000000..9faac0ff6b89
--- /dev/null
+++ b/drivers/tty/serial/serial_base.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Serial core related functions, serial port device drivers do not need this.
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Tony Lindgren <tony@atomide.com>
+ */
+
+#define to_serial_base_ctrl_device(d) container_of((d), struct serial_ctrl_device, dev)
+#define to_serial_base_port_device(d) container_of((d), struct serial_port_device, dev)
+
+struct uart_driver;
+struct uart_port;
+struct device_driver;
+struct device;
+
+struct serial_ctrl_device {
+ struct device dev;
+};
+
+struct serial_port_device {
+ struct device dev;
+ struct uart_port *port;
+};
+
+int serial_base_ctrl_init(void);
+void serial_base_ctrl_exit(void);
+
+int serial_base_port_init(void);
+void serial_base_port_exit(void);
+
+int serial_base_driver_register(struct device_driver *driver);
+void serial_base_driver_unregister(struct device_driver *driver);
+
+struct serial_ctrl_device *serial_base_ctrl_add(struct uart_port *port,
+ struct device *parent);
+struct serial_port_device *serial_base_port_add(struct uart_port *port,
+ struct serial_ctrl_device *parent);
+void serial_base_ctrl_device_remove(struct serial_ctrl_device *ctrl_dev);
+void serial_base_port_device_remove(struct serial_port_device *port_dev);
+
+int serial_ctrl_register_port(struct uart_driver *drv, struct uart_port *port);
+void serial_ctrl_unregister_port(struct uart_driver *drv, struct uart_port *port);
+
+int serial_core_register_port(struct uart_driver *drv, struct uart_port *port);
+void serial_core_unregister_port(struct uart_driver *drv, struct uart_port *port);
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
new file mode 100644
index 000000000000..6ff59c89d867
--- /dev/null
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Serial base bus layer for controllers
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Tony Lindgren <tony@atomide.com>
+ *
+ * The serial core bus manages the serial core controller instances.
+ */
+
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "serial_base.h"
+
+static bool serial_base_initialized;
+
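+/*
+ * Devices on this bus are matched to drivers by name prefix: device names
+ * are generated as "<type>.<parent>.<id>" in serial_base_device_init(), so
+ * the "ctrl" and "port" drivers bind to controller and port devices
+ * respectively.
+ */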
+static int serial_base_match(struct device *dev, struct device_driver *drv)
+{
+ int len = strlen(drv->name);
+
+ return !strncmp(dev_name(dev), drv->name, len);
+}
+
+static struct bus_type serial_base_bus_type = {
+ .name = "serial-base",
+ .match = serial_base_match,
+};
+
+int serial_base_driver_register(struct device_driver *driver)
+{
+ driver->bus = &serial_base_bus_type;
+
+ return driver_register(driver);
+}
+
+void serial_base_driver_unregister(struct device_driver *driver)
+{
+ driver_unregister(driver);
+}
+
+static int serial_base_device_init(struct uart_port *port,
+ struct device *dev,
+ struct device *parent_dev,
+ const struct device_type *type,
+ void (*release)(struct device *dev),
+ int id)
+{
+ device_initialize(dev);
+ dev->type = type;
+ dev->parent = parent_dev;
+ dev->bus = &serial_base_bus_type;
+ dev->release = release;
+
+ if (!serial_base_initialized) {
+ dev_dbg(port->dev, "uart_add_one_port() called before arch_initcall()?\n");
+ return -EPROBE_DEFER;
+ }
+
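+ /*
+ * The resulting name is "<type>.<parent device name>.<id>", which is what
+ * serial_base_match() keys on, e.g. a "ctrl.<uart dev>.0" device with a
+ * "port.<uart dev>.0" child (exact names depend on the parent device).
+ */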
+ return dev_set_name(dev, "%s.%s.%d", type->name, dev_name(port->dev), id);
+}
+
+static const struct device_type serial_ctrl_type = {
+ .name = "ctrl",
+};
+
+static void serial_base_ctrl_release(struct device *dev)
+{
+ struct serial_ctrl_device *ctrl_dev = to_serial_base_ctrl_device(dev);
+
+ kfree(ctrl_dev);
+}
+
+void serial_base_ctrl_device_remove(struct serial_ctrl_device *ctrl_dev)
+{
+ if (!ctrl_dev)
+ return;
+
+ device_del(&ctrl_dev->dev);
+}
+
+struct serial_ctrl_device *serial_base_ctrl_add(struct uart_port *port,
+ struct device *parent)
+{
+ struct serial_ctrl_device *ctrl_dev;
+ int err;
+
+ ctrl_dev = kzalloc(sizeof(*ctrl_dev), GFP_KERNEL);
+ if (!ctrl_dev)
+ return ERR_PTR(-ENOMEM);
+
+ err = serial_base_device_init(port, &ctrl_dev->dev,
+ parent, &serial_ctrl_type,
+ serial_base_ctrl_release,
+ port->ctrl_id);
+ if (err)
+ goto err_put_device;
+
+ err = device_add(&ctrl_dev->dev);
+ if (err)
+ goto err_put_device;
+
+ return ctrl_dev;
+
+err_put_device:
+ put_device(&ctrl_dev->dev);
+
+ return ERR_PTR(err);
+}
+
+static const struct device_type serial_port_type = {
+ .name = "port",
+};
+
+static void serial_base_port_release(struct device *dev)
+{
+ struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+
+ kfree(port_dev);
+}
+
+struct serial_port_device *serial_base_port_add(struct uart_port *port,
+ struct serial_ctrl_device *ctrl_dev)
+{
+ struct serial_port_device *port_dev;
+ int err;
+
+ port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
+ if (!port_dev)
+ return ERR_PTR(-ENOMEM);
+
+ err = serial_base_device_init(port, &port_dev->dev,
+ &ctrl_dev->dev, &serial_port_type,
+ serial_base_port_release,
+ port->line);
+ if (err)
+ goto err_put_device;
+
+ port_dev->port = port;
+
+ err = device_add(&port_dev->dev);
+ if (err)
+ goto err_put_device;
+
+ return port_dev;
+
+err_put_device:
+ put_device(&port_dev->dev);
+
+ return ERR_PTR(err);
+}
+
+void serial_base_port_device_remove(struct serial_port_device *port_dev)
+{
+ if (!port_dev)
+ return;
+
+ device_del(&port_dev->dev);
+}
+
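+/*
+ * The bus and its "ctrl"/"port" drivers are registered at arch_initcall()
+ * time so they exist before serial port drivers start adding ports; until
+ * then, serial_base_device_init() returns -EPROBE_DEFER.
+ */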
+static int serial_base_init(void)
+{
+ int ret;
+
+ ret = bus_register(&serial_base_bus_type);
+ if (ret)
+ return ret;
+
+ ret = serial_base_ctrl_init();
+ if (ret)
+ goto err_bus_unregister;
+
+ ret = serial_base_port_init();
+ if (ret)
+ goto err_ctrl_exit;
+
+ serial_base_initialized = true;
+
+ return 0;
+
+err_ctrl_exit:
+ serial_base_ctrl_exit();
+
+err_bus_unregister:
+ bus_unregister(&serial_base_bus_type);
+
+ return ret;
+}
+arch_initcall(serial_base_init);
+
+static void serial_base_exit(void)
+{
+ serial_base_port_exit();
+ serial_base_ctrl_exit();
+ bus_unregister(&serial_base_bus_type);
+}
+module_exit(serial_base_exit);
+
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
+MODULE_DESCRIPTION("Serial core bus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 54e82f476a2c..831d033611e6 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -17,6 +17,7 @@
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
@@ -31,6 +32,8 @@
#include <linux/irq.h>
#include <linux/uaccess.h>
+#include "serial_base.h"
+
/*
* This is used to lock changes in serial line configuration.
*/
@@ -134,9 +137,30 @@ static void __uart_start(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
+ struct serial_port_device *port_dev;
+ int err;
+
+ if (!port || port->flags & UPF_DEAD || uart_tx_stopped(port))
+ return;
+
+ port_dev = port->port_dev;
+
+ /* Increment the runtime PM usage count for the active check below */
+ err = pm_runtime_get(&port_dev->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(&port_dev->dev);
+ return;
+ }
- if (port && !(port->flags & UPF_DEAD) && !uart_tx_stopped(port))
+ /*
+ * Start TX if enabled, and kick runtime PM. If the device is not
+ * enabled, serial_port_runtime_resume() calls start_tx() again
+ * after enabling the device.
+ */
+ if (pm_runtime_active(&port_dev->dev))
port->ops->start_tx(port);
+ pm_runtime_mark_last_busy(&port_dev->dev);
+ pm_runtime_put_autosuspend(&port_dev->dev);
}
static void uart_start(struct tty_struct *tty)
@@ -2333,8 +2357,11 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
* able to Re-start_rx later.
*/
if (!console_suspend_enabled && uart_console(uport)) {
- if (uport->ops->start_rx)
+ if (uport->ops->start_rx) {
+ spin_lock_irq(&uport->lock);
uport->ops->stop_rx(uport);
+ spin_unlock_irq(&uport->lock);
+ }
goto unlock;
}
@@ -2427,8 +2454,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
if (console_suspend_enabled)
uart_change_pm(state, UART_PM_STATE_ON);
uport->ops->set_termios(uport, &termios, NULL);
- if (!console_suspend_enabled && uport->ops->start_rx)
+ if (!console_suspend_enabled && uport->ops->start_rx) {
+ spin_lock_irq(&uport->lock);
uport->ops->start_rx(uport);
+ spin_unlock_irq(&uport->lock);
+ }
if (console_suspend_enabled)
console_start(uport->cons);
}
@@ -3042,7 +3072,7 @@ static const struct attribute_group tty_dev_attr_group = {
};
/**
- * uart_add_one_port - attach a driver-defined port structure
+ * serial_core_add_one_port - attach a driver-defined port structure
* @drv: pointer to the uart low level driver structure for this port
* @uport: uart port structure to use for this port.
*
@@ -3051,8 +3081,9 @@ static const struct attribute_group tty_dev_attr_group = {
* This allows the driver @drv to register its own uart_port structure with the
* core driver. The main purpose is to allow the low level uart drivers to
* expand uart_port, rather than having yet more levels of structures.
+ * Caller must hold port_mutex.
*/
-int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
+static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state;
struct tty_port *port;
@@ -3066,7 +3097,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
state = drv->state + uport->line;
port = &state->port;
- mutex_lock(&port_mutex);
mutex_lock(&port->mutex);
if (state->uart_port) {
ret = -EINVAL;
@@ -3131,21 +3161,14 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uport->line);
}
- /*
- * Ensure UPF_DEAD is not set.
- */
- uport->flags &= ~UPF_DEAD;
-
out:
mutex_unlock(&port->mutex);
- mutex_unlock(&port_mutex);
return ret;
}
-EXPORT_SYMBOL(uart_add_one_port);
/**
- * uart_remove_one_port - detach a driver defined port structure
+ * serial_core_remove_one_port - detach a driver defined port structure
* @drv: pointer to the uart low level driver structure for this port
* @uport: uart port structure for this port
*
@@ -3153,21 +3176,16 @@ EXPORT_SYMBOL(uart_add_one_port);
*
* This unhooks (and hangs up) the specified port structure from the core
* driver. No further calls will be made to the low-level code for this port.
+ * Caller must hold port_mutex.
*/
-int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
+static void serial_core_remove_one_port(struct uart_driver *drv,
+ struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
struct uart_port *uart_port;
struct tty_struct *tty;
- int ret = 0;
-
- mutex_lock(&port_mutex);
- /*
- * Mark the port "dead" - this prevents any opens from
- * succeeding while we shut down the port.
- */
mutex_lock(&port->mutex);
uart_port = uart_port_check(state);
if (uart_port != uport)
@@ -3176,10 +3194,8 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
if (!uart_port) {
mutex_unlock(&port->mutex);
- ret = -EINVAL;
- goto out;
+ return;
}
- uport->flags |= UPF_DEAD;
mutex_unlock(&port->mutex);
/*
@@ -3211,18 +3227,14 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
* Indicate that there isn't a port here anymore.
*/
uport->type = PORT_UNKNOWN;
+ uport->port_dev = NULL;
mutex_lock(&port->mutex);
WARN_ON(atomic_dec_return(&state->refcount) < 0);
wait_event(state->remove_wait, !atomic_read(&state->refcount));
state->uart_port = NULL;
mutex_unlock(&port->mutex);
-out:
- mutex_unlock(&port_mutex);
-
- return ret;
}
-EXPORT_SYMBOL(uart_remove_one_port);
/**
* uart_match_port - are the two ports equivalent?
@@ -3257,6 +3269,144 @@ bool uart_match_port(const struct uart_port *port1,
}
EXPORT_SYMBOL(uart_match_port);
+static struct serial_ctrl_device *
+serial_core_get_ctrl_dev(struct serial_port_device *port_dev)
+{
+ struct device *dev = &port_dev->dev;
+
+ return to_serial_base_ctrl_device(dev->parent);
+}
+
+/*
+ * Find a registered serial core controller device if one exists. Returns
+ * the first device matching the ctrl_id. Caller must hold port_mutex.
+ */
+static struct serial_ctrl_device *serial_core_ctrl_find(struct uart_driver *drv,
+ struct device *phys_dev,
+ int ctrl_id)
+{
+ struct uart_state *state;
+ int i;
+
+ lockdep_assert_held(&port_mutex);
+
+ for (i = 0; i < drv->nr; i++) {
+ state = drv->state + i;
+ if (!state->uart_port || !state->uart_port->port_dev)
+ continue;
+
+ if (state->uart_port->dev == phys_dev &&
+ state->uart_port->ctrl_id == ctrl_id)
+ return serial_core_get_ctrl_dev(state->uart_port->port_dev);
+ }
+
+ return NULL;
+}
+
+static struct serial_ctrl_device *serial_core_ctrl_device_add(struct uart_port *port)
+{
+ return serial_base_ctrl_add(port, port->dev);
+}
+
+static int serial_core_port_device_add(struct serial_ctrl_device *ctrl_dev,
+ struct uart_port *port)
+{
+ struct serial_port_device *port_dev;
+
+ port_dev = serial_base_port_add(port, ctrl_dev);
+ if (IS_ERR(port_dev))
+ return PTR_ERR(port_dev);
+
+ port->port_dev = port_dev;
+
+ return 0;
+}
+
+/*
+ * Initialize a serial core port device, and a controller device if needed.
+ */
+int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
+{
+ struct serial_ctrl_device *ctrl_dev, *new_ctrl_dev = NULL;
+ int ret;
+
+ mutex_lock(&port_mutex);
+
+ /*
+ * Prevent serial_port_runtime_resume() from trying to use the port
+ * until serial_core_add_one_port() has completed
+ */
+ port->flags |= UPF_DEAD;
+
+ /* Initialize a serial core controller device if needed */
+ ctrl_dev = serial_core_ctrl_find(drv, port->dev, port->ctrl_id);
+ if (!ctrl_dev) {
+ new_ctrl_dev = serial_core_ctrl_device_add(port);
+ if (IS_ERR(new_ctrl_dev)) {
+ ret = PTR_ERR(new_ctrl_dev);
+ goto err_unlock;
+ }
+ ctrl_dev = new_ctrl_dev;
+ }
+
+ /*
+ * Initialize a serial core port device. The port was tagged dead above to
+ * prevent serial_port_runtime_resume() from touching it until it has been
+ * registered; the UPF_DEAD flag is cleared below once registration succeeds.
+ */
+ ret = serial_core_port_device_add(ctrl_dev, port);
+ if (ret)
+ goto err_unregister_ctrl_dev;
+
+ ret = serial_core_add_one_port(drv, port);
+ if (ret)
+ goto err_unregister_port_dev;
+
+ port->flags &= ~UPF_DEAD;
+
+ mutex_unlock(&port_mutex);
+
+ return 0;
+
+err_unregister_port_dev:
+ serial_base_port_device_remove(port->port_dev);
+
+err_unregister_ctrl_dev:
+ serial_base_ctrl_device_remove(new_ctrl_dev);
+
+err_unlock:
+ mutex_unlock(&port_mutex);
+
+ return ret;
+}
+
+/*
+ * Removes a serial core port device, and the related serial core controller
+ * device if the last instance.
+ */
+void serial_core_unregister_port(struct uart_driver *drv, struct uart_port *port)
+{
+ struct device *phys_dev = port->dev;
+ struct serial_port_device *port_dev = port->port_dev;
+ struct serial_ctrl_device *ctrl_dev = serial_core_get_ctrl_dev(port_dev);
+ int ctrl_id = port->ctrl_id;
+
+ mutex_lock(&port_mutex);
+
+ port->flags |= UPF_DEAD;
+
+ serial_core_remove_one_port(drv, port);
+
+ /* Note that struct uart_port *port is no longer valid at this point */
+ serial_base_port_device_remove(port_dev);
+
+ /* Drop the serial core controller device if no ports are using it */
+ if (!serial_core_ctrl_find(drv, phys_dev, ctrl_id))
+ serial_base_ctrl_device_remove(ctrl_dev);
+
+ mutex_unlock(&port_mutex);
+}
+
/**
* uart_handle_dcd_change - handle a change of carrier detect state
* @uport: uart_port structure for the open port
diff --git a/drivers/tty/serial/serial_ctrl.c b/drivers/tty/serial/serial_ctrl.c
new file mode 100644
index 000000000000..6fcf634425dc
--- /dev/null
+++ b/drivers/tty/serial/serial_ctrl.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Serial core controller driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Tony Lindgren <tony@atomide.com>
+ *
+ * This driver manages the serial core controller struct device instances.
+ * The serial core controller devices are children of the physical serial
+ * port device.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/serial_core.h>
+#include <linux/spinlock.h>
+
+#include "serial_base.h"
+
+static int serial_ctrl_probe(struct device *dev)
+{
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int serial_ctrl_remove(struct device *dev)
+{
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+/*
+ * Serial core controller device init functions. Note that the physical
+ * serial port device driver may not have completed probe at this point.
+ */
+int serial_ctrl_register_port(struct uart_driver *drv, struct uart_port *port)
+{
+ return serial_core_register_port(drv, port);
+}
+
+void serial_ctrl_unregister_port(struct uart_driver *drv, struct uart_port *port)
+{
+ serial_core_unregister_port(drv, port);
+}
+
+static struct device_driver serial_ctrl_driver = {
+ .name = "ctrl",
+ .suppress_bind_attrs = true,
+ .probe = serial_ctrl_probe,
+ .remove = serial_ctrl_remove,
+};
+
+int serial_base_ctrl_init(void)
+{
+ return serial_base_driver_register(&serial_ctrl_driver);
+}
+
+void serial_base_ctrl_exit(void)
+{
+ serial_base_driver_unregister(&serial_ctrl_driver);
+}
+
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
+MODULE_DESCRIPTION("Serial core controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
new file mode 100644
index 000000000000..862423237007
--- /dev/null
+++ b/drivers/tty/serial/serial_port.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Serial core port device driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Tony Lindgren <tony@atomide.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/serial_core.h>
+#include <linux/spinlock.h>
+
+#include "serial_base.h"
+
+#define SERIAL_PORT_AUTOSUSPEND_DELAY_MS 500
+
+/* Only considers pending TX for now. Caller must take care of locking */
+static int __serial_port_busy(struct uart_port *port)
+{
+ return !uart_tx_stopped(port) &&
+ uart_circ_chars_pending(&port->state->xmit);
+}
+
+static int serial_port_runtime_resume(struct device *dev)
+{
+ struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+ struct uart_port *port;
+ unsigned long flags;
+
+ port = port_dev->port;
+
+ if (port->flags & UPF_DEAD)
+ goto out;
+
+ /* Flush any pending TX for the port */
+ spin_lock_irqsave(&port->lock, flags);
+ if (__serial_port_busy(port))
+ port->ops->start_tx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+out:
+ pm_runtime_mark_last_busy(dev);
+
+ return 0;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
+ NULL, serial_port_runtime_resume, NULL);
+
+static int serial_port_probe(struct device *dev)
+{
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, SERIAL_PORT_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ return 0;
+}
+
+static int serial_port_remove(struct device *dev)
+{
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+/*
+ * Serial core port device init functions. Note that the physical serial
+ * port device driver may not have completed probe at this point.
+ */
+int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
+{
+ return serial_ctrl_register_port(drv, port);
+}
+EXPORT_SYMBOL(uart_add_one_port);
+
+void uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
+{
+ serial_ctrl_unregister_port(drv, port);
+}
+EXPORT_SYMBOL(uart_remove_one_port);
+
+static struct device_driver serial_port_driver = {
+ .name = "port",
+ .suppress_bind_attrs = true,
+ .probe = serial_port_probe,
+ .remove = serial_port_remove,
+ .pm = pm_ptr(&serial_port_pm),
+};
+
+int serial_base_port_init(void)
+{
+ return serial_base_driver_register(&serial_port_driver);
+}
+
+void serial_base_port_exit(void)
+{
+ serial_base_driver_unregister(&serial_port_driver);
+}
+
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
+MODULE_DESCRIPTION("Serial controller port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 5215e6910f68..aa471c9c24d9 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -754,7 +754,7 @@ static struct asc_port *asc_of_get_asc_port(struct platform_device *pdev)
asc_ports[id].hw_flow_control = of_property_read_bool(np,
"uart-has-rtscts");
- asc_ports[id].force_m1 = of_property_read_bool(np, "st,force_m1");
+ asc_ports[id].force_m1 = of_property_read_bool(np, "st,force-m1");
asc_ports[id].port.line = id;
asc_ports[id].rts = NULL;
@@ -796,7 +796,9 @@ static int asc_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
- return uart_remove_one_port(&asc_uart_driver, port);
+ uart_remove_one_port(&asc_uart_driver, port);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 1e38fc9b10c1..e9e11a259621 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -1755,13 +1755,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
struct uart_port *port = platform_get_drvdata(pdev);
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- int err;
u32 cr3;
pm_runtime_get_sync(&pdev->dev);
- err = uart_remove_one_port(&stm32_usart_driver, port);
- if (err)
- return(err);
+ uart_remove_one_port(&stm32_usart_driver, port);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 94584e54ebbe..679574893ebe 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -685,18 +685,15 @@ static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq,
*
* @dev: pointer to device structure
*/
-static int ulite_release(struct device *dev)
+static void ulite_release(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
- int rc = 0;
if (port) {
- rc = uart_remove_one_port(&ulite_uart_driver, port);
+ uart_remove_one_port(&ulite_uart_driver, port);
dev_set_drvdata(dev, NULL);
port->mapbase = 0;
}
-
- return rc;
}
/**
@@ -900,14 +897,13 @@ static int ulite_remove(struct platform_device *pdev)
{
struct uart_port *port = dev_get_drvdata(&pdev->dev);
struct uartlite_data *pdata = port->private_data;
- int rc;
clk_disable_unprepare(pdata->clk);
- rc = ulite_release(&pdev->dev);
+ ulite_release(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- return rc;
+ return 0;
}
/* work with hotplug and coldplug */
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 8e521c69a959..20a751663ef9 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1670,14 +1670,13 @@ static int cdns_uart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct cdns_uart *cdns_uart_data = port->private_data;
- int rc;
/* Remove the cdns_uart port from the serial core */
#ifdef CONFIG_COMMON_CLK
clk_notifier_unregister(cdns_uart_data->uartclk,
&cdns_uart_data->clk_rate_change_nb);
#endif
- rc = uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port);
+ uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port);
port->mapbase = 0;
clk_disable_unprepare(cdns_uart_data->uartclk);
clk_disable_unprepare(cdns_uart_data->pclk);
@@ -1693,7 +1692,7 @@ static int cdns_uart_remove(struct platform_device *pdev)
if (!--instances)
uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
- return rc;
+ return 0;
}
static struct platform_driver cdns_uart_platform_driver = {
diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
index 1e0d80e98d26..89769a1f1f97 100644
--- a/drivers/tty/tty.h
+++ b/drivers/tty/tty.h
@@ -99,14 +99,15 @@ extern int tty_ldisc_autoload;
/* tty_audit.c */
#ifdef CONFIG_AUDIT
-void tty_audit_add_data(struct tty_struct *tty, const void *data, size_t size);
-void tty_audit_tiocsti(struct tty_struct *tty, char ch);
+void tty_audit_add_data(const struct tty_struct *tty, const void *data,
+ size_t size);
+void tty_audit_tiocsti(const struct tty_struct *tty, char ch);
#else
-static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size)
+static inline void tty_audit_add_data(const struct tty_struct *tty,
+ const void *data, size_t size)
{
}
-static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
+static inline void tty_audit_tiocsti(const struct tty_struct *tty, char ch)
{
}
#endif
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index ca7afd7b2716..24d010589379 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -15,7 +15,7 @@
struct tty_audit_buf {
struct mutex mutex; /* Protects all data below */
dev_t dev; /* The TTY which the data is from */
- unsigned icanon:1;
+ bool icanon;
size_t valid;
unsigned char *data; /* Allocated size N_TTY_BUF_SIZE */
};
@@ -33,16 +33,16 @@ static struct tty_audit_buf *tty_audit_buf_alloc(void)
{
struct tty_audit_buf *buf;
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
goto err;
+
buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
if (!buf->data)
goto err_buf;
+
mutex_init(&buf->mutex);
- buf->dev = MKDEV(0, 0);
- buf->icanon = 0;
- buf->valid = 0;
+
return buf;
err_buf:
@@ -59,27 +59,27 @@ static void tty_audit_buf_free(struct tty_audit_buf *buf)
}
static void tty_audit_log(const char *description, dev_t dev,
- unsigned char *data, size_t size)
+ const unsigned char *data, size_t size)
{
struct audit_buffer *ab;
pid_t pid = task_pid_nr(current);
uid_t uid = from_kuid(&init_user_ns, task_uid(current));
uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current));
unsigned int sessionid = audit_get_sessionid(current);
+ char name[TASK_COMM_LEN];
ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_TTY);
- if (ab) {
- char name[sizeof(current->comm)];
-
- audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
- " minor=%d comm=", description, pid, uid,
- loginuid, sessionid, MAJOR(dev), MINOR(dev));
- get_task_comm(name, current);
- audit_log_untrustedstring(ab, name);
- audit_log_format(ab, " data=");
- audit_log_n_hex(ab, data, size);
- audit_log_end(ab);
- }
+ if (!ab)
+ return;
+
+ audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d minor=%d comm=",
+ description, pid, uid, loginuid, sessionid,
+ MAJOR(dev), MINOR(dev));
+ get_task_comm(name, current);
+ audit_log_untrustedstring(ab, name);
+ audit_log_format(ab, " data=");
+ audit_log_n_hex(ab, data, size);
+ audit_log_end(ab);
}
/*
@@ -134,7 +134,7 @@ void tty_audit_fork(struct signal_struct *sig)
/*
* tty_audit_tiocsti - Log TIOCSTI
*/
-void tty_audit_tiocsti(struct tty_struct *tty, char ch)
+void tty_audit_tiocsti(const struct tty_struct *tty, char ch)
{
dev_t dev;
@@ -199,11 +199,12 @@ static struct tty_audit_buf *tty_audit_buf_get(void)
*
* Audit @data of @size from @tty, if necessary.
*/
-void tty_audit_add_data(struct tty_struct *tty, const void *data, size_t size)
+void tty_audit_add_data(const struct tty_struct *tty, const void *data,
+ size_t size)
{
struct tty_audit_buf *buf;
- unsigned int icanon = !!L_ICANON(tty);
unsigned int audit_tty;
+ bool icanon = L_ICANON(tty);
dev_t dev;
audit_tty = READ_ONCE(current->signal->audit_tty);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 4737a8f92c2e..3959efc717aa 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -101,6 +101,7 @@
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/termios_internal.h>
+#include <linux/fs.h>
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
@@ -811,18 +812,26 @@ void start_tty(struct tty_struct *tty)
}
EXPORT_SYMBOL(start_tty);
-static void tty_update_time(struct timespec64 *time)
+static void tty_update_time(struct tty_struct *tty, bool mtime)
{
time64_t sec = ktime_get_real_seconds();
+ struct tty_file_private *priv;
- /*
- * We only care if the two values differ in anything other than the
- * lower three bits (i.e every 8 seconds). If so, then we can update
- * the time of the tty device, otherwise it could be construded as a
- * security leak to let userspace know the exact timing of the tty.
- */
- if ((sec ^ time->tv_sec) & ~7)
- time->tv_sec = sec;
+ spin_lock(&tty->files_lock);
+ list_for_each_entry(priv, &tty->tty_files, list) {
+ struct inode *inode = file_inode(priv->file);
+ struct timespec64 *time = mtime ? &inode->i_mtime : &inode->i_atime;
+
+ /*
+ * We only care if the two values differ in anything other than the
+ * lower three bits (i.e. every 8 seconds). If so, then we can update
+ * the time of the tty device, otherwise it could be construed as a
+ * security leak to let userspace know the exact timing of the tty.
+ */
+ if ((sec ^ time->tv_sec) & ~7)
+ time->tv_sec = sec;
+ }
+ spin_unlock(&tty->files_lock);
}
/*
@@ -928,7 +937,7 @@ static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
tty_ldisc_deref(ld);
if (i > 0)
- tty_update_time(&inode->i_atime);
+ tty_update_time(tty, false);
return i;
}
@@ -1036,7 +1045,7 @@ static inline ssize_t do_tty_write(
cond_resched();
}
if (written) {
- tty_update_time(&file_inode(file)->i_mtime);
+ tty_update_time(tty, true);
ret = written;
}
out:
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 9566a95aeed9..0f3bd943b58b 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -68,7 +68,6 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
-void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
diff --git a/drivers/uio/uio_dfl.c b/drivers/uio/uio_dfl.c
index 69e93f3e7faf..6d99e5a06ae8 100644
--- a/drivers/uio/uio_dfl.c
+++ b/drivers/uio/uio_dfl.c
@@ -46,11 +46,13 @@ static int uio_dfl_probe(struct dfl_device *ddev)
#define FME_FEATURE_ID_ETH_GROUP 0x10
#define FME_FEATURE_ID_HSSI_SUBSYS 0x15
+#define FME_FEATURE_ID_VENDOR_SPECIFIC 0x23
#define PORT_FEATURE_ID_IOPLL_USRCLK 0x14
static const struct dfl_device_id uio_dfl_ids[] = {
{ FME_ID, FME_FEATURE_ID_ETH_GROUP },
{ FME_ID, FME_FEATURE_ID_HSSI_SUBSYS },
+ { FME_ID, FME_FEATURE_ID_VENDOR_SPECIFIC },
{ PORT_ID, PORT_FEATURE_ID_IOPLL_USRCLK },
{ }
};
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 6db5cb1b2dbb..bb9d5d7ffefc 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -177,7 +177,7 @@ static int c67x00_drv_probe(struct platform_device *pdev)
return ret;
}
-static int c67x00_drv_remove(struct platform_device *pdev)
+static void c67x00_drv_remove(struct platform_device *pdev)
{
struct c67x00_device *c67x00 = platform_get_drvdata(pdev);
struct resource *res;
@@ -197,13 +197,11 @@ static int c67x00_drv_remove(struct platform_device *pdev)
release_mem_region(res->start, resource_size(res));
kfree(c67x00);
-
- return 0;
}
static struct platform_driver c67x00_driver = {
.probe = c67x00_drv_probe,
- .remove = c67x00_drv_remove,
+ .remove_new = c67x00_drv_remove,
.driver = {
.name = "c67x00",
},
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index b98ca0a1352a..0a514b591527 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -78,6 +78,17 @@ config USB_CDNS3_IMX
For example, imx8qm and imx8qxp.
+config USB_CDNS3_STARFIVE
+ tristate "Cadence USB3 support on StarFive SoC platforms"
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ help
+ Say 'Y' or 'M' here if you are building for StarFive SoC
+ platforms that contain a Cadence USB3 controller core.
+
+ e.g. JH7110.
+
+ If you choose to build this driver as a module, it will be
+ dynamically linked and the module will be called cdns3-starfive.ko.
endif
if USB_CDNS_SUPPORT
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index 61edb2f89276..48dfae75b5aa 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -24,6 +24,7 @@ endif
obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
+obj-$(CONFIG_USB_CDNS3_STARFIVE) += cdns3-starfive.o
cdnsp-udc-pci-y := cdnsp-pci.o
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 1dcadef933e3..ea19253fd2d0 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -800,7 +800,8 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
if (request->status == -EINPROGRESS)
request->status = status;
- usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
+ if (likely(!(priv_req->flags & REQUEST_UNALIGNED)))
+ usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
priv_ep->dir);
if ((priv_req->flags & REQUEST_UNALIGNED) &&
@@ -808,10 +809,10 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
/* Make DMA buffer CPU accessible */
dma_sync_single_for_cpu(priv_dev->sysdev,
priv_req->aligned_buf->dma,
- priv_req->aligned_buf->size,
+ request->actual,
priv_req->aligned_buf->dir);
memcpy(request->buf, priv_req->aligned_buf->buf,
- request->length);
+ request->actual);
}
priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
@@ -2543,10 +2544,12 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
if (ret < 0)
return ret;
- ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
+ if (likely(!(priv_req->flags & REQUEST_UNALIGNED))) {
+ ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
usb_endpoint_dir_in(ep->desc));
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
list_add_tail(&request->list, &priv_ep->deferred_req_list);
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index 59860d1753fd..281de47e2a3b 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -105,11 +105,11 @@ static inline void cdns_imx_writel(struct cdns_imx *data, u32 offset, u32 value)
}
static const struct clk_bulk_data imx_cdns3_core_clks[] = {
- { .id = "usb3_lpm_clk" },
- { .id = "usb3_bus_clk" },
- { .id = "usb3_aclk" },
- { .id = "usb3_ipg_clk" },
- { .id = "usb3_core_pclk" },
+ { .id = "lpm" },
+ { .id = "bus" },
+ { .id = "aclk" },
+ { .id = "ipg" },
+ { .id = "core" },
};
static int cdns_imx_noncore_init(struct cdns_imx *data)
@@ -218,7 +218,7 @@ err:
return ret;
}
-static int cdns_imx_remove(struct platform_device *pdev)
+static void cdns_imx_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cdns_imx *data = dev_get_drvdata(dev);
@@ -229,8 +229,6 @@ static int cdns_imx_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -375,14 +373,22 @@ static inline bool cdns_imx_is_power_lost(struct cdns_imx *data)
return false;
}
+static int __maybe_unused cdns_imx_system_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
static int __maybe_unused cdns_imx_system_resume(struct device *dev)
{
struct cdns_imx *data = dev_get_drvdata(dev);
int ret;
- ret = cdns_imx_resume(dev);
- if (ret)
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not get runtime PM.\n");
return ret;
+ }
if (cdns_imx_is_power_lost(data)) {
dev_dbg(dev, "resume from power lost\n");
@@ -405,7 +411,7 @@ static int cdns_imx_platform_suspend(struct device *dev,
static const struct dev_pm_ops cdns_imx_pm_ops = {
SET_RUNTIME_PM_OPS(cdns_imx_suspend, cdns_imx_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(cdns_imx_suspend, cdns_imx_system_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(cdns_imx_system_suspend, cdns_imx_system_resume)
};
static const struct of_device_id cdns_imx_of_match[] = {
@@ -416,7 +422,7 @@ MODULE_DEVICE_TABLE(of, cdns_imx_of_match);
static struct platform_driver cdns_imx_driver = {
.probe = cdns_imx_probe,
- .remove = cdns_imx_remove,
+ .remove_new = cdns_imx_remove,
.driver = {
.name = "cdns3-imx",
.of_match_table = cdns_imx_of_match,
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
index 2bc5d094548b..884e2301237f 100644
--- a/drivers/usb/cdns3/cdns3-plat.c
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -175,7 +175,7 @@ err_phy3_init:
*
* Returns 0 on success otherwise negative errno
*/
-static int cdns3_plat_remove(struct platform_device *pdev)
+static void cdns3_plat_remove(struct platform_device *pdev)
{
struct cdns *cdns = platform_get_drvdata(pdev);
struct device *dev = cdns->dev;
@@ -187,7 +187,6 @@ static int cdns3_plat_remove(struct platform_device *pdev)
set_phy_power_off(cdns);
phy_exit(cdns->usb2_phy);
phy_exit(cdns->usb3_phy);
- return 0;
}
#ifdef CONFIG_PM
@@ -320,7 +319,7 @@ MODULE_DEVICE_TABLE(of, of_cdns3_match);
static struct platform_driver cdns3_driver = {
.probe = cdns3_plat_probe,
- .remove = cdns3_plat_remove,
+ .remove_new = cdns3_plat_remove,
.driver = {
.name = "cdns-usb3",
.of_match_table = of_match_ptr(of_cdns3_match),
diff --git a/drivers/usb/cdns3/cdns3-starfive.c b/drivers/usb/cdns3/cdns3-starfive.c
new file mode 100644
index 000000000000..fc1f003b145d
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-starfive.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cdns3-starfive.c - StarFive-specific glue layer for the Cadence USB controller
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Minda Chen <minda.chen@starfivetech.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+#include <linux/usb/otg.h>
+#include "core.h"
+
+#define USB_STRAP_HOST BIT(17)
+#define USB_STRAP_DEVICE BIT(18)
+#define USB_STRAP_MASK GENMASK(18, 16)
+
+#define USB_SUSPENDM_HOST BIT(19)
+#define USB_SUSPENDM_MASK BIT(19)
+
+#define USB_MISC_CFG_MASK GENMASK(23, 20)
+#define USB_SUSPENDM_BYPS BIT(20)
+#define USB_PLL_EN BIT(22)
+#define USB_REFCLK_MODE BIT(23)
+
+struct cdns_starfive {
+ struct device *dev;
+ struct regmap *stg_syscon;
+ struct reset_control *resets;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ u32 stg_usb_mode;
+};
+
+static void cdns_mode_init(struct platform_device *pdev,
+ struct cdns_starfive *data)
+{
+ enum usb_dr_mode mode;
+
+ regmap_update_bits(data->stg_syscon, data->stg_usb_mode,
+ USB_MISC_CFG_MASK,
+ USB_SUSPENDM_BYPS | USB_PLL_EN | USB_REFCLK_MODE);
+
+ /* dr mode setting */
+ mode = usb_get_dr_mode(&pdev->dev);
+
+ switch (mode) {
+ case USB_DR_MODE_HOST:
+ regmap_update_bits(data->stg_syscon,
+ data->stg_usb_mode,
+ USB_STRAP_MASK,
+ USB_STRAP_HOST);
+ regmap_update_bits(data->stg_syscon,
+ data->stg_usb_mode,
+ USB_SUSPENDM_MASK,
+ USB_SUSPENDM_HOST);
+ break;
+
+ case USB_DR_MODE_PERIPHERAL:
+ regmap_update_bits(data->stg_syscon, data->stg_usb_mode,
+ USB_STRAP_MASK, USB_STRAP_DEVICE);
+ regmap_update_bits(data->stg_syscon, data->stg_usb_mode,
+ USB_SUSPENDM_MASK, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+static int cdns_clk_rst_init(struct cdns_starfive *data)
+{
+ int ret;
+
+ ret = clk_bulk_prepare_enable(data->num_clks, data->clks);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "failed to enable clocks\n");
+
+ ret = reset_control_deassert(data->resets);
+ if (ret) {
+ dev_err(data->dev, "failed to deassert resets\n");
+ goto err_clk_init;
+ }
+
+ return ret;
+
+err_clk_init:
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ return ret;
+}
+
+static void cdns_clk_rst_deinit(struct cdns_starfive *data)
+{
+ reset_control_assert(data->resets);
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+}
+
+static int cdns_starfive_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cdns_starfive *data;
+ unsigned int args;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+
+ data->stg_syscon =
+ syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
+ "starfive,stg-syscon", 1, &args);
+
+ if (IS_ERR(data->stg_syscon))
+ return dev_err_probe(dev, PTR_ERR(data->stg_syscon),
+ "Failed to parse starfive,stg-syscon\n");
+
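+ /*
+ * The phandle argument gives the offset of the USB mode/strap register
+ * within the STG syscon, used for the regmap updates in cdns_mode_init().
+ */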
+ data->stg_usb_mode = args;
+
+ data->num_clks = devm_clk_bulk_get_all(data->dev, &data->clks);
+ if (data->num_clks < 0)
+ return dev_err_probe(data->dev, -ENODEV,
+ "Failed to get clocks\n");
+
+ data->resets = devm_reset_control_array_get_exclusive(data->dev);
+ if (IS_ERR(data->resets))
+ return dev_err_probe(data->dev, PTR_ERR(data->resets),
+ "Failed to get resets");
+
+ cdns_mode_init(pdev, data);
+ ret = cdns_clk_rst_init(data);
+ if (ret)
+ return ret;
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "Failed to create children\n");
+ cdns_clk_rst_deinit(data);
+ return ret;
+ }
+
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static int cdns_starfive_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int cdns_starfive_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cdns_starfive *data = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ device_for_each_child(dev, NULL, cdns_starfive_remove_core);
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ cdns_clk_rst_deinit(data);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int cdns_starfive_runtime_resume(struct device *dev)
+{
+ struct cdns_starfive *data = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(data->num_clks, data->clks);
+}
+
+static int cdns_starfive_runtime_suspend(struct device *dev)
+{
+ struct cdns_starfive *data = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cdns_starfive_resume(struct device *dev)
+{
+ struct cdns_starfive *data = dev_get_drvdata(dev);
+
+ return cdns_clk_rst_init(data);
+}
+
+static int cdns_starfive_suspend(struct device *dev)
+{
+ struct cdns_starfive *data = dev_get_drvdata(dev);
+
+ cdns_clk_rst_deinit(data);
+
+ return 0;
+}
+#endif
+#endif
+
+static const struct dev_pm_ops cdns_starfive_pm_ops = {
+ SET_RUNTIME_PM_OPS(cdns_starfive_runtime_suspend,
+ cdns_starfive_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(cdns_starfive_suspend, cdns_starfive_resume)
+};
+
+static const struct of_device_id cdns_starfive_of_match[] = {
+ { .compatible = "starfive,jh7110-usb", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cdns_starfive_of_match);
+
+static struct platform_driver cdns_starfive_driver = {
+ .probe = cdns_starfive_probe,
+ .remove = cdns_starfive_remove,
+ .driver = {
+ .name = "cdns3-starfive",
+ .of_match_table = cdns_starfive_of_match,
+ .pm = &cdns_starfive_pm_ops,
+ },
+};
+module_platform_driver(cdns_starfive_driver);
+
+MODULE_ALIAS("platform:cdns3-starfive");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 StarFive Glue Layer");
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 07c318770362..81b9132e3aaa 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -199,7 +199,7 @@ static int cdns_ti_remove_core(struct device *dev, void *c)
return 0;
}
-static int cdns_ti_remove(struct platform_device *pdev)
+static void cdns_ti_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -208,8 +208,6 @@ static int cdns_ti_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static const struct of_device_id cdns_ti_of_match[] = {
@@ -221,7 +219,7 @@ MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
static struct platform_driver cdns_ti_driver = {
.probe = cdns_ti_probe,
- .remove = cdns_ti_remove,
+ .remove_new = cdns_ti_remove,
.driver = {
.name = "cdns3-ti",
.of_match_table = cdns_ti_of_match,
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 2855ac303001..336ef6dd8e7d 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
CI_HDRC_PMQOS,
};
+static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
+ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
+};
+
static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
{ .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
{ .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
@@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
{ .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
{ .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
{ .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
+ { .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
@@ -502,7 +507,7 @@ disable_hsic_regulator:
return ret;
}
-static int ci_hdrc_imx_remove(struct platform_device *pdev)
+static void ci_hdrc_imx_remove(struct platform_device *pdev)
{
struct ci_hdrc_imx_data *data = platform_get_drvdata(pdev);
@@ -522,8 +527,6 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
if (data->hsic_pad_regulator)
regulator_disable(data->hsic_pad_regulator);
}
-
- return 0;
}
static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
@@ -650,7 +653,7 @@ static const struct dev_pm_ops ci_hdrc_imx_pm_ops = {
};
static struct platform_driver ci_hdrc_imx_driver = {
.probe = ci_hdrc_imx_probe,
- .remove = ci_hdrc_imx_remove,
+ .remove_new = ci_hdrc_imx_remove,
.shutdown = ci_hdrc_imx_shutdown,
.driver = {
.name = "imx_usb",
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index 46105457e1ca..7b5b47ce8a02 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -274,7 +274,7 @@ err_iface:
return ret;
}
-static int ci_hdrc_msm_remove(struct platform_device *pdev)
+static void ci_hdrc_msm_remove(struct platform_device *pdev)
{
struct ci_hdrc_msm *ci = platform_get_drvdata(pdev);
@@ -282,8 +282,6 @@ static int ci_hdrc_msm_remove(struct platform_device *pdev)
ci_hdrc_remove_device(ci->ci);
clk_disable_unprepare(ci->iface_clk);
clk_disable_unprepare(ci->core_clk);
-
- return 0;
}
static const struct of_device_id msm_ci_dt_match[] = {
@@ -294,7 +292,7 @@ MODULE_DEVICE_TABLE(of, msm_ci_dt_match);
static struct platform_driver ci_hdrc_msm_driver = {
.probe = ci_hdrc_msm_probe,
- .remove = ci_hdrc_msm_remove,
+ .remove_new = ci_hdrc_msm_remove,
.driver = {
.name = "msm_hsusb",
.of_match_table = msm_ci_dt_match,
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index a72a9474afea..ca36d11a69ea 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -362,7 +362,7 @@ fail_power_off:
return err;
}
-static int tegra_usb_remove(struct platform_device *pdev)
+static void tegra_usb_remove(struct platform_device *pdev)
{
struct tegra_usb *usb = platform_get_drvdata(pdev);
@@ -371,8 +371,6 @@ static int tegra_usb_remove(struct platform_device *pdev)
pm_runtime_put_sync_suspend(&pdev->dev);
pm_runtime_force_suspend(&pdev->dev);
-
- return 0;
}
static int __maybe_unused tegra_usb_runtime_resume(struct device *dev)
@@ -410,7 +408,7 @@ static struct platform_driver tegra_usb_driver = {
.pm = &tegra_usb_pm,
},
.probe = tegra_usb_probe,
- .remove = tegra_usb_remove,
+ .remove_new = tegra_usb_remove,
};
module_platform_driver(tegra_usb_driver);
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index dc86b12060b5..1321ee67f3b8 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -106,20 +106,18 @@ clk_err:
return ret;
}
-static int ci_hdrc_usb2_remove(struct platform_device *pdev)
+static void ci_hdrc_usb2_remove(struct platform_device *pdev)
{
struct ci_hdrc_usb2_priv *priv = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
ci_hdrc_remove_device(priv->ci_pdev);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static struct platform_driver ci_hdrc_usb2_driver = {
.probe = ci_hdrc_usb2_probe,
- .remove = ci_hdrc_usb2_remove,
+ .remove_new = ci_hdrc_usb2_remove,
.driver = {
.name = "chipidea-usb2",
.of_match_table = of_match_ptr(ci_hdrc_usb2_of_match),
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 798cb077867a..51994d655b82 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1227,7 +1227,7 @@ ulpi_exit:
return ret;
}
-static int ci_hdrc_remove(struct platform_device *pdev)
+static void ci_hdrc_remove(struct platform_device *pdev)
{
struct ci_hdrc *ci = platform_get_drvdata(pdev);
@@ -1245,8 +1245,6 @@ static int ci_hdrc_remove(struct platform_device *pdev)
ci_hdrc_enter_lpm(ci, true);
ci_usb_phy_exit(ci);
ci_ulpi_exit(ci);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1485,7 +1483,7 @@ static const struct dev_pm_ops ci_pm_ops = {
static struct platform_driver ci_hdrc_driver = {
.probe = ci_hdrc_probe,
- .remove = ci_hdrc_remove,
+ .remove_new = ci_hdrc_remove,
.driver = {
.name = "ci_hdrc",
.pm = &ci_pm_ops,
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index c57c1a71a513..9ee9621e2ccc 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -113,7 +113,6 @@
#define MX7D_USBNC_USB_CTRL2_DP_DM_MASK (BIT(12) | BIT(13) | \
BIT(14) | BIT(15))
-#define MX7D_USB_OTG_PHY_CFG1 0x30
#define MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL BIT(0)
#define MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 BIT(1)
#define MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 BIT(2)
@@ -135,7 +134,7 @@
#define TXVREFTUNE0_MASK (0xf << 20)
#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
- MX6_BM_ID_WAKEUP)
+ MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN)
struct usbmisc_ops {
/* It's called once when probing a usb device */
@@ -152,6 +151,7 @@ struct usbmisc_ops {
int (*charger_detection)(struct imx_usbmisc_data *data);
/* It's called when the system resumes from a USB power loss */
int (*power_lost_check)(struct imx_usbmisc_data *data);
+ void (*vbus_comparator_on)(struct imx_usbmisc_data *data, bool on);
};
struct imx_usbmisc {
@@ -875,6 +875,33 @@ static int imx7d_charger_detection(struct imx_usbmisc_data *data)
return ret;
}
+static void usbmisc_imx7d_vbus_comparator_on(struct imx_usbmisc_data *data,
+ bool on)
+{
+ unsigned long flags;
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ u32 val;
+
+ if (data->hsic)
+ return;
+
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ /*
+ * Disable the VBUS valid comparator while in suspend mode:
+ * when OTG is disabled and DRVVBUS0 is asserted, the bandgap
+ * circuitry and the VBUS valid comparator would otherwise stay
+ * powered, even in Suspend or Sleep mode.
+ */
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ if (on)
+ val |= MX7D_USB_OTG_PHY_CFG2_DRVVBUS0;
+ else
+ val &= ~MX7D_USB_OTG_PHY_CFG2_DRVVBUS0;
+
+ writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+}
+
static int usbmisc_imx7ulp_init(struct imx_usbmisc_data *data)
{
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
@@ -1018,6 +1045,7 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = {
.set_wakeup = usbmisc_imx7d_set_wakeup,
.charger_detection = imx7d_charger_detection,
.power_lost_check = usbmisc_imx7d_power_lost_check,
+ .vbus_comparator_on = usbmisc_imx7d_vbus_comparator_on,
};
static const struct usbmisc_ops imx7ulp_usbmisc_ops = {
@@ -1132,6 +1160,9 @@ int imx_usbmisc_suspend(struct imx_usbmisc_data *data, bool wakeup)
usbmisc = dev_get_drvdata(data->dev);
+ if (usbmisc->ops->vbus_comparator_on)
+ usbmisc->ops->vbus_comparator_on(data, false);
+
if (wakeup && usbmisc->ops->set_wakeup)
ret = usbmisc->ops->set_wakeup(data, true);
if (ret) {
@@ -1185,6 +1216,9 @@ int imx_usbmisc_resume(struct imx_usbmisc_data *data, bool wakeup)
goto hsic_set_clk_fail;
}
+ if (usbmisc->ops->vbus_comparator_on)
+ usbmisc->ops->vbus_comparator_on(data, true);
+
return 0;
hsic_set_clk_fail:
diff --git a/drivers/usb/common/led.c b/drivers/usb/common/led.c
index 0865dd44a80a..1de18d90b134 100644
--- a/drivers/usb/common/led.c
+++ b/drivers/usb/common/led.c
@@ -14,8 +14,6 @@
#define BLINK_DELAY 30
-static unsigned long usb_blink_delay = BLINK_DELAY;
-
DEFINE_LED_TRIGGER(ledtrig_usb_gadget);
DEFINE_LED_TRIGGER(ledtrig_usb_host);
@@ -32,7 +30,7 @@ void usb_led_activity(enum usb_led_event ev)
break;
}
/* led_trigger_blink_oneshot() handles trig == NULL gracefully */
- led_trigger_blink_oneshot(trig, &usb_blink_delay, &usb_blink_delay, 0);
+ led_trigger_blink_oneshot(trig, BLINK_DELAY, BLINK_DELAY, 0);
}
EXPORT_SYMBOL_GPL(usb_led_activity);
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index e20874caba36..766005d20bae 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -267,7 +267,7 @@ put_role_sw:
return ret;
}
-static int usb_conn_remove(struct platform_device *pdev)
+static void usb_conn_remove(struct platform_device *pdev)
{
struct usb_conn_info *info = platform_get_drvdata(pdev);
@@ -277,8 +277,6 @@ static int usb_conn_remove(struct platform_device *pdev)
regulator_disable(info->vbus);
usb_role_switch_put(info->role_sw);
-
- return 0;
}
static int __maybe_unused usb_conn_suspend(struct device *dev)
@@ -338,7 +336,7 @@ MODULE_DEVICE_TABLE(of, usb_conn_dt_match);
static struct platform_driver usb_conn_driver = {
.probe = usb_conn_probe,
- .remove = usb_conn_remove,
+ .remove_new = usb_conn_remove,
.driver = {
.name = "usb-conn-gpio",
.pm = &usb_conn_pm_ops,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index fcf68818e999..1a16a8bdea60 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -746,6 +746,7 @@ static int driver_resume(struct usb_interface *intf)
return 0;
}
+#ifdef CONFIG_PM
/* The following routines apply to the entire device, not interfaces */
void usbfs_notify_suspend(struct usb_device *udev)
{
@@ -764,6 +765,7 @@ void usbfs_notify_resume(struct usb_device *udev)
}
mutex_unlock(&usbfs_mutex);
}
+#endif
struct usb_driver usbfs_driver = {
.name = "usbfs",
@@ -2640,21 +2642,21 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL\n", __func__);
ret = proc_control(ps, p);
if (ret >= 0)
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
break;
case USBDEVFS_BULK:
snoop(&dev->dev, "%s: BULK\n", __func__);
ret = proc_bulk(ps, p);
if (ret >= 0)
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
break;
case USBDEVFS_RESETEP:
snoop(&dev->dev, "%s: RESETEP\n", __func__);
ret = proc_resetep(ps, p);
if (ret >= 0)
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
break;
case USBDEVFS_RESET:
@@ -2666,7 +2668,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__);
ret = proc_clearhalt(ps, p);
if (ret >= 0)
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
break;
case USBDEVFS_GETDRIVER:
@@ -2693,7 +2695,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB\n", __func__);
ret = proc_submiturb(ps, p);
if (ret >= 0)
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
break;
#ifdef CONFIG_COMPAT
@@ -2701,14 +2703,14 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL32\n", __func__);
ret = proc_control_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
break;
case USBDEVFS_BULK32:
snoop(&dev->dev, "%s: BULK32\n", __func__);
ret = proc_bulk_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
break;
case USBDEVFS_DISCSIGNAL32:
@@ -2720,7 +2722,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
ret = proc_submiturb_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
break;
case USBDEVFS_IOCTL32:
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index ab2f3737764e..990280688b25 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -415,12 +415,15 @@ static int check_root_hub_suspended(struct device *dev)
return 0;
}
-static int suspend_common(struct device *dev, bool do_wakeup)
+static int suspend_common(struct device *dev, pm_message_t msg)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
+ bool do_wakeup;
int retval;
+ do_wakeup = PMSG_IS_AUTO(msg) ? true : device_may_wakeup(dev);
+
/* Root hub suspend should have stopped all downstream traffic,
* and all bus master traffic. And done so for both the interface
* and the stub usb_device (which we check here). But maybe it
@@ -447,7 +450,7 @@ static int suspend_common(struct device *dev, bool do_wakeup)
(retval == 0 && do_wakeup && hcd->shared_hcd &&
HCD_WAKEUP_PENDING(hcd->shared_hcd))) {
if (hcd->driver->pci_resume)
- hcd->driver->pci_resume(hcd, false);
+ hcd->driver->pci_resume(hcd, msg);
retval = -EBUSY;
}
if (retval)
@@ -470,7 +473,7 @@ static int suspend_common(struct device *dev, bool do_wakeup)
return retval;
}
-static int resume_common(struct device *dev, int event)
+static int resume_common(struct device *dev, pm_message_t msg)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
@@ -498,12 +501,11 @@ static int resume_common(struct device *dev, int event)
* No locking is needed because PCI controller drivers do not
* get unbound during system resume.
*/
- if (pci_dev->class == CL_EHCI && event != PM_EVENT_AUTO_RESUME)
+ if (pci_dev->class == CL_EHCI && msg.event != PM_EVENT_AUTO_RESUME)
for_each_companion(pci_dev, hcd,
ehci_wait_for_companions);
- retval = hcd->driver->pci_resume(hcd,
- event == PM_EVENT_RESTORE);
+ retval = hcd->driver->pci_resume(hcd, msg);
if (retval) {
dev_err(dev, "PCI post-resume error %d!\n", retval);
usb_hc_died(hcd);
@@ -516,7 +518,7 @@ static int resume_common(struct device *dev, int event)
static int hcd_pci_suspend(struct device *dev)
{
- return suspend_common(dev, device_may_wakeup(dev));
+ return suspend_common(dev, PMSG_SUSPEND);
}
static int hcd_pci_suspend_noirq(struct device *dev)
@@ -577,12 +579,12 @@ static int hcd_pci_resume_noirq(struct device *dev)
static int hcd_pci_resume(struct device *dev)
{
- return resume_common(dev, PM_EVENT_RESUME);
+ return resume_common(dev, PMSG_RESUME);
}
static int hcd_pci_restore(struct device *dev)
{
- return resume_common(dev, PM_EVENT_RESTORE);
+ return resume_common(dev, PMSG_RESTORE);
}
#else
@@ -600,7 +602,7 @@ static int hcd_pci_runtime_suspend(struct device *dev)
{
int retval;
- retval = suspend_common(dev, true);
+ retval = suspend_common(dev, PMSG_AUTO_SUSPEND);
if (retval == 0)
powermac_set_asic(to_pci_dev(dev), 0);
dev_dbg(dev, "hcd_pci_runtime_suspend: %d\n", retval);
@@ -612,7 +614,7 @@ static int hcd_pci_runtime_resume(struct device *dev)
int retval;
powermac_set_asic(to_pci_dev(dev), 1);
- retval = resume_common(dev, PM_EVENT_AUTO_RESUME);
+ retval = resume_common(dev, PMSG_AUTO_RESUME);
dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
return retval;
}
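suspend_common() and resume_common() now take a pm_message_t rather than a bool/int, so the wakeup decision can be derived from the message itself and the full message can be forwarded to the host controller's pci_resume() hook. A minimal sketch of that decision, assuming a hypothetical foo_hw_suspend() helper:

static int foo_suspend(struct device *dev, pm_message_t msg)
{
	/*
	 * Runtime (auto) suspend always arms remote wakeup; system
	 * sleep honours the device's user-visible wakeup policy.
	 */
	bool do_wakeup = PMSG_IS_AUTO(msg) ? true : device_may_wakeup(dev);

	return foo_hw_suspend(dev, do_wakeup);
}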
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 97a0f8faea6e..a739403a9e45 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2018,6 +2018,19 @@ bool usb_device_is_owned(struct usb_device *udev)
return !!hub->ports[udev->portnum - 1]->port_owner;
}
+static void update_port_device_state(struct usb_device *udev)
+{
+ struct usb_hub *hub;
+ struct usb_port *port_dev;
+
+ if (udev->parent) {
+ hub = usb_hub_to_struct_hub(udev->parent);
+ port_dev = hub->ports[udev->portnum - 1];
+ WRITE_ONCE(port_dev->state, udev->state);
+ sysfs_notify_dirent(port_dev->state_kn);
+ }
+}
+
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
@@ -2030,6 +2043,7 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev)
if (udev->state == USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
udev->state = USB_STATE_NOTATTACHED;
+ update_port_device_state(udev);
}
/**
@@ -2086,6 +2100,7 @@ void usb_set_device_state(struct usb_device *udev,
udev->state != USB_STATE_SUSPENDED)
udev->active_duration += jiffies;
udev->state = new_state;
+ update_port_device_state(udev);
} else
recursively_mark_NOTATTACHED(udev);
spin_unlock_irqrestore(&device_state_lock, flags);
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index e23833562e4f..37897afd1b64 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -84,6 +84,8 @@ struct usb_hub {
* @peer: related usb2 and usb3 ports (share the same connector)
* @req: default pm qos request for hubs without port power control
* @connect_type: port's connect type
+ * @state: device state of the usb device attached to the port
+ * @state_kn: kernfs_node of the sysfs attribute that accesses @state
* @location: opaque representation of platform connector location
* @status_lock: synchronize port_event() vs usb_port_{suspend|resume}
* @portnum: 1-based port index
@@ -100,6 +102,8 @@ struct usb_port {
struct usb_port *peer;
struct dev_pm_qos_request *req;
enum usb_port_connect_type connect_type;
+ enum usb_device_state state;
+ struct kernfs_node *state_kn;
usb_port_location_t location;
struct mutex status_lock;
u32 over_current_count;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 06a8f1f84f6f..77be0dc28da9 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -160,6 +160,16 @@ static ssize_t connect_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(connect_type);
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+ enum usb_device_state state = READ_ONCE(port_dev->state);
+
+ return sysfs_emit(buf, "%s\n", usb_state_string(state));
+}
+static DEVICE_ATTR_RO(state);
+
static ssize_t over_current_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -259,6 +269,7 @@ static DEVICE_ATTR_RW(usb3_lpm_permit);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
+ &dev_attr_state.attr,
&dev_attr_location.attr,
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
@@ -705,19 +716,24 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
return retval;
}
+ port_dev->state_kn = sysfs_get_dirent(port_dev->dev.kobj.sd, "state");
+ if (!port_dev->state_kn) {
+ dev_err(&port_dev->dev, "failed to sysfs_get_dirent 'state'\n");
+ retval = -ENODEV;
+ goto err_unregister;
+ }
+
/* Set default policy of port-poweroff disabled. */
retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
if (retval < 0) {
- device_unregister(&port_dev->dev);
- return retval;
+ goto err_put_kn;
}
retval = component_add(&port_dev->dev, &connector_ops);
if (retval) {
dev_warn(&port_dev->dev, "failed to add component\n");
- device_unregister(&port_dev->dev);
- return retval;
+ goto err_put_kn;
}
find_and_link_peer(hub, port1);
@@ -754,6 +770,13 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
port_dev->req = NULL;
}
return 0;
+
+err_put_kn:
+ sysfs_put(port_dev->state_kn);
+err_unregister:
+ device_unregister(&port_dev->dev);
+
+ return retval;
}
void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
@@ -765,5 +788,6 @@ void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
if (peer)
unlink_peers(port_dev, peer);
component_del(&port_dev->dev, &connector_ops);
+ sysfs_put(port_dev->state_kn);
device_unregister(&port_dev->dev);
}
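The new per-port "state" attribute caches its kernfs node so that usb_set_device_state() can call sysfs_notify_dirent() on every transition, which makes the attribute pollable from userspace. A hedged userspace sketch of waiting for such notifications; the sysfs path is an example only and depends on the hub topology:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	/* Example path for port 1 of root hub usb2; adjust for your topology. */
	int fd = open("/sys/bus/usb/devices/usb2/2-0:1.0/usb2-port1/state", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

	if (fd < 0)
		return 1;

	for (;;) {
		pread(fd, buf, sizeof(buf) - 1, 0);	/* consume current value */
		poll(&pfd, 1, -1);			/* wakes on sysfs_notify_dirent() */
		n = pread(fd, buf, sizeof(buf) - 1, 0);
		if (n > 0) {
			buf[n] = '\0';
			printf("port state: %s", buf);
		}
	}
}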
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 21d16533bd2f..4c7c3dd15f9b 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -161,6 +161,25 @@ static void dwc2_set_amlogic_g12a_params(struct dwc2_hsotg *hsotg)
p->hird_threshold_en = false;
}
+static void dwc2_set_amlogic_a1_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->speed = DWC2_SPEED_PARAM_HIGH;
+ p->host_rx_fifo_size = 192;
+ p->host_nperio_tx_fifo_size = 128;
+ p->host_perio_tx_fifo_size = 128;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+ p->phy_utmi_width = 8;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 << GAHBCFG_HBSTLEN_SHIFT;
+ p->lpm = false;
+ p->lpm_clock_gating = false;
+ p->besl = false;
+ p->hird_threshold_en = false;
+}
+
static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
@@ -258,6 +277,8 @@ const struct of_device_id dwc2_of_match_table[] = {
.data = dwc2_set_amlogic_params },
{ .compatible = "amlogic,meson-g12a-usb",
.data = dwc2_set_amlogic_g12a_params },
+ { .compatible = "amlogic,meson-a1-usb",
+ .data = dwc2_set_amlogic_a1_params },
{ .compatible = "amcc,dwc-otg", .data = dwc2_set_amcc_params },
{ .compatible = "apm,apm82181-dwc-otg", .data = dwc2_set_amcc_params },
{ .compatible = "st,stm32f4x9-fsotg",
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 5aee284018c0..0a806f80217e 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -203,6 +203,11 @@ int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
return ret;
}
+static void dwc2_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
{
int i, ret;
@@ -213,6 +218,10 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
"error getting reset control\n");
reset_control_deassert(hsotg->reset);
+ ret = devm_add_action_or_reset(hsotg->dev, dwc2_reset_control_assert,
+ hsotg->reset);
+ if (ret)
+ return ret;
hsotg->reset_ecc = devm_reset_control_get_optional(hsotg->dev, "dwc2-ecc");
if (IS_ERR(hsotg->reset_ecc))
@@ -220,6 +229,10 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
"error getting reset control for ecc\n");
reset_control_deassert(hsotg->reset_ecc);
+ ret = devm_add_action_or_reset(hsotg->dev, dwc2_reset_control_assert,
+ hsotg->reset_ecc);
+ if (ret)
+ return ret;
/*
* Attempt to find a generic PHY, then look for an old style
@@ -288,7 +301,7 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
* stops device processing. Any resources used on behalf of this device are
* freed.
*/
-static int dwc2_driver_remove(struct platform_device *dev)
+static void dwc2_driver_remove(struct platform_device *dev)
{
struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
struct dwc2_gregs_backup *gr;
@@ -338,11 +351,6 @@ static int dwc2_driver_remove(struct platform_device *dev)
if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
-
- reset_control_assert(hsotg->reset);
- reset_control_assert(hsotg->reset_ecc);
-
- return 0;
}
/**
@@ -746,7 +754,7 @@ static struct platform_driver dwc2_platform_driver = {
.pm = &dwc2_dev_pm_ops,
},
.probe = dwc2_driver_probe,
- .remove = dwc2_driver_remove,
+ .remove_new = dwc2_driver_remove,
.shutdown = dwc2_driver_shutdown,
};
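dwc2_lowlevel_hw_init() now pairs each reset_control_deassert() with a devm action, so the resets are re-asserted automatically on probe failure and on unbind, and dwc2_driver_remove() can drop its explicit reset_control_assert() calls. The shape of the pattern, with hypothetical names:

static void foo_reset_assert(void *data)
{
	reset_control_assert(data);
}

static int foo_init_reset(struct device *dev, struct reset_control *rst)
{
	int ret;

	reset_control_deassert(rst);

	/* Re-assert the reset automatically on probe error or removal. */
	ret = devm_add_action_or_reset(dev, foo_reset_assert, rst);
	if (ret)
		return ret;

	return 0;
}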
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index d68958e151a7..f6689b731718 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1800,6 +1800,17 @@ static int dwc3_probe(struct platform_device *pdev)
dwc_res = *res;
dwc_res.start += DWC3_GLOBALS_REGS_START;
+ if (dev->of_node) {
+ struct device_node *parent = of_get_parent(dev->of_node);
+
+ if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
+ dwc_res.start -= DWC3_GLOBALS_REGS_START;
+ dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
+ }
+
+ of_node_put(parent);
+ }
+
regs = devm_ioremap_resource(dev, &dwc_res);
if (IS_ERR(regs))
return PTR_ERR(regs);
@@ -1913,7 +1924,7 @@ err_put_psy:
return ret;
}
-static int dwc3_remove(struct platform_device *pdev)
+static void dwc3_remove(struct platform_device *pdev)
{
struct dwc3 *dwc = platform_get_drvdata(pdev);
@@ -1940,8 +1951,6 @@ static int dwc3_remove(struct platform_device *pdev)
if (dwc->usb_psy)
power_supply_put(dwc->usb_psy);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -2252,7 +2261,7 @@ MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
- .remove = dwc3_remove,
+ .remove_new = dwc3_remove,
.driver = {
.name = "dwc3",
.of_match_table = of_match_ptr(of_dwc3_match),
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 1f043c31a096..8b1295e4dcdd 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -84,6 +84,8 @@
#define DWC3_OTG_REGS_START 0xcc00
#define DWC3_OTG_REGS_END 0xccff
+#define DWC3_RTK_RTD_GLOBALS_REGS_START 0x8100
+
/* Global Registers */
#define DWC3_GSBUSCFG0 0xc100
#define DWC3_GSBUSCFG1 0xc104
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index cda9458c809b..1755f2f848c5 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -275,7 +275,7 @@ static int dwc3_ti_remove_core(struct device *dev, void *c)
return 0;
}
-static int dwc3_ti_remove(struct platform_device *pdev)
+static void dwc3_ti_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dwc3_data *data = platform_get_drvdata(pdev);
@@ -294,7 +294,6 @@ static int dwc3_ti_remove(struct platform_device *pdev)
pm_runtime_set_suspended(dev);
platform_set_drvdata(pdev, NULL);
- return 0;
}
#ifdef CONFIG_PM
@@ -362,7 +361,7 @@ MODULE_DEVICE_TABLE(of, dwc3_ti_of_match);
static struct platform_driver dwc3_ti_driver = {
.probe = dwc3_ti_probe,
- .remove = dwc3_ti_remove,
+ .remove_new = dwc3_ti_remove,
.driver = {
.name = "dwc3-am62",
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 4be6a873bd07..f882dd647340 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -128,7 +128,7 @@ vdd33_err:
return ret;
}
-static int dwc3_exynos_remove(struct platform_device *pdev)
+static void dwc3_exynos_remove(struct platform_device *pdev)
{
struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
int i;
@@ -143,8 +143,6 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
regulator_disable(exynos->vdd33);
regulator_disable(exynos->vdd10);
-
- return 0;
}
static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
@@ -234,7 +232,7 @@ static const struct dev_pm_ops dwc3_exynos_dev_pm_ops = {
static struct platform_driver dwc3_exynos_driver = {
.probe = dwc3_exynos_probe,
- .remove = dwc3_exynos_remove,
+ .remove_new = dwc3_exynos_remove,
.driver = {
.name = "exynos-dwc3",
.of_match_table = exynos_dwc3_match,
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
index 174f07614318..8b9a3bb587bf 100644
--- a/drivers/usb/dwc3/dwc3-imx8mp.c
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -266,7 +266,7 @@ disable_hsio_clk:
return err;
}
-static int dwc3_imx8mp_remove(struct platform_device *pdev)
+static void dwc3_imx8mp_remove(struct platform_device *pdev)
{
struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -280,8 +280,6 @@ static int dwc3_imx8mp_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx,
@@ -411,7 +409,7 @@ MODULE_DEVICE_TABLE(of, dwc3_imx8mp_of_match);
static struct platform_driver dwc3_imx8mp_driver = {
.probe = dwc3_imx8mp_probe,
- .remove = dwc3_imx8mp_remove,
+ .remove_new = dwc3_imx8mp_remove,
.driver = {
.name = "imx8mp-dwc3",
.pm = &dwc3_imx8mp_dev_pm_ops,
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 1317959294e6..0a09aedc2573 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -181,7 +181,7 @@ static int kdwc3_remove_core(struct device *dev, void *c)
return 0;
}
-static int kdwc3_remove(struct platform_device *pdev)
+static void kdwc3_remove(struct platform_device *pdev)
{
struct dwc3_keystone *kdwc = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
@@ -198,8 +198,6 @@ static int kdwc3_remove(struct platform_device *pdev)
phy_pm_runtime_put_sync(kdwc->usb3_phy);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static const struct of_device_id kdwc3_of_match[] = {
@@ -211,7 +209,7 @@ MODULE_DEVICE_TABLE(of, kdwc3_of_match);
static struct platform_driver kdwc3_driver = {
.probe = kdwc3_probe,
- .remove = kdwc3_remove,
+ .remove_new = kdwc3_remove,
.driver = {
.name = "keystone-dwc3",
.of_match_table = kdwc3_of_match,
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index b282ad0e69c6..e99c7489dba0 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -140,7 +140,6 @@ static const char * const meson_a1_phy_names[] = {
struct dwc3_meson_g12a;
struct dwc3_meson_g12a_drvdata {
- bool otg_switch_supported;
bool otg_phy_host_port_disable;
struct clk_bulk_data *clks;
int num_clks;
@@ -189,7 +188,6 @@ static int dwc3_meson_gxl_usb_post_init(struct dwc3_meson_g12a *priv);
*/
static const struct dwc3_meson_g12a_drvdata gxl_drvdata = {
- .otg_switch_supported = true,
.otg_phy_host_port_disable = true,
.clks = meson_gxl_clocks,
.num_clks = ARRAY_SIZE(meson_g12a_clocks),
@@ -203,7 +201,6 @@ static const struct dwc3_meson_g12a_drvdata gxl_drvdata = {
};
static const struct dwc3_meson_g12a_drvdata gxm_drvdata = {
- .otg_switch_supported = true,
.otg_phy_host_port_disable = true,
.clks = meson_gxl_clocks,
.num_clks = ARRAY_SIZE(meson_g12a_clocks),
@@ -217,7 +214,6 @@ static const struct dwc3_meson_g12a_drvdata gxm_drvdata = {
};
static const struct dwc3_meson_g12a_drvdata axg_drvdata = {
- .otg_switch_supported = true,
.clks = meson_gxl_clocks,
.num_clks = ARRAY_SIZE(meson_gxl_clocks),
.phy_names = meson_a1_phy_names,
@@ -230,7 +226,6 @@ static const struct dwc3_meson_g12a_drvdata axg_drvdata = {
};
static const struct dwc3_meson_g12a_drvdata g12a_drvdata = {
- .otg_switch_supported = true,
.clks = meson_g12a_clocks,
.num_clks = ARRAY_SIZE(meson_g12a_clocks),
.phy_names = meson_g12a_phy_names,
@@ -242,7 +237,6 @@ static const struct dwc3_meson_g12a_drvdata g12a_drvdata = {
};
static const struct dwc3_meson_g12a_drvdata a1_drvdata = {
- .otg_switch_supported = false,
.clks = meson_a1_clocks,
.num_clks = ARRAY_SIZE(meson_a1_clocks),
.phy_names = meson_a1_phy_names,
@@ -307,7 +301,7 @@ static int dwc3_meson_g12a_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
U2P_R0_POWER_ON_RESET,
U2P_R0_POWER_ON_RESET);
- if (priv->drvdata->otg_switch_supported && i == USB2_OTG_PHY) {
+ if (i == USB2_OTG_PHY) {
regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS);
@@ -490,7 +484,7 @@ static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv,
{
int ret;
- if (!priv->drvdata->otg_switch_supported || !priv->phys[USB2_OTG_PHY])
+ if (!priv->phys[USB2_OTG_PHY])
return -EINVAL;
if (mode == PHY_MODE_USB_HOST)
@@ -589,9 +583,6 @@ static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
int ret, irq;
struct device *dev = &pdev->dev;
- if (!priv->drvdata->otg_switch_supported)
- return 0;
-
if (priv->otg_mode == USB_DR_MODE_OTG) {
/* Ack irq before registering */
regmap_update_bits(priv->usb_glue_regmap, USB_R5,
@@ -805,7 +796,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = dwc3_meson_g12a_otg_init(pdev, priv);
if (ret)
- goto err_phys_power;
+ goto err_plat_depopulate;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -813,6 +804,9 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
return 0;
+err_plat_depopulate:
+ of_platform_depopulate(dev);
+
err_phys_power:
for (i = 0 ; i < PHY_COUNT ; ++i)
phy_power_off(priv->phys[i]);
@@ -835,14 +829,13 @@ err_disable_clks:
return ret;
}
-static int dwc3_meson_g12a_remove(struct platform_device *pdev)
+static void dwc3_meson_g12a_remove(struct platform_device *pdev)
{
struct dwc3_meson_g12a *priv = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int i;
- if (priv->drvdata->otg_switch_supported)
- usb_role_switch_unregister(priv->role_switch);
+ usb_role_switch_unregister(priv->role_switch);
of_platform_depopulate(dev);
@@ -859,8 +852,6 @@ static int dwc3_meson_g12a_remove(struct platform_device *pdev)
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
-
- return 0;
}
static int __maybe_unused dwc3_meson_g12a_runtime_suspend(struct device *dev)
@@ -971,7 +962,7 @@ MODULE_DEVICE_TABLE(of, dwc3_meson_g12a_match);
static struct platform_driver dwc3_meson_g12a_driver = {
.probe = dwc3_meson_g12a_probe,
- .remove = dwc3_meson_g12a_remove,
+ .remove_new = dwc3_meson_g12a_remove,
.driver = {
.name = "dwc3-meson-g12a",
.of_match_table = dwc3_meson_g12a_match,
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 71fd620c5161..7e6ad8fe61a5 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -112,13 +112,11 @@ static void __dwc3_of_simple_teardown(struct dwc3_of_simple *simple)
pm_runtime_set_suspended(simple->dev);
}
-static int dwc3_of_simple_remove(struct platform_device *pdev)
+static void dwc3_of_simple_remove(struct platform_device *pdev)
{
struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
__dwc3_of_simple_teardown(simple);
-
- return 0;
}
static void dwc3_of_simple_shutdown(struct platform_device *pdev)
@@ -183,7 +181,7 @@ MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
static struct platform_driver dwc3_of_simple_driver = {
.probe = dwc3_of_simple_probe,
- .remove = dwc3_of_simple_remove,
+ .remove_new = dwc3_of_simple_remove,
.shutdown = dwc3_of_simple_shutdown,
.driver = {
.name = "dwc3-of-simple",
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index efaf0db595f4..d5c77db4daa9 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -534,7 +534,7 @@ err1:
return ret;
}
-static int dwc3_omap_remove(struct platform_device *pdev)
+static void dwc3_omap_remove(struct platform_device *pdev)
{
struct dwc3_omap *omap = platform_get_drvdata(pdev);
@@ -543,8 +543,6 @@ static int dwc3_omap_remove(struct platform_device *pdev)
of_platform_depopulate(omap->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id of_dwc3_match[] = {
@@ -611,7 +609,7 @@ static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
static struct platform_driver dwc3_omap_driver = {
.probe = dwc3_omap_probe,
- .remove = dwc3_omap_remove,
+ .remove_new = dwc3_omap_remove,
.driver = {
.name = "omap-dwc3",
.of_match_table = of_dwc3_match,
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 79b22abf9727..3de43df6bbe8 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -167,7 +167,8 @@ static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
qcom->edev = extcon_get_edev_by_phandle(dev, 0);
if (IS_ERR(qcom->edev))
- return PTR_ERR(qcom->edev);
+ return dev_err_probe(dev, PTR_ERR(qcom->edev),
+ "Failed to get extcon\n");
qcom->vbus_nb.notifier_call = dwc3_qcom_vbus_notifier;
@@ -252,16 +253,14 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
if (IS_ERR(qcom->icc_path_ddr)) {
- dev_err(dev, "failed to get usb-ddr path: %ld\n",
- PTR_ERR(qcom->icc_path_ddr));
- return PTR_ERR(qcom->icc_path_ddr);
+ return dev_err_probe(dev, PTR_ERR(qcom->icc_path_ddr),
+ "failed to get usb-ddr path\n");
}
qcom->icc_path_apps = of_icc_get(dev, "apps-usb");
if (IS_ERR(qcom->icc_path_apps)) {
- dev_err(dev, "failed to get apps-usb path: %ld\n",
- PTR_ERR(qcom->icc_path_apps));
- ret = PTR_ERR(qcom->icc_path_apps);
+ ret = dev_err_probe(dev, PTR_ERR(qcom->icc_path_apps),
+ "failed to get apps-usb path\n");
goto put_path_ddr;
}
@@ -800,6 +799,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dwc3_qcom *qcom;
struct resource *res, *parent_res = NULL;
+ struct resource local_res;
int ret, i;
bool ignore_pipe_clk;
bool wakeup_source;
@@ -821,9 +821,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->resets = devm_reset_control_array_get_optional_exclusive(dev);
if (IS_ERR(qcom->resets)) {
- ret = PTR_ERR(qcom->resets);
- dev_err(&pdev->dev, "failed to get resets, err=%d\n", ret);
- return ret;
+ return dev_err_probe(&pdev->dev, PTR_ERR(qcom->resets),
+ "failed to get resets\n");
}
ret = reset_control_assert(qcom->resets);
@@ -842,7 +841,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np));
if (ret) {
- dev_err(dev, "failed to get clocks\n");
+ dev_err_probe(dev, ret, "failed to get clocks\n");
goto reset_assert;
}
@@ -851,9 +850,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (np) {
parent_res = res;
} else {
- parent_res = kmemdup(res, sizeof(struct resource), GFP_KERNEL);
- if (!parent_res)
- return -ENOMEM;
+ memcpy(&local_res, res, sizeof(struct resource));
+ parent_res = &local_res;
parent_res->start = res->start +
qcom->acpi_pdata->qscratch_base_offset;
@@ -865,9 +863,10 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (IS_ERR_OR_NULL(qcom->urs_usb)) {
dev_err(dev, "failed to create URS USB platdev\n");
if (!qcom->urs_usb)
- return -ENODEV;
+ ret = -ENODEV;
else
- return PTR_ERR(qcom->urs_usb);
+ ret = PTR_ERR(qcom->urs_usb);
+ goto clk_disable;
}
}
}
@@ -947,14 +946,18 @@ reset_assert:
return ret;
}
-static int dwc3_qcom_remove(struct platform_device *pdev)
+static void dwc3_qcom_remove(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int i;
device_remove_software_node(&qcom->dwc3->dev);
- of_platform_depopulate(dev);
+ if (np)
+ of_platform_depopulate(&pdev->dev);
+ else
+ platform_device_put(pdev);
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
@@ -967,8 +970,6 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
pm_runtime_allow(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
@@ -1061,7 +1062,7 @@ MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match);
static struct platform_driver dwc3_qcom_driver = {
.probe = dwc3_qcom_probe,
- .remove = dwc3_qcom_remove,
+ .remove_new = dwc3_qcom_remove,
.driver = {
.name = "dwc3-qcom",
.pm = &dwc3_qcom_dev_pm_ops,
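The dwc3-qcom error paths above switch to dev_err_probe(), which records -EPROBE_DEFER quietly (the reason shows up in /sys/kernel/debug/devices_deferred) instead of logging it, prints other errors normally, and returns the error code in a single statement. A minimal sketch with a hypothetical clock lookup:

static int foo_get_core_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "core");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "failed to get core clock\n");

	return 0;
}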
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index fea5290de83f..211360eee95a 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -305,7 +305,7 @@ undo_platform_dev_alloc:
return ret;
}
-static int st_dwc3_remove(struct platform_device *pdev)
+static void st_dwc3_remove(struct platform_device *pdev)
{
struct st_dwc3 *dwc3_data = platform_get_drvdata(pdev);
@@ -313,8 +313,6 @@ static int st_dwc3_remove(struct platform_device *pdev)
reset_control_assert(dwc3_data->rstc_pwrdn);
reset_control_assert(dwc3_data->rstc_rst);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -364,7 +362,7 @@ MODULE_DEVICE_TABLE(of, st_dwc3_match);
static struct platform_driver st_dwc3_driver = {
.probe = st_dwc3_probe,
- .remove = st_dwc3_remove,
+ .remove_new = st_dwc3_remove,
.driver = {
.name = "usb-st-dwc3",
.of_match_table = st_dwc3_match,
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 2c36f97652ca..19307d24f3a0 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -305,7 +305,7 @@ err_clk_put:
return ret;
}
-static int dwc3_xlnx_remove(struct platform_device *pdev)
+static void dwc3_xlnx_remove(struct platform_device *pdev)
{
struct dwc3_xlnx *priv_data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -318,8 +318,6 @@ static int dwc3_xlnx_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
-
- return 0;
}
static int __maybe_unused dwc3_xlnx_runtime_suspend(struct device *dev)
@@ -388,7 +386,7 @@ static const struct dev_pm_ops dwc3_xlnx_dev_pm_ops = {
static struct platform_driver dwc3_xlnx_driver = {
.probe = dwc3_xlnx_probe,
- .remove = dwc3_xlnx_remove,
+ .remove_new = dwc3_xlnx_remove,
.driver = {
.name = "dwc3-xilinx",
.of_match_table = dwc3_xlnx_of_match,
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 953b752a5052..b94243237293 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1207,5 +1207,8 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
}
break;
+ default:
+ dev_err(dwc->dev, "unknown endpoint event %d\n", event->endpoint_event);
+ break;
}
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b78599dd705c..5fd067151fbf 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2703,13 +2703,17 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
static int dwc3_gadget_soft_connect(struct dwc3 *dwc)
{
+ int ret;
+
/*
* In the Synopsys DWC_usb31 1.90a programming guide section
* 4.1.9, it specifies that for a reconnect after a
* device-initiated disconnect requires a core soft reset
* (DCTL.CSftRst) before enabling the run/stop bit.
*/
- dwc3_core_soft_reset(dwc);
+ ret = dwc3_core_soft_reset(dwc);
+ if (ret)
+ return ret;
dwc3_event_buffers_setup(dwc);
__dwc3_gadget_start(dwc);
@@ -2744,7 +2748,9 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
ret = pm_runtime_get_sync(dwc->dev);
if (!ret || ret < 0) {
pm_runtime_put(dwc->dev);
- return 0;
+ if (ret < 0)
+ pm_runtime_set_suspended(dwc->dev);
+ return ret;
}
if (dwc->pullups_connected == is_on) {
@@ -3809,6 +3815,9 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
break;
+ default:
+ dev_err(dwc->dev, "unknown endpoint event %d\n", event->endpoint_event);
+ break;
}
}
diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
index cb75464ab290..958fc40eae86 100644
--- a/drivers/usb/fotg210/fotg210-core.c
+++ b/drivers/usb/fotg210/fotg210-core.c
@@ -165,7 +165,7 @@ static int fotg210_probe(struct platform_device *pdev)
return ret;
}
-static int fotg210_remove(struct platform_device *pdev)
+static void fotg210_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
enum usb_dr_mode mode;
@@ -176,8 +176,6 @@ static int fotg210_remove(struct platform_device *pdev)
fotg210_udc_remove(pdev);
else
fotg210_hcd_remove(pdev);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -196,7 +194,7 @@ static struct platform_driver fotg210_driver = {
.of_match_table = of_match_ptr(fotg210_of_match),
},
.probe = fotg210_probe,
- .remove = fotg210_remove,
+ .remove_new = fotg210_remove,
};
static int __init fotg210_init(void)
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 9f6b10134121..ea85e2c701a1 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -23,7 +23,11 @@
#define HIDG_MINORS 4
static int major, minors;
-static struct class *hidg_class;
+
+static const struct class hidg_class = {
+ .name = "hidg",
+};
+
static DEFINE_IDA(hidg_ida);
static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */
@@ -1272,7 +1276,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
device_initialize(&hidg->dev);
hidg->dev.release = hidg_release;
- hidg->dev.class = hidg_class;
+ hidg->dev.class = &hidg_class;
hidg->dev.devt = MKDEV(major, opts->minor);
ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor);
if (ret)
@@ -1325,17 +1329,13 @@ int ghid_setup(struct usb_gadget *g, int count)
int status;
dev_t dev;
- hidg_class = class_create("hidg");
- if (IS_ERR(hidg_class)) {
- status = PTR_ERR(hidg_class);
- hidg_class = NULL;
+ status = class_register(&hidg_class);
+ if (status)
return status;
- }
status = alloc_chrdev_region(&dev, 0, count, "hidg");
if (status) {
- class_destroy(hidg_class);
- hidg_class = NULL;
+ class_unregister(&hidg_class);
return status;
}
@@ -1352,6 +1352,5 @@ void ghid_cleanup(void)
major = minors = 0;
}
- class_destroy(hidg_class);
- hidg_class = NULL;
+ class_unregister(&hidg_class);
}
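f_hid here, and f_printer below, replace a heap-allocated class from class_create()/class_destroy() with a statically defined const struct class registered via class_register()/class_unregister(). The registration pattern, sketched as a stand-alone module with a hypothetical class name:

static const struct class foo_class = {
	.name = "foo",
};

static int __init foo_init(void)
{
	return class_register(&foo_class);
}

static void __exit foo_exit(void)
{
	class_unregister(&foo_class);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");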
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 3a30feb47073..da07e45ae6df 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2857,7 +2857,7 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
const char **name_pfx)
{
struct fsg_lun *lun;
- char *pathbuf, *p;
+ char *pathbuf = NULL, *p = "(no medium)";
int rc = -ENOMEM;
if (id >= ARRAY_SIZE(common->luns))
@@ -2907,12 +2907,9 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
rc = fsg_lun_open(lun, cfg->filename);
if (rc)
goto error_lun;
- }
- pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
- p = "(no medium)";
- if (fsg_lun_is_open(lun)) {
p = "(error)";
+ pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
if (pathbuf) {
p = file_path(lun->filp, pathbuf, PATH_MAX);
if (IS_ERR(p))
@@ -2931,7 +2928,6 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
error_lun:
if (device_is_registered(&lun->dev))
device_unregister(&lun->dev);
- fsg_lun_close(lun);
common->luns[id] = NULL;
error_sysfs:
kfree(lun);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 28db3e336e7d..076dd4c1be96 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -54,7 +54,10 @@
#define DEFAULT_Q_LEN 10 /* same as legacy g_printer gadget */
static int major, minors;
-static struct class *usb_gadget_class;
+static const struct class usb_gadget_class = {
+ .name = "usb_printer_gadget",
+};
+
static DEFINE_IDA(printer_ida);
static DEFINE_MUTEX(printer_ida_lock); /* protects access to printer_ida */
@@ -1120,7 +1123,7 @@ autoconf_fail:
/* Setup the sysfs files for the printer gadget. */
devt = MKDEV(major, dev->minor);
- pdev = device_create(usb_gadget_class, NULL, devt,
+ pdev = device_create(&usb_gadget_class, NULL, devt,
NULL, "g_printer%d", dev->minor);
if (IS_ERR(pdev)) {
ERROR(dev, "Failed to create device: g_printer\n");
@@ -1143,7 +1146,7 @@ autoconf_fail:
return 0;
fail_cdev_add:
- device_destroy(usb_gadget_class, devt);
+ device_destroy(&usb_gadget_class, devt);
fail_rx_reqs:
while (!list_empty(&dev->rx_reqs)) {
@@ -1211,8 +1214,8 @@ static ssize_t f_printer_opts_pnp_string_show(struct config_item *item,
if (!opts->pnp_string)
goto unlock;
- result = strlcpy(page, opts->pnp_string, PAGE_SIZE);
- if (result >= PAGE_SIZE) {
+ result = strscpy(page, opts->pnp_string, PAGE_SIZE);
+ if (result < 1) {
result = PAGE_SIZE;
} else if (page[result - 1] != '\n' && result + 1 < PAGE_SIZE) {
page[result++] = '\n';
@@ -1410,7 +1413,7 @@ static void printer_func_unbind(struct usb_configuration *c,
dev = func_to_printer(f);
- device_destroy(usb_gadget_class, MKDEV(major, dev->minor));
+ device_destroy(&usb_gadget_class, MKDEV(major, dev->minor));
/* Remove Character Device */
cdev_del(&dev->printer_cdev);
@@ -1512,19 +1515,14 @@ static int gprinter_setup(int count)
int status;
dev_t devt;
- usb_gadget_class = class_create("usb_printer_gadget");
- if (IS_ERR(usb_gadget_class)) {
- status = PTR_ERR(usb_gadget_class);
- usb_gadget_class = NULL;
- pr_err("unable to create usb_gadget class %d\n", status);
+ status = class_register(&usb_gadget_class);
+ if (status)
return status;
- }
status = alloc_chrdev_region(&devt, 0, count, "USB printer gadget");
if (status) {
pr_err("alloc_chrdev_region %d\n", status);
- class_destroy(usb_gadget_class);
- usb_gadget_class = NULL;
+ class_unregister(&usb_gadget_class);
return status;
}
@@ -1540,6 +1538,5 @@ static void gprinter_cleanup(void)
unregister_chrdev_region(MKDEV(major, 0), minors);
major = minors = 0;
}
- class_destroy(usb_gadget_class);
- usb_gadget_class = NULL;
+ class_unregister(&usb_gadget_class);
}
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index a0ca47fbff0f..1115396b46a0 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -539,16 +539,20 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
static int gs_start_io(struct gs_port *port)
{
struct list_head *head = &port->read_pool;
- struct usb_ep *ep = port->port_usb->out;
+ struct usb_ep *ep;
int status;
unsigned started;
+ if (!port->port_usb || !port->port.tty)
+ return -EIO;
+
/* Allocate RX and TX I/O buffers. We can't easily do this much
* earlier (with GFP_KERNEL) because the requests are coupled to
* endpoints, as are the packet sizes we'll be using. Different
* configurations may use different endpoints with a given port;
* and high speed vs full speed changes packet sizes too.
*/
+ ep = port->port_usb->out;
status = gs_alloc_requests(ep, head, gs_read_complete,
&port->read_allocated);
if (status)
@@ -916,8 +920,11 @@ static void __gs_console_push(struct gs_console *cons)
}
req->length = size;
+
+ spin_unlock_irq(&cons->lock);
if (usb_ep_queue(ep, req, GFP_ATOMIC))
req->length = 0;
+ spin_lock_irq(&cons->lock);
}
static void gs_console_work(struct work_struct *work)
@@ -1420,10 +1427,19 @@ EXPORT_SYMBOL_GPL(gserial_disconnect);
void gserial_suspend(struct gserial *gser)
{
- struct gs_port *port = gser->ioport;
+ struct gs_port *port;
unsigned long flags;
- spin_lock_irqsave(&port->port_lock, flags);
+ spin_lock_irqsave(&serial_port_lock, flags);
+ port = gser->ioport;
+
+ if (!port) {
+ spin_unlock_irqrestore(&serial_port_lock, flags);
+ return;
+ }
+
+ spin_lock(&port->port_lock);
+ spin_unlock(&serial_port_lock);
port->suspended = true;
spin_unlock_irqrestore(&port->port_lock, flags);
}
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index dd1c6b2ca7c6..91af3b1ef0d4 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -382,9 +382,12 @@ static void uvcg_video_pump(struct work_struct *work)
{
struct uvc_video *video = container_of(work, struct uvc_video, pump);
struct uvc_video_queue *queue = &video->queue;
+ /* video->max_payload_size is only set when using bulk transfer */
+ bool is_bulk = video->max_payload_size;
struct usb_request *req = NULL;
struct uvc_buffer *buf;
unsigned long flags;
+ bool buf_done;
int ret;
while (video->ep->enabled) {
@@ -408,20 +411,47 @@ static void uvcg_video_pump(struct work_struct *work)
*/
spin_lock_irqsave(&queue->irqlock, flags);
buf = uvcg_queue_head(queue);
- if (buf == NULL) {
+
+ if (buf != NULL) {
+ video->encode(req, video, buf);
+ buf_done = buf->state == UVC_BUF_STATE_DONE;
+ } else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
+ /*
+ * No video buffer available; the queue is still connected and
+ * we're transferring over ISOC. Queue a 0 length request to
+ * prevent missed ISOC transfers.
+ */
+ req->length = 0;
+ buf_done = false;
+ } else {
+ /*
+ * Either the queue has been disconnected or no video buffer is
+ * available for bulk transfer. Either way, stop processing
+ * further.
+ */
spin_unlock_irqrestore(&queue->irqlock, flags);
break;
}
- video->encode(req, video, buf);
-
/*
- * With usb3 we have more requests. This will decrease the
- * interrupt load to a quarter but also catches the corner
- * cases, which needs to be handled.
+ * With USB3 handling more requests at a higher speed, we can't
+ * afford to generate an interrupt for every request. Decide to
+ * interrupt:
+ *
+ * - When no more requests are available in the free queue, as
+ * this may be our last chance to refill the endpoint's
+ * request queue.
+ *
+ * - When this request is the last request for the video
+ * buffer, as we want to start sending the next video buffer
+ * ASAP in case it doesn't get started already in the next
+ * iteration of this loop.
+ *
+ * - Four times over the length of the requests queue (as
+ * indicated by video->uvc_num_requests), as a trade-off
+ * between latency and interrupt load.
*/
- if (list_empty(&video->req_free) ||
- buf->state == UVC_BUF_STATE_DONE ||
+ if (list_empty(&video->req_free) || buf_done ||
!(video->req_int_count %
DIV_ROUND_UP(video->uvc_num_requests, 4))) {
video->req_int_count = 0;
@@ -441,8 +471,7 @@ static void uvcg_video_pump(struct work_struct *work)
/* Endpoint now owns the request */
req = NULL;
- if (buf->state != UVC_BUF_STATE_DONE)
- video->req_int_count++;
+ video->req_int_count++;
}
if (!req)
@@ -527,4 +556,3 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
return 0;
}
-
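As a worked example of the interrupt throttling in uvcg_video_pump() above: with video->uvc_num_requests = 16, DIV_ROUND_UP(16, 4) = 4, so an interrupt is requested on every fourth queued request (and additionally whenever the free list runs empty or a video buffer completes), i.e. roughly four completion interrupts per pass over the request queue.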
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 097854683e5b..a9544fea8723 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -389,8 +389,10 @@ static int gfs_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ ret = -ENOMEM;
goto error_rndis;
+ }
usb_otg_descriptor_init(cdev->gadget, usb_desc);
gfs_otg_desc[0] = usb_desc;
gfs_otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index 133daf88162e..4ca67b2f8f24 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -237,7 +237,7 @@ static int hidg_plat_driver_probe(struct platform_device *pdev)
return 0;
}
-static int hidg_plat_driver_remove(struct platform_device *pdev)
+static void hidg_plat_driver_remove(struct platform_device *pdev)
{
struct hidg_func_node *e, *n;
@@ -245,8 +245,6 @@ static int hidg_plat_driver_remove(struct platform_device *pdev)
list_del(&e->node);
kfree(e);
}
-
- return 0;
}
@@ -263,7 +261,7 @@ static struct usb_composite_driver hidg_driver = {
};
static struct platform_driver hidg_plat_driver = {
- .remove = hidg_plat_driver_remove,
+ .remove_new = hidg_plat_driver_remove,
.driver = {
.name = "hidg",
},
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 83cae6bb12eb..aae1787320d4 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -463,6 +463,8 @@ config USB_ASPEED_UDC
source "drivers/usb/gadget/udc/aspeed-vhub/Kconfig"
+source "drivers/usb/gadget/udc/cdns2/Kconfig"
+
#
# LAST -- dummy/emulated controller
#
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index ee569f63c74a..b52f93e9c61d 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub/
obj-$(CONFIG_USB_ASPEED_UDC) += aspeed_udc.o
obj-$(CONFIG_USB_BDC_UDC) += bdc/
obj-$(CONFIG_USB_MAX3420_UDC) += max3420_udc.o
+obj-$(CONFIG_USB_CDNS2_UDC) += cdns2/
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
index 86398a04a012..16f2db8c4a2b 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
@@ -253,14 +253,14 @@ void ast_vhub_init_hw(struct ast_vhub *vhub)
vhub->regs + AST_VHUB_IER);
}
-static int ast_vhub_remove(struct platform_device *pdev)
+static void ast_vhub_remove(struct platform_device *pdev)
{
struct ast_vhub *vhub = platform_get_drvdata(pdev);
unsigned long flags;
int i;
if (!vhub || !vhub->regs)
- return 0;
+ return;
/* Remove devices */
for (i = 0; i < vhub->max_ports; i++)
@@ -289,8 +289,6 @@ static int ast_vhub_remove(struct platform_device *pdev)
vhub->ep0_bufs,
vhub->ep0_bufs_dma);
vhub->ep0_bufs = NULL;
-
- return 0;
}
static int ast_vhub_probe(struct platform_device *pdev)
@@ -431,7 +429,7 @@ MODULE_DEVICE_TABLE(of, ast_vhub_dt_ids);
static struct platform_driver ast_vhub_driver = {
.probe = ast_vhub_probe,
- .remove = ast_vhub_remove,
+ .remove_new = ast_vhub_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = ast_vhub_dt_ids,
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 53ca38c4b3ec..6c0ed3fa5eb1 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2369,7 +2369,7 @@ static int usba_udc_probe(struct platform_device *pdev)
return 0;
}
-static int usba_udc_remove(struct platform_device *pdev)
+static void usba_udc_remove(struct platform_device *pdev)
{
struct usba_udc *udc;
int i;
@@ -2382,8 +2382,6 @@ static int usba_udc_remove(struct platform_device *pdev)
for (i = 1; i < udc->num_ep; i++)
usba_ep_cleanup_debugfs(&udc->usba_ep[i]);
usba_cleanup_debugfs(udc);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2450,7 +2448,7 @@ static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);
static struct platform_driver udc_driver = {
.probe = usba_udc_probe,
- .remove = usba_udc_remove,
+ .remove_new = usba_udc_remove,
.driver = {
.name = "atmel_usba_udc",
.pm = &usba_udc_pm_ops,
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index a3055dd4acfb..da7011d906e0 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2354,7 +2354,7 @@ report_request_failure:
* bcm63xx_udc_remove - Remove the device from the system.
* @pdev: Platform device struct from the bcm63xx BSP code.
*/
-static int bcm63xx_udc_remove(struct platform_device *pdev)
+static void bcm63xx_udc_remove(struct platform_device *pdev)
{
struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
@@ -2363,13 +2363,11 @@ static int bcm63xx_udc_remove(struct platform_device *pdev)
BUG_ON(udc->driver);
bcm63xx_uninit_udc_hw(udc);
-
- return 0;
}
static struct platform_driver bcm63xx_udc_driver = {
.probe = bcm63xx_udc_probe,
- .remove = bcm63xx_udc_remove,
+ .remove_new = bcm63xx_udc_remove,
.driver = {
.name = DRV_MODULE_NAME,
},
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 9849e0c86e23..35a652807fca 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -583,7 +583,7 @@ disable_clk:
return ret;
}
-static int bdc_remove(struct platform_device *pdev)
+static void bdc_remove(struct platform_device *pdev)
{
struct bdc *bdc;
@@ -593,7 +593,6 @@ static int bdc_remove(struct platform_device *pdev)
bdc_hw_exit(bdc);
bdc_phy_exit(bdc);
clk_disable_unprepare(bdc->clk);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -648,7 +647,7 @@ static struct platform_driver bdc_driver = {
.of_match_table = bdc_of_match,
},
.probe = bdc_probe,
- .remove = bdc_remove,
+ .remove_new = bdc_remove,
};
module_platform_driver(bdc_driver);
diff --git a/drivers/usb/gadget/udc/cdns2/Kconfig b/drivers/usb/gadget/udc/cdns2/Kconfig
new file mode 100644
index 000000000000..c07d353903ea
--- /dev/null
+++ b/drivers/usb/gadget/udc/cdns2/Kconfig
@@ -0,0 +1,11 @@
+config USB_CDNS2_UDC
+ tristate "Cadence USBHS Device Controller"
+ depends on USB_PCI && ACPI && HAS_DMA
+ help
+	  The Cadence USBHS Device controller is a PCI-based USB peripheral
+	  controller that supports both full-speed and high-speed USB 2.0
+	  data transfers.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "cdns2-udc-pci.ko" and to
+ force all gadget drivers to also be dynamically linked.
diff --git a/drivers/usb/gadget/udc/cdns2/Makefile b/drivers/usb/gadget/udc/cdns2/Makefile
new file mode 100644
index 000000000000..a1ffbbe2e768
--- /dev/null
+++ b/drivers/usb/gadget/udc/cdns2/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# define_trace.h needs to know how to find our header
+CFLAGS_cdns2-trace.o := -I$(src)
+
+obj-$(CONFIG_USB_CDNS2_UDC) += cdns2-udc-pci.o
+cdns2-udc-pci-$(CONFIG_USB_CDNS2_UDC) += cdns2-pci.o cdns2-gadget.o cdns2-ep0.o
+cdns2-udc-pci-$(CONFIG_TRACING) += cdns2-trace.o
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-debug.h b/drivers/usb/gadget/udc/cdns2/cdns2-debug.h
new file mode 100644
index 000000000000..be9ae0d28a40
--- /dev/null
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-debug.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USBHS-DEV Driver.
+ * Debug header file.
+ *
+ * Copyright (C) 2023 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#ifndef __LINUX_CDNS2_DEBUG
+#define __LINUX_CDNS2_DEBUG
+
+static inline const char *cdns2_decode_usb_irq(char *str, size_t size,
+ u8 usb_irq, u8 ext_irq)
+{
+ int ret;
+
+ ret = snprintf(str, size, "usbirq: 0x%02x - ", usb_irq);
+
+ if (usb_irq & USBIRQ_SOF)
+ ret += snprintf(str + ret, size - ret, "SOF ");
+ if (usb_irq & USBIRQ_SUTOK)
+ ret += snprintf(str + ret, size - ret, "SUTOK ");
+ if (usb_irq & USBIRQ_SUDAV)
+ ret += snprintf(str + ret, size - ret, "SETUP ");
+ if (usb_irq & USBIRQ_SUSPEND)
+ ret += snprintf(str + ret, size - ret, "Suspend ");
+ if (usb_irq & USBIRQ_URESET)
+ ret += snprintf(str + ret, size - ret, "Reset ");
+ if (usb_irq & USBIRQ_HSPEED)
+ ret += snprintf(str + ret, size - ret, "HS ");
+ if (usb_irq & USBIRQ_LPM)
+ ret += snprintf(str + ret, size - ret, "LPM ");
+
+ ret += snprintf(str + ret, size - ret, ", EXT: 0x%02x - ", ext_irq);
+
+ if (ext_irq & EXTIRQ_WAKEUP)
+ ret += snprintf(str + ret, size - ret, "Wakeup ");
+ if (ext_irq & EXTIRQ_VBUSFAULT_FALL)
+ ret += snprintf(str + ret, size - ret, "VBUS_FALL ");
+ if (ext_irq & EXTIRQ_VBUSFAULT_RISE)
+ ret += snprintf(str + ret, size - ret, "VBUS_RISE ");
+
+ if (ret >= size)
+ pr_info("CDNS2: buffer overflowed.\n");
+
+ return str;
+}
+
+static inline const char *cdns2_decode_dma_irq(char *str, size_t size,
+ u32 ep_ists, u32 ep_sts,
+ const char *ep_name)
+{
+ int ret;
+
+ ret = snprintf(str, size, "ISTS: %08x, %s: %08x ",
+ ep_ists, ep_name, ep_sts);
+
+ if (ep_sts & DMA_EP_STS_IOC)
+ ret += snprintf(str + ret, size - ret, "IOC ");
+ if (ep_sts & DMA_EP_STS_ISP)
+ ret += snprintf(str + ret, size - ret, "ISP ");
+ if (ep_sts & DMA_EP_STS_DESCMIS)
+ ret += snprintf(str + ret, size - ret, "DESCMIS ");
+ if (ep_sts & DMA_EP_STS_TRBERR)
+ ret += snprintf(str + ret, size - ret, "TRBERR ");
+ if (ep_sts & DMA_EP_STS_OUTSMM)
+ ret += snprintf(str + ret, size - ret, "OUTSMM ");
+ if (ep_sts & DMA_EP_STS_ISOERR)
+ ret += snprintf(str + ret, size - ret, "ISOERR ");
+ if (ep_sts & DMA_EP_STS_DBUSY)
+ ret += snprintf(str + ret, size - ret, "DBUSY ");
+ if (DMA_EP_STS_CCS(ep_sts))
+ ret += snprintf(str + ret, size - ret, "CCS ");
+
+ if (ret >= size)
+ pr_info("CDNS2: buffer overflowed.\n");
+
+ return str;
+}
+
+static inline const char *cdns2_decode_epx_irq(char *str, size_t size,
+ char *ep_name, u32 ep_ists,
+ u32 ep_sts)
+{
+ return cdns2_decode_dma_irq(str, size, ep_ists, ep_sts, ep_name);
+}
+
+static inline const char *cdns2_decode_ep0_irq(char *str, size_t size,
+ u32 ep_ists, u32 ep_sts,
+ int dir)
+{
+ return cdns2_decode_dma_irq(str, size, ep_ists, ep_sts,
+ dir ? "ep0IN" : "ep0OUT");
+}
+
+static inline const char *cdns2_raw_ring(struct cdns2_endpoint *pep,
+ struct cdns2_trb *trbs,
+ char *str, size_t size)
+{
+ struct cdns2_ring *ring = &pep->ring;
+ struct cdns2_trb *trb;
+ dma_addr_t dma;
+ int ret;
+ int i;
+
+ ret = snprintf(str, size, "\n\t\tTR for %s:", pep->name);
+
+ trb = &trbs[ring->dequeue];
+ dma = cdns2_trb_virt_to_dma(pep, trb);
+ ret += snprintf(str + ret, size - ret,
+ "\n\t\tRing deq index: %d, trb: V=%p, P=0x%pad\n",
+ ring->dequeue, trb, &dma);
+
+ trb = &trbs[ring->enqueue];
+ dma = cdns2_trb_virt_to_dma(pep, trb);
+ ret += snprintf(str + ret, size - ret,
+ "\t\tRing enq index: %d, trb: V=%p, P=0x%pad\n",
+ ring->enqueue, trb, &dma);
+
+ ret += snprintf(str + ret, size - ret,
+ "\t\tfree trbs: %d, CCS=%d, PCS=%d\n",
+ ring->free_trbs, ring->ccs, ring->pcs);
+
+ if (TRBS_PER_SEGMENT > 40) {
+ ret += snprintf(str + ret, size - ret,
+ "\t\tTransfer ring %d too big\n", TRBS_PER_SEGMENT);
+ return str;
+ }
+
+ dma = ring->dma;
+ for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
+ trb = &trbs[i];
+ ret += snprintf(str + ret, size - ret,
+ "\t\t@%pad %08x %08x %08x\n", &dma,
+ le32_to_cpu(trb->buffer),
+ le32_to_cpu(trb->length),
+ le32_to_cpu(trb->control));
+ dma += sizeof(*trb);
+ }
+
+ if (ret >= size)
+ pr_info("CDNS2: buffer overflowed.\n");
+
+ return str;
+}
+
+static inline const char *cdns2_trb_type_string(u8 type)
+{
+ switch (type) {
+ case TRB_NORMAL:
+ return "Normal";
+ case TRB_LINK:
+ return "Link";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const char *cdns2_decode_trb(char *str, size_t size, u32 flags,
+ u32 length, u32 buffer)
+{
+ int type = TRB_FIELD_TO_TYPE(flags);
+ int ret;
+
+ switch (type) {
+ case TRB_LINK:
+ ret = snprintf(str, size,
+ "LINK %08x type '%s' flags %c:%c:%c%c:%c",
+ buffer, cdns2_trb_type_string(type),
+ flags & TRB_CYCLE ? 'C' : 'c',
+ flags & TRB_TOGGLE ? 'T' : 't',
+ flags & TRB_CHAIN ? 'C' : 'c',
+ flags & TRB_CHAIN ? 'H' : 'h',
+ flags & TRB_IOC ? 'I' : 'i');
+ break;
+ case TRB_NORMAL:
+ ret = snprintf(str, size,
+ "type: '%s', Buffer: %08x, length: %ld, burst len: %ld, "
+ "flags %c:%c:%c%c:%c",
+ cdns2_trb_type_string(type),
+ buffer, TRB_LEN(length),
+ TRB_FIELD_TO_BURST(length),
+ flags & TRB_CYCLE ? 'C' : 'c',
+ flags & TRB_ISP ? 'I' : 'i',
+ flags & TRB_CHAIN ? 'C' : 'c',
+ flags & TRB_CHAIN ? 'H' : 'h',
+ flags & TRB_IOC ? 'I' : 'i');
+ break;
+ default:
+ ret = snprintf(str, size, "type '%s' -> raw %08x %08x %08x",
+ cdns2_trb_type_string(type),
+ buffer, length, flags);
+ }
+
+ if (ret >= size)
+ pr_info("CDNS2: buffer overflowed.\n");
+
+ return str;
+}
+
+#endif /*__LINUX_CDNS2_DEBUG*/
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-ep0.c b/drivers/usb/gadget/udc/cdns2/cdns2-ep0.c
new file mode 100644
index 000000000000..fa12a5d46f2e
--- /dev/null
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-ep0.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBHS-DEV driver.
+ *
+ * Copyright (C) 2023 Cadence Design Systems.
+ *
+ * Authors: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#include <linux/usb/composite.h>
+#include <asm/unaligned.h>
+
+#include "cdns2-gadget.h"
+#include "cdns2-trace.h"
+
+static struct usb_endpoint_descriptor cdns2_gadget_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(64)
+};
+
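+/*
+ * Convert the wIndex field of a control request into an index into
+ * pdev->eps[] (ordered ep0, ep1out, ep1in, ep2out, ep2in, ...).
+ * For example, wIndex 0x81 (ep1 IN) maps to index 2 and wIndex 0x02
+ * (ep2 OUT) maps to index 3.
+ */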
+static int cdns2_w_index_to_ep_index(u16 wIndex)
+{
+ if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
+ return 0;
+
+ return ((wIndex & USB_ENDPOINT_NUMBER_MASK) * 2) +
+ (wIndex & USB_ENDPOINT_DIR_MASK ? 1 : 0) - 1;
+}
+
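+/* Check whether the controller has latched a new SETUP packet (EP0CS_CHGSET). */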
+static bool cdns2_check_new_setup(struct cdns2_device *pdev)
+{
+ u8 reg;
+
+ reg = readb(&pdev->ep0_regs->cs);
+
+ return !!(reg & EP0CS_CHGSET);
+}
+
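+/*
+ * Arm a single transfer on ep0: program one Normal TRB (plus an optional
+ * zero-length TRB when a ZLP is requested), write the ring address to
+ * ep_traddr and set DRDY to start DMA.
+ */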
+static void cdns2_ep0_enqueue(struct cdns2_device *pdev, dma_addr_t dma_addr,
+ unsigned int length, int zlp)
+{
+ struct cdns2_adma_regs __iomem *regs = pdev->adma_regs;
+ struct cdns2_endpoint *pep = &pdev->eps[0];
+ struct cdns2_ring *ring = &pep->ring;
+
+ ring->trbs[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
+ ring->trbs[0].length = cpu_to_le32(TRB_LEN(length));
+
+ if (zlp) {
+ ring->trbs[0].control = cpu_to_le32(TRB_CYCLE |
+ TRB_TYPE(TRB_NORMAL));
+ ring->trbs[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
+ ring->trbs[1].length = cpu_to_le32(TRB_LEN(0));
+ ring->trbs[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
+ TRB_TYPE(TRB_NORMAL));
+ } else {
+ ring->trbs[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
+ TRB_TYPE(TRB_NORMAL));
+ ring->trbs[1].control = 0;
+ }
+
+ trace_cdns2_queue_trb(pep, ring->trbs);
+
+ if (!pep->dir)
+ writel(0, &pdev->ep0_regs->rxbc);
+
+ cdns2_select_ep(pdev, pep->dir);
+
+ writel(DMA_EP_STS_TRBERR, &regs->ep_sts);
+ writel(pep->ring.dma, &regs->ep_traddr);
+
+ trace_cdns2_doorbell_ep0(pep, readl(&regs->ep_traddr));
+
+ writel(DMA_EP_CMD_DRDY, &regs->ep_cmd);
+}
+
+static int cdns2_ep0_delegate_req(struct cdns2_device *pdev)
+{
+ int ret;
+
+ spin_unlock(&pdev->lock);
+ ret = pdev->gadget_driver->setup(&pdev->gadget, &pdev->setup);
+ spin_lock(&pdev->lock);
+
+ return ret;
+}
+
+static void cdns2_ep0_stall(struct cdns2_device *pdev)
+{
+ struct cdns2_endpoint *pep = &pdev->eps[0];
+ struct cdns2_request *preq;
+
+ preq = cdns2_next_preq(&pep->pending_list);
+ set_reg_bit_8(&pdev->ep0_regs->cs, EP0CS_DSTALL);
+
+ if (pdev->ep0_stage == CDNS2_DATA_STAGE && preq)
+ cdns2_gadget_giveback(pep, preq, -ECONNRESET);
+ else if (preq)
+ list_del_init(&preq->list);
+
+ pdev->ep0_stage = CDNS2_SETUP_STAGE;
+ pep->ep_state |= EP_STALLED;
+}
+
+static void cdns2_status_stage(struct cdns2_device *pdev)
+{
+ struct cdns2_endpoint *pep = &pdev->eps[0];
+ struct cdns2_request *preq;
+
+ preq = cdns2_next_preq(&pep->pending_list);
+ if (preq)
+ list_del_init(&preq->list);
+
+ pdev->ep0_stage = CDNS2_SETUP_STAGE;
+ writeb(EP0CS_HSNAK, &pdev->ep0_regs->cs);
+}
+
+static int cdns2_req_ep0_set_configuration(struct cdns2_device *pdev,
+ struct usb_ctrlrequest *ctrl_req)
+{
+ enum usb_device_state state = pdev->gadget.state;
+ u32 config = le16_to_cpu(ctrl_req->wValue);
+ int ret;
+
+ if (state < USB_STATE_ADDRESS) {
+ dev_err(pdev->dev, "Set Configuration - bad device state\n");
+ return -EINVAL;
+ }
+
+ ret = cdns2_ep0_delegate_req(pdev);
+ if (ret)
+ return ret;
+
+ trace_cdns2_device_state(config ? "configured" : "addressed");
+
+ if (!config)
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
+
+ return 0;
+}
+
+static int cdns2_req_ep0_set_address(struct cdns2_device *pdev, u32 addr)
+{
+ enum usb_device_state device_state = pdev->gadget.state;
+ u8 reg;
+
+ if (addr > USB_DEVICE_MAX_ADDRESS) {
+ dev_err(pdev->dev,
+ "Device address (%d) cannot be greater than %d\n",
+ addr, USB_DEVICE_MAX_ADDRESS);
+ return -EINVAL;
+ }
+
+ if (device_state == USB_STATE_CONFIGURED) {
+ dev_err(pdev->dev,
+ "can't set_address from configured state\n");
+ return -EINVAL;
+ }
+
+ reg = readb(&pdev->usb_regs->fnaddr);
+ pdev->dev_address = reg;
+
+ usb_gadget_set_state(&pdev->gadget,
+ (addr ? USB_STATE_ADDRESS : USB_STATE_DEFAULT));
+
+ trace_cdns2_device_state(addr ? "addressed" : "default");
+
+ return 0;
+}
+
+static int cdns2_req_ep0_handle_status(struct cdns2_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct cdns2_endpoint *pep;
+ __le16 *response_pkt;
+ u16 status = 0;
+ int ep_sts;
+ u32 recip;
+
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ status = pdev->gadget.is_selfpowered;
+ status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+ break;
+ case USB_RECIP_INTERFACE:
+ return cdns2_ep0_delegate_req(pdev);
+ case USB_RECIP_ENDPOINT:
+ ep_sts = cdns2_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex));
+ pep = &pdev->eps[ep_sts];
+
+ if (pep->ep_state & EP_STALLED)
+ status = BIT(USB_ENDPOINT_HALT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ put_unaligned_le16(status, (__le16 *)pdev->ep0_preq.request.buf);
+
+ cdns2_ep0_enqueue(pdev, pdev->ep0_preq.request.dma,
+ sizeof(*response_pkt), 0);
+
+ return 0;
+}
+
+static int cdns2_ep0_handle_feature_device(struct cdns2_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ enum usb_device_state state;
+ enum usb_device_speed speed;
+ int ret = 0;
+ u32 wValue;
+ u16 tmode;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ state = pdev->gadget.state;
+ speed = pdev->gadget.speed;
+
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ pdev->may_wakeup = !!set;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
+ return -EINVAL;
+
+ tmode = le16_to_cpu(ctrl->wIndex);
+
+ if (!set || (tmode & 0xff) != 0)
+ return -EINVAL;
+
+ tmode >>= 8;
+ switch (tmode) {
+ case USB_TEST_J:
+ case USB_TEST_K:
+ case USB_TEST_SE0_NAK:
+ case USB_TEST_PACKET:
+ /*
+ * The USBHS controller automatically handles the
+ * Set_Feature(testmode) request. Standard test modes
+ * that use values of test mode selector from
+ * 01h to 04h (Test_J, Test_K, Test_SE0_NAK,
+			 * controller (HS - ACK, FS - STALL).
+ * controller(HS - ack, FS - stall).
+ */
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cdns2_ep0_handle_feature_intf(struct cdns2_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ int ret = 0;
+ u32 wValue;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+
+ switch (wValue) {
+ case USB_INTRF_FUNC_SUSPEND:
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cdns2_ep0_handle_feature_endpoint(struct cdns2_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ struct cdns2_endpoint *pep;
+ u8 wValue;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ pep = &pdev->eps[cdns2_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex))];
+
+ if (wValue != USB_ENDPOINT_HALT)
+ return -EINVAL;
+
+ if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN))
+ return 0;
+
+ switch (wValue) {
+ case USB_ENDPOINT_HALT:
+ if (set || !(pep->ep_state & EP_WEDGE))
+ return cdns2_halt_endpoint(pdev, pep, set);
+ break;
+ default:
+ dev_warn(pdev->dev, "WARN Incorrect wValue %04x\n", wValue);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdns2_req_ep0_handle_feature(struct cdns2_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ return cdns2_ep0_handle_feature_device(pdev, ctrl, set);
+ case USB_RECIP_INTERFACE:
+ return cdns2_ep0_handle_feature_intf(pdev, ctrl, set);
+ case USB_RECIP_ENDPOINT:
+ return cdns2_ep0_handle_feature_endpoint(pdev, ctrl, set);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cdns2_ep0_std_request(struct cdns2_device *pdev)
+{
+ struct usb_ctrlrequest *ctrl = &pdev->setup;
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ ret = cdns2_req_ep0_set_address(pdev,
+ le16_to_cpu(ctrl->wValue));
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ ret = cdns2_req_ep0_set_configuration(pdev, ctrl);
+ break;
+ case USB_REQ_GET_STATUS:
+ ret = cdns2_req_ep0_handle_status(pdev, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ ret = cdns2_req_ep0_handle_feature(pdev, ctrl, 0);
+ break;
+ case USB_REQ_SET_FEATURE:
+ ret = cdns2_req_ep0_handle_feature(pdev, ctrl, 1);
+ break;
+ default:
+ ret = cdns2_ep0_delegate_req(pdev);
+ break;
+ }
+
+ return ret;
+}
+
+static void __pending_setup_status_handler(struct cdns2_device *pdev)
+{
+ struct usb_request *request = pdev->pending_status_request;
+
+ if (pdev->status_completion_no_call && request && request->complete) {
+ request->complete(&pdev->eps[0].endpoint, request);
+ pdev->status_completion_no_call = 0;
+ }
+}
+
+void cdns2_pending_setup_status_handler(struct work_struct *work)
+{
+ struct cdns2_device *pdev = container_of(work, struct cdns2_device,
+ pending_status_wq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ __pending_setup_status_handler(pdev);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+}
+
+void cdns2_handle_setup_packet(struct cdns2_device *pdev)
+{
+ struct usb_ctrlrequest *ctrl = &pdev->setup;
+ struct cdns2_endpoint *pep = &pdev->eps[0];
+ struct cdns2_request *preq;
+ int ret = 0;
+ u16 len;
+ u8 reg;
+ int i;
+
+ writeb(EP0CS_CHGSET, &pdev->ep0_regs->cs);
+
+ for (i = 0; i < 8; i++)
+ ((u8 *)&pdev->setup)[i] = readb(&pdev->ep0_regs->setupdat[i]);
+
+ /*
+	 * If the SETUP packet was modified while being read, simply ignore it.
+	 * The new one will be handled later.
+ */
+ if (cdns2_check_new_setup(pdev)) {
+ trace_cdns2_ep0_setup("overridden");
+ return;
+ }
+
+ trace_cdns2_ctrl_req(ctrl);
+
+ if (!pdev->gadget_driver)
+ goto out;
+
+ if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
+ dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pep = &pdev->eps[0];
+
+ /* Halt for Ep0 is cleared automatically when SETUP packet arrives. */
+ pep->ep_state &= ~EP_STALLED;
+
+ if (!list_empty(&pep->pending_list)) {
+ preq = cdns2_next_preq(&pep->pending_list);
+ cdns2_gadget_giveback(pep, preq, -ECONNRESET);
+ }
+
+ len = le16_to_cpu(ctrl->wLength);
+ if (len)
+ pdev->ep0_stage = CDNS2_DATA_STAGE;
+ else
+ pdev->ep0_stage = CDNS2_STATUS_STAGE;
+
+ pep->dir = ctrl->bRequestType & USB_DIR_IN;
+
+ /*
+	 * The SET_ADDRESS request is acknowledged automatically by the
+	 * controller and, in the worst case, the driver may not notice it.
+	 * To check whether the request has been processed, the driver can
+	 * read the fnaddr register.
+ */
+ reg = readb(&pdev->usb_regs->fnaddr);
+ if (pdev->setup.bRequest != USB_REQ_SET_ADDRESS &&
+ pdev->dev_address != reg)
+ cdns2_req_ep0_set_address(pdev, reg);
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = cdns2_ep0_std_request(pdev);
+ else
+ ret = cdns2_ep0_delegate_req(pdev);
+
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ trace_cdns2_ep0_status_stage("delayed");
+ return;
+ }
+
+out:
+ if (ret < 0)
+ cdns2_ep0_stall(pdev);
+ else if (pdev->ep0_stage == CDNS2_STATUS_STAGE)
+ cdns2_status_stage(pdev);
+}
+
+static void cdns2_transfer_completed(struct cdns2_device *pdev)
+{
+ struct cdns2_endpoint *pep = &pdev->eps[0];
+
+ if (!list_empty(&pep->pending_list)) {
+ struct cdns2_request *preq;
+
+ trace_cdns2_complete_trb(pep, pep->ring.trbs);
+ preq = cdns2_next_preq(&pep->pending_list);
+
+ preq->request.actual =
+ TRB_LEN(le32_to_cpu(pep->ring.trbs->length));
+ cdns2_gadget_giveback(pep, preq, 0);
+ }
+
+ cdns2_status_stage(pdev);
+}
+
+void cdns2_handle_ep0_interrupt(struct cdns2_device *pdev, int dir)
+{
+ u32 ep_sts_reg;
+
+ cdns2_select_ep(pdev, dir);
+
+ trace_cdns2_ep0_irq(pdev);
+
+ ep_sts_reg = readl(&pdev->adma_regs->ep_sts);
+ writel(ep_sts_reg, &pdev->adma_regs->ep_sts);
+
+ __pending_setup_status_handler(pdev);
+
+ if ((ep_sts_reg & DMA_EP_STS_IOC) || (ep_sts_reg & DMA_EP_STS_ISP)) {
+ pdev->eps[0].dir = dir;
+ cdns2_transfer_completed(pdev);
+ }
+}
+
+/*
+ * This function shouldn't be called by the gadget driver;
+ * endpoint 0 is always active.
+ */
+static int cdns2_gadget_ep0_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EINVAL;
+}
+
+/*
+ * This function shouldn't be called by the gadget driver;
+ * endpoint 0 is always active.
+ */
+static int cdns2_gadget_ep0_disable(struct usb_ep *ep)
+{
+ return -EINVAL;
+}
+
+static int cdns2_gadget_ep0_set_halt(struct usb_ep *ep, int value)
+{
+ struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
+ struct cdns2_device *pdev = pep->pdev;
+ unsigned long flags;
+
+ if (!value)
+ return 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ cdns2_ep0_stall(pdev);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdns2_gadget_ep0_set_wedge(struct usb_ep *ep)
+{
+ return cdns2_gadget_ep0_set_halt(ep, 1);
+}
+
+static int cdns2_gadget_ep0_queue(struct usb_ep *ep,
+ struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
+ struct cdns2_device *pdev = pep->pdev;
+ struct cdns2_request *preq;
+ unsigned long flags;
+ u8 zlp = 0;
+ int ret;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ preq = to_cdns2_request(request);
+
+ trace_cdns2_request_enqueue(preq);
+
+	/* Cancel the request if the controller receives a new SETUP packet. */
+ if (cdns2_check_new_setup(pdev)) {
+ trace_cdns2_ep0_setup("overridden");
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ return -ECONNRESET;
+ }
+
+ /* Send STATUS stage. Should be called only for SET_CONFIGURATION. */
+ if (pdev->ep0_stage == CDNS2_STATUS_STAGE) {
+ cdns2_status_stage(pdev);
+
+ request->actual = 0;
+ pdev->status_completion_no_call = true;
+ pdev->pending_status_request = request;
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_CONFIGURED);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ /*
+		 * Since there is no completion interrupt for the status stage,
+		 * the request's ->complete callback needs to be invoked in
+		 * software after cdns2_gadget_ep0_queue() returns.
+ */
+ queue_work(system_freezable_wq, &pdev->pending_status_wq);
+ return 0;
+ }
+
+ if (!list_empty(&pep->pending_list)) {
+ trace_cdns2_ep0_setup("pending");
+ dev_err(pdev->dev,
+ "can't handle multiple requests for ep0\n");
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ return -EBUSY;
+ }
+
+ ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->dir);
+ if (ret) {
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ dev_err(pdev->dev, "failed to map request\n");
+ return -EINVAL;
+ }
+
+ request->status = -EINPROGRESS;
+ list_add_tail(&preq->list, &pep->pending_list);
+
+ if (request->zero && request->length &&
+ (request->length % ep->maxpacket == 0))
+ zlp = 1;
+
+ cdns2_ep0_enqueue(pdev, request->dma, request->length, zlp);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_ep_ops cdns2_gadget_ep0_ops = {
+ .enable = cdns2_gadget_ep0_enable,
+ .disable = cdns2_gadget_ep0_disable,
+ .alloc_request = cdns2_gadget_ep_alloc_request,
+ .free_request = cdns2_gadget_ep_free_request,
+ .queue = cdns2_gadget_ep0_queue,
+ .dequeue = cdns2_gadget_ep_dequeue,
+ .set_halt = cdns2_gadget_ep0_set_halt,
+ .set_wedge = cdns2_gadget_ep0_set_wedge,
+};
+
+void cdns2_ep0_config(struct cdns2_device *pdev)
+{
+ struct cdns2_endpoint *pep;
+
+ pep = &pdev->eps[0];
+
+ if (!list_empty(&pep->pending_list)) {
+ struct cdns2_request *preq;
+
+ preq = cdns2_next_preq(&pep->pending_list);
+ list_del_init(&preq->list);
+ }
+
+ writeb(EP0_FIFO_AUTO, &pdev->ep0_regs->fifo);
+ cdns2_select_ep(pdev, USB_DIR_OUT);
+ writel(DMA_EP_CFG_ENABLE, &pdev->adma_regs->ep_cfg);
+
+ writeb(EP0_FIFO_IO_TX | EP0_FIFO_AUTO, &pdev->ep0_regs->fifo);
+ cdns2_select_ep(pdev, USB_DIR_IN);
+ writel(DMA_EP_CFG_ENABLE, &pdev->adma_regs->ep_cfg);
+
+ writeb(pdev->gadget.ep0->maxpacket, &pdev->ep0_regs->maxpack);
+ writel(DMA_EP_IEN_EP_OUT0 | DMA_EP_IEN_EP_IN0,
+ &pdev->adma_regs->ep_ien);
+}
+
+void cdns2_init_ep0(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep)
+{
+ u16 maxpacket = le16_to_cpu(cdns2_gadget_ep0_desc.wMaxPacketSize);
+
+ usb_ep_set_maxpacket_limit(&pep->endpoint, maxpacket);
+
+ pep->endpoint.ops = &cdns2_gadget_ep0_ops;
+ pep->endpoint.desc = &cdns2_gadget_ep0_desc;
+ pep->endpoint.caps.type_control = true;
+ pep->endpoint.caps.dir_in = true;
+ pep->endpoint.caps.dir_out = true;
+
+ pdev->gadget.ep0 = &pep->endpoint;
+}
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
new file mode 100644
index 000000000000..0eed0e03842c
--- /dev/null
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
@@ -0,0 +1,2474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBHS-DEV Driver - gadget side.
+ *
+ * Copyright (C) 2023 Cadence Design Systems.
+ *
+ * Authors: Pawel Laszczak <pawell@cadence.com>
+ */
+
+/*
+ * Workaround 1:
+ * In some situations the controller may latch a stale data address from
+ * a TRB, following this sequence:
+ * 1. Controller reads a TRB, including its data address.
+ * 2. Software updates the TRBs, including the data address and Cycle bit.
+ * 3. Controller reads the TRB again, now including the Cycle bit.
+ * 4. DMA runs with the stale data address.
+ *
+ * To fix this problem, the driver initially marks the first TRB in a TD as
+ * invalid. After preparing all TRBs it checks the DMA position: if DMA
+ * points to the TRB that was just added and the doorbell is set, the driver
+ * must defer making this TRB valid. The TRB is then made valid while queuing
+ * the next TRB, but only once DMA has stopped or on a TRBERR interrupt.
+ */
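+/*
+ * The guard described above is set in cdns2_wa1_update_guard() and the
+ * deferred cycle bit is restored in cdns2_wa1_restore_cycle_bit() and
+ * cdns2_wa1_tray_restore_cycle_bit() below.
+ */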
+
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/property.h>
+#include <linux/dmapool.h>
+#include <linux/iopoll.h>
+
+#include "cdns2-gadget.h"
+#include "cdns2-trace.h"
+
+/**
+ * set_reg_bit_32 - set bits in a given 32-bit register.
+ * @ptr: register address.
+ * @mask: bits to set.
+ */
+static void set_reg_bit_32(void __iomem *ptr, u32 mask)
+{
+ mask = readl(ptr) | mask;
+ writel(mask, ptr);
+}
+
+/*
+ * clear_reg_bit_32 - clear bits in a given 32-bit register.
+ * @ptr: register address.
+ * @mask: bits to clear.
+ */
+static void clear_reg_bit_32(void __iomem *ptr, u32 mask)
+{
+ mask = readl(ptr) & ~mask;
+ writel(mask, ptr);
+}
+
+/* Clear bits in a given 8-bit register. */
+static void clear_reg_bit_8(void __iomem *ptr, u8 mask)
+{
+ mask = readb(ptr) & ~mask;
+ writeb(mask, ptr);
+}
+
+/* Set bits in a given 8-bit register. */
+void set_reg_bit_8(void __iomem *ptr, u8 mask)
+{
+ mask = readb(ptr) | mask;
+ writeb(mask, ptr);
+}
+
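+/*
+ * Return the index of the TRB currently pointed to by DMA for the selected
+ * endpoint, derived from ep_traddr relative to the start of the ring.
+ */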
+static int cdns2_get_dma_pos(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep)
+{
+ int dma_index;
+
+ dma_index = readl(&pdev->adma_regs->ep_traddr) - pep->ring.dma;
+
+ return dma_index / TRB_SIZE;
+}
+
+/* Get next private request from list. */
+struct cdns2_request *cdns2_next_preq(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct cdns2_request, list);
+}
+
+void cdns2_select_ep(struct cdns2_device *pdev, u32 ep)
+{
+ if (pdev->selected_ep == ep)
+ return;
+
+ pdev->selected_ep = ep;
+ writel(ep, &pdev->adma_regs->ep_sel);
+}
+
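+/* Translate a TRB pointer within the transfer ring into its DMA address. */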
+dma_addr_t cdns2_trb_virt_to_dma(struct cdns2_endpoint *pep,
+ struct cdns2_trb *trb)
+{
+ u32 offset = (char *)trb - (char *)pep->ring.trbs;
+
+ return pep->ring.dma + offset;
+}
+
+static void cdns2_free_tr_segment(struct cdns2_endpoint *pep)
+{
+ struct cdns2_device *pdev = pep->pdev;
+ struct cdns2_ring *ring = &pep->ring;
+
+ if (pep->ring.trbs) {
+ dma_pool_free(pdev->eps_dma_pool, ring->trbs, ring->dma);
+ memset(ring, 0, sizeof(*ring));
+ }
+}
+
+/* Allocates Transfer Ring segment. */
+static int cdns2_alloc_tr_segment(struct cdns2_endpoint *pep)
+{
+ struct cdns2_device *pdev = pep->pdev;
+ struct cdns2_trb *link_trb;
+ struct cdns2_ring *ring;
+
+ ring = &pep->ring;
+
+ if (!ring->trbs) {
+ ring->trbs = dma_pool_alloc(pdev->eps_dma_pool,
+ GFP_DMA32 | GFP_ATOMIC,
+ &ring->dma);
+ if (!ring->trbs)
+ return -ENOMEM;
+ }
+
+ memset(ring->trbs, 0, TR_SEG_SIZE);
+
+ if (!pep->num)
+ return 0;
+
+ /* Initialize the last TRB as Link TRB */
+ link_trb = (ring->trbs + (TRBS_PER_SEGMENT - 1));
+ link_trb->buffer = cpu_to_le32(TRB_BUFFER(ring->dma));
+ link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) |
+ TRB_TOGGLE);
+
+ return 0;
+}
+
+/*
+ * Stalls and flushes selected endpoint.
+ * Endpoint must be selected before invoking this function.
+ */
+static void cdns2_ep_stall_flush(struct cdns2_endpoint *pep)
+{
+ struct cdns2_device *pdev = pep->pdev;
+ int val;
+
+ trace_cdns2_ep_halt(pep, 1, 1);
+
+ writel(DMA_EP_CMD_DFLUSH, &pdev->adma_regs->ep_cmd);
+
+ /* Wait for DFLUSH cleared. */
+ readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
+ !(val & DMA_EP_CMD_DFLUSH), 1, 1000);
+ pep->ep_state |= EP_STALLED;
+ pep->ep_state &= ~EP_STALL_PENDING;
+}
+
+/*
+ * Increment a trb index.
+ *
+ * The index should never point to the last Link TRB in the TR. After
+ * incrementing, if it points to the Link TRB, wrap around to the beginning
+ * and invert the cycle state bit. The Link TRB is always the last TRB entry.
+ */
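+/*
+ * For example, with a segment of 3 TRBs the index cycles 0, 1, 0, 1, ...,
+ * never landing on slot 2 (the Link TRB) and toggling the cycle state on
+ * every wrap.
+ */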
+static void cdns2_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
+{
+ (*index)++;
+ if (*index == (trb_in_seg - 1)) {
+ *index = 0;
+ *cs ^= 1;
+ }
+}
+
+static void cdns2_ep_inc_enq(struct cdns2_ring *ring)
+{
+ ring->free_trbs--;
+ cdns2_ep_inc_trb(&ring->enqueue, &ring->pcs, TRBS_PER_SEGMENT);
+}
+
+static void cdns2_ep_inc_deq(struct cdns2_ring *ring)
+{
+ ring->free_trbs++;
+ cdns2_ep_inc_trb(&ring->dequeue, &ring->ccs, TRBS_PER_SEGMENT);
+}
+
+/*
+ * Enable/disable LPM.
+ *
+ * If the USBCS_LPMNYET bit is not set and the device receives an Extended
+ * Token packet, the controller answers with an ACK handshake.
+ * If USBCS_LPMNYET is set and the device receives an Extended Token packet,
+ * the controller answers with a NYET handshake.
+ */
+static void cdns2_enable_l1(struct cdns2_device *pdev, int enable)
+{
+ if (enable) {
+ clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
+ writeb(LPMCLOCK_SLEEP_ENTRY, &pdev->usb_regs->lpmclock);
+ } else {
+ set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
+ }
+}
+
+static enum usb_device_speed cdns2_get_speed(struct cdns2_device *pdev)
+{
+ u8 speed = readb(&pdev->usb_regs->speedctrl);
+
+ if (speed & SPEEDCTRL_HS)
+ return USB_SPEED_HIGH;
+ else if (speed & SPEEDCTRL_FS)
+ return USB_SPEED_FULL;
+
+ return USB_SPEED_UNKNOWN;
+}
+
+static struct cdns2_trb *cdns2_next_trb(struct cdns2_endpoint *pep,
+ struct cdns2_trb *trb)
+{
+ if (trb == (pep->ring.trbs + (TRBS_PER_SEGMENT - 1)))
+ return pep->ring.trbs;
+ else
+ return ++trb;
+}
+
+void cdns2_gadget_giveback(struct cdns2_endpoint *pep,
+ struct cdns2_request *preq,
+ int status)
+{
+ struct usb_request *request = &preq->request;
+ struct cdns2_device *pdev = pep->pdev;
+
+ list_del_init(&preq->list);
+
+ if (request->status == -EINPROGRESS)
+ request->status = status;
+
+ usb_gadget_unmap_request_by_dev(pdev->dev, request, pep->dir);
+
+ /* All TRBs have finished, clear the counter. */
+ preq->finished_trb = 0;
+
+ trace_cdns2_request_giveback(preq);
+
+ if (request->complete) {
+ spin_unlock(&pdev->lock);
+ usb_gadget_giveback_request(&pep->endpoint, request);
+ spin_lock(&pdev->lock);
+ }
+
+ if (request->buf == pdev->zlp_buf)
+ cdns2_gadget_ep_free_request(&pep->endpoint, request);
+}
+
+static void cdns2_wa1_restore_cycle_bit(struct cdns2_endpoint *pep)
+{
+	/* Workaround for a stale data address in the TRB. */
+ if (pep->wa1_set) {
+ trace_cdns2_wa1(pep, "restore cycle bit");
+
+ pep->wa1_set = 0;
+ pep->wa1_trb_index = 0xFFFF;
+ if (pep->wa1_cycle_bit)
+ pep->wa1_trb->control |= cpu_to_le32(0x1);
+ else
+ pep->wa1_trb->control &= cpu_to_le32(~0x1);
+ }
+}
+
+static int cdns2_wa1_update_guard(struct cdns2_endpoint *pep,
+ struct cdns2_trb *trb)
+{
+ struct cdns2_device *pdev = pep->pdev;
+
+ if (!pep->wa1_set) {
+ u32 doorbell;
+
+ doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
+
+ if (doorbell) {
+ pep->wa1_cycle_bit = pep->ring.pcs ? TRB_CYCLE : 0;
+ pep->wa1_set = 1;
+ pep->wa1_trb = trb;
+ pep->wa1_trb_index = pep->ring.enqueue;
+ trace_cdns2_wa1(pep, "set guard");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void cdns2_wa1_tray_restore_cycle_bit(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep)
+{
+ int dma_index;
+ u32 doorbell;
+
+ doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
+ dma_index = cdns2_get_dma_pos(pdev, pep);
+
+ if (!doorbell || dma_index != pep->wa1_trb_index)
+ cdns2_wa1_restore_cycle_bit(pep);
+}
+
+static int cdns2_prepare_ring(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep,
+ int num_trbs)
+{
+ struct cdns2_trb *link_trb = NULL;
+ int doorbell, dma_index;
+ struct cdns2_ring *ring;
+ u32 ch_bit = 0;
+
+ ring = &pep->ring;
+
+ if (num_trbs > ring->free_trbs) {
+ pep->ep_state |= EP_RING_FULL;
+ trace_cdns2_no_room_on_ring("Ring full\n");
+ return -ENOBUFS;
+ }
+
+ if ((ring->enqueue + num_trbs) >= (TRBS_PER_SEGMENT - 1)) {
+ doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
+ dma_index = cdns2_get_dma_pos(pdev, pep);
+
+		/* The driver can't update the LINK TRB if it is currently being processed. */
+ if (doorbell && dma_index == TRBS_PER_SEGMENT - 1) {
+ pep->ep_state |= EP_DEFERRED_DRDY;
+ return -ENOBUFS;
+ }
+
+		/* Update the cycle bit in the Link TRB before starting DMA. */
+ link_trb = ring->trbs + (TRBS_PER_SEGMENT - 1);
+
+ /*
+		 * For TRs of size 2, enabling TRB_CHAIN for epXin causes
+		 * DMA to get stuck at the LINK TRB.
+		 * On the other hand, removing TRB_CHAIN for longer TRs on
+		 * epXout causes DMA to get stuck after handling the LINK TRB.
+		 * To eliminate this strange behavior the driver sets the
+		 * TRB_CHAIN bit only for TR size > 2.
+ */
+ if (pep->type == USB_ENDPOINT_XFER_ISOC || TRBS_PER_SEGMENT > 2)
+ ch_bit = TRB_CHAIN;
+
+ link_trb->control = cpu_to_le32(((ring->pcs) ? TRB_CYCLE : 0) |
+ TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
+ }
+
+ return 0;
+}
+
+static void cdns2_dbg_request_trbs(struct cdns2_endpoint *pep,
+ struct cdns2_request *preq)
+{
+ struct cdns2_trb *link_trb = pep->ring.trbs + (TRBS_PER_SEGMENT - 1);
+ struct cdns2_trb *trb = preq->trb;
+ int num_trbs = preq->num_of_trb;
+ int i = 0;
+
+ while (i < num_trbs) {
+ trace_cdns2_queue_trb(pep, trb + i);
+ if (trb + i == link_trb) {
+ trb = pep->ring.trbs;
+ num_trbs = num_trbs - i;
+ i = 0;
+ } else {
+ i++;
+ }
+ }
+}
+
+static unsigned int cdns2_count_trbs(struct cdns2_endpoint *pep,
+ u64 addr, u64 len)
+{
+ unsigned int num_trbs = 1;
+
+ if (pep->type == USB_ENDPOINT_XFER_ISOC) {
+ /*
+		 * To maximize DMA performance, a buffer should not cross a
+		 * 4KB boundary for high-bandwidth transfers, so the driver
+		 * splits such a buffer across multiple TRBs.
+ */
+ num_trbs = DIV_ROUND_UP(len +
+ (addr & (TRB_MAX_ISO_BUFF_SIZE - 1)),
+ TRB_MAX_ISO_BUFF_SIZE);
+
+ if (pep->interval > 1)
+ num_trbs = pep->dir ? num_trbs * pep->interval : 1;
+ } else if (pep->dir) {
+ /*
+		 * One extra Link TRB for the IN direction.
+		 * Sometimes DMA does not advance to the next TD and the transfer
+		 * hangs. This extra Link TRB forces DMA to advance to the next TD.
+ */
+ num_trbs++;
+ }
+
+ return num_trbs;
+}
+
+static unsigned int cdns2_count_sg_trbs(struct cdns2_endpoint *pep,
+ struct usb_request *req)
+{
+ unsigned int i, len, full_len, num_trbs = 0;
+ struct scatterlist *sg;
+ int trb_len = 0;
+
+ full_len = req->length;
+
+ for_each_sg(req->sg, sg, req->num_sgs, i) {
+ len = sg_dma_len(sg);
+ num_trbs += cdns2_count_trbs(pep, sg_dma_address(sg), len);
+ len = min(len, full_len);
+
+ /*
+		 * For HS ISO transfers, TRBs should not exceed the max packet
+		 * size. When DMA is running and the data exceeds the max packet
+		 * size, some data is read in single mode instead of burst mode,
+		 * which drastically reduces the copying speed.
+		 * To avoid this we need one or two extra TRBs.
+		 * This issue occurs for the UVC class with sg_supported = 1
+		 * because buffer addresses are not aligned to 1024.
+ */
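+		/*
+		 * For example, if trb_len below accumulates to 2500 bytes,
+		 * temp is 2 and 2500 % 1024 is non-zero, so two extra TRBs
+		 * are reserved for this block.
+		 */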
+ if (pep->type == USB_ENDPOINT_XFER_ISOC) {
+ u8 temp;
+
+ trb_len += len;
+ temp = trb_len >> 10;
+
+ if (temp) {
+ if (trb_len % 1024)
+ num_trbs = num_trbs + temp;
+ else
+ num_trbs = num_trbs + temp - 1;
+
+ trb_len = trb_len - (temp << 10);
+ }
+ }
+
+ full_len -= len;
+ if (full_len == 0)
+ break;
+ }
+
+ return num_trbs;
+}
+
+/*
+ * This function prepares an array of optimized AXI burst values for
+ * different transfer lengths. The controller handles the trailing data,
+ * which is smaller than the AXI burst size, as individual transactions.
+ * e.g.:
+ * Assume the driver prepares a TRB with trb->length 700 and the burst size
+ * is set to 128. In this case the controller will handle the first 512 bytes
+ * as a single AXI transaction, but the next 188 bytes will be handled
+ * as 47 separate AXI transactions.
+ * A better choice is a burst size of 16, which gives only 25 AXI
+ * transactions (10 * 64 + 15 * 4 bytes).
+ */
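+/*
+ * For instance, the loop below computes burst_opt[700] = 32: 175 words
+ * split into 5 bursts of 32 words plus 15 leftover words gives the
+ * smallest opt_burst value (5 + 15 = 20) among the candidate bursts.
+ */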
+static void cdsn2_isoc_burst_opt(struct cdns2_device *pdev)
+{
+ int axi_burst_option[] = {1, 2, 4, 8, 16, 32, 64, 128};
+ int best_burst;
+ int array_size;
+ int opt_burst;
+ int trb_size;
+ int i, j;
+
+ array_size = ARRAY_SIZE(axi_burst_option);
+
+ for (i = 0; i <= MAX_ISO_SIZE; i++) {
+ trb_size = i / 4;
+ best_burst = trb_size ? trb_size : 1;
+
+ for (j = 0; j < array_size; j++) {
+ opt_burst = trb_size / axi_burst_option[j];
+ opt_burst += trb_size % axi_burst_option[j];
+
+ if (opt_burst < best_burst) {
+ best_burst = opt_burst;
+ pdev->burst_opt[i] = axi_burst_option[j];
+ }
+ }
+ }
+}
+
+static void cdns2_ep_tx_isoc(struct cdns2_endpoint *pep,
+ struct cdns2_request *preq,
+ int num_trbs)
+{
+ struct scatterlist *sg = NULL;
+ u32 remaining_packet_size = 0;
+ struct cdns2_trb *trb;
+ bool first_trb = true;
+ dma_addr_t trb_dma;
+ u32 trb_buff_len;
+ u32 block_length;
+ int td_idx = 0;
+ int split_size;
+ u32 full_len;
+ int enqd_len;
+ int sent_len;
+ int sg_iter;
+ u32 control;
+ int num_tds;
+ u32 length;
+
+ /*
+	 * For the OUT direction one TD per interval is enough
+	 * because the TRBs are not dumped by the controller.
+ */
+ num_tds = pep->dir ? pep->interval : 1;
+ split_size = preq->request.num_sgs ? 1024 : 3072;
+
+ for (td_idx = 0; td_idx < num_tds; td_idx++) {
+ if (preq->request.num_sgs) {
+ sg = preq->request.sg;
+ trb_dma = sg_dma_address(sg);
+ block_length = sg_dma_len(sg);
+ } else {
+ trb_dma = preq->request.dma;
+ block_length = preq->request.length;
+ }
+
+ full_len = preq->request.length;
+ sg_iter = preq->request.num_sgs ? preq->request.num_sgs : 1;
+ remaining_packet_size = split_size;
+
+ for (enqd_len = 0; enqd_len < full_len;
+ enqd_len += trb_buff_len) {
+ if (remaining_packet_size == 0)
+ remaining_packet_size = split_size;
+
+ /*
+			 * Calculate the TRB length - the buffer can't cross a
+			 * 4KB boundary or exceed the max packet size.
+ */
+ trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(trb_dma);
+ trb_buff_len = min(trb_buff_len, remaining_packet_size);
+ trb_buff_len = min(trb_buff_len, block_length);
+
+ if (trb_buff_len > full_len - enqd_len)
+ trb_buff_len = full_len - enqd_len;
+
+ control = TRB_TYPE(TRB_NORMAL);
+
+ /*
+			 * For the IN direction the driver has to set IOC for
+			 * the last TRB in the last TD.
+			 * For the OUT direction the driver must set IOC and ISP
+			 * only for the last TRB in each TD.
+ */
+ if (enqd_len + trb_buff_len >= full_len || !pep->dir)
+ control |= TRB_IOC | TRB_ISP;
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling
+ * the cycle bit) until we've finished creating all the
+ * other TRBs.
+ */
+ if (first_trb) {
+ first_trb = false;
+ if (pep->ring.pcs == 0)
+ control |= TRB_CYCLE;
+ } else {
+ control |= pep->ring.pcs;
+ }
+
+ if (enqd_len + trb_buff_len < full_len)
+ control |= TRB_CHAIN;
+
+ length = TRB_LEN(trb_buff_len) |
+ TRB_BURST(pep->pdev->burst_opt[trb_buff_len]);
+
+ trb = pep->ring.trbs + pep->ring.enqueue;
+ trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
+ trb->length = cpu_to_le32(length);
+ trb->control = cpu_to_le32(control);
+
+ trb_dma += trb_buff_len;
+ sent_len = trb_buff_len;
+
+ if (sg && sent_len >= block_length) {
+ /* New sg entry */
+ --sg_iter;
+ sent_len -= block_length;
+ if (sg_iter != 0) {
+ sg = sg_next(sg);
+ trb_dma = sg_dma_address(sg);
+ block_length = sg_dma_len(sg);
+ }
+ }
+
+ remaining_packet_size -= trb_buff_len;
+ block_length -= sent_len;
+ preq->end_trb = pep->ring.enqueue;
+
+ cdns2_ep_inc_enq(&pep->ring);
+ }
+ }
+}
+
+static void cdns2_ep_tx_bulk(struct cdns2_endpoint *pep,
+ struct cdns2_request *preq,
+ int trbs_per_td)
+{
+ struct scatterlist *sg = NULL;
+ struct cdns2_ring *ring;
+ struct cdns2_trb *trb;
+ dma_addr_t trb_dma;
+ int sg_iter = 0;
+ u32 control;
+ u32 length;
+
+ if (preq->request.num_sgs) {
+ sg = preq->request.sg;
+ trb_dma = sg_dma_address(sg);
+ length = sg_dma_len(sg);
+ } else {
+ trb_dma = preq->request.dma;
+ length = preq->request.length;
+ }
+
+ ring = &pep->ring;
+
+ for (sg_iter = 0; sg_iter < trbs_per_td; sg_iter++) {
+ control = TRB_TYPE(TRB_NORMAL) | ring->pcs | TRB_ISP;
+ trb = pep->ring.trbs + ring->enqueue;
+
+ if (pep->dir && sg_iter == trbs_per_td - 1) {
+ preq->end_trb = ring->enqueue;
+ control = ring->pcs | TRB_TYPE(TRB_LINK) | TRB_CHAIN
+ | TRB_IOC;
+ cdns2_ep_inc_enq(&pep->ring);
+
+ if (ring->enqueue == 0)
+ control |= TRB_TOGGLE;
+
+			/* Point the Link TRB at the next free TRB in the ring. */
+ trb->buffer = cpu_to_le32(pep->ring.dma +
+ (ring->enqueue * TRB_SIZE));
+ trb->length = 0;
+ trb->control = cpu_to_le32(control);
+ break;
+ }
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling
+ * the cycle bit) until we've finished creating all the
+ * other TRBs.
+ */
+ if (sg_iter == 0)
+ control = control ^ TRB_CYCLE;
+
+ /* For last TRB in TD. */
+ if (sg_iter == (trbs_per_td - (pep->dir ? 2 : 1)))
+ control |= TRB_IOC;
+ else
+ control |= TRB_CHAIN;
+
+ trb->buffer = cpu_to_le32(trb_dma);
+ trb->length = cpu_to_le32(TRB_BURST(pep->trb_burst_size) |
+ TRB_LEN(length));
+ trb->control = cpu_to_le32(control);
+
+ if (sg && sg_iter < (trbs_per_td - 1)) {
+ sg = sg_next(sg);
+ trb_dma = sg_dma_address(sg);
+ length = sg_dma_len(sg);
+ }
+
+ preq->end_trb = ring->enqueue;
+ cdns2_ep_inc_enq(&pep->ring);
+ }
+}
+
+static void cdns2_set_drdy(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep)
+{
+ trace_cdns2_ring(pep);
+
+ /*
+ * Memory barrier - Cycle Bit must be set before doorbell.
+ */
+ dma_wmb();
+
+ /* Clearing TRBERR and DESCMIS before setting DRDY. */
+ writel(DMA_EP_STS_TRBERR | DMA_EP_STS_DESCMIS,
+ &pdev->adma_regs->ep_sts);
+ writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);
+
+ if (readl(&pdev->adma_regs->ep_sts) & DMA_EP_STS_TRBERR) {
+ writel(DMA_EP_STS_TRBERR, &pdev->adma_regs->ep_sts);
+ writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);
+ }
+
+ trace_cdns2_doorbell_epx(pep, readl(&pdev->adma_regs->ep_traddr));
+}
+
+static int cdns2_prepare_first_isoc_transfer(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep)
+{
+ struct cdns2_trb *trb;
+ u32 buffer;
+ u8 hw_ccs;
+
+ if ((readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY))
+ return -EBUSY;
+
+ if (!pep->dir) {
+ set_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
+ writel(pep->ring.dma + pep->ring.dequeue,
+ &pdev->adma_regs->ep_traddr);
+ return 0;
+ }
+
+ /*
+	 * The first packet after the doorbell can be corrupted, so
+	 * the driver prepares a zero-length packet as the first packet.
+ */
+ buffer = pep->ring.dma + pep->ring.dequeue * TRB_SIZE;
+ hw_ccs = !!DMA_EP_STS_CCS(readl(&pdev->adma_regs->ep_sts));
+
+ trb = &pep->ring.trbs[TRBS_PER_SEGMENT];
+ trb->length = 0;
+ trb->buffer = cpu_to_le32(TRB_BUFFER(buffer));
+ trb->control = cpu_to_le32((hw_ccs ? TRB_CYCLE : 0) | TRB_TYPE(TRB_NORMAL));
+
+ /*
+	 * A LINK TRB is used to force the controller to update the cycle bit
+	 * and move to the correct place in the transfer ring.
+ */
+ trb++;
+ trb->length = 0;
+ trb->buffer = cpu_to_le32(TRB_BUFFER(buffer));
+ trb->control = cpu_to_le32((hw_ccs ? TRB_CYCLE : 0) |
+ TRB_TYPE(TRB_LINK) | TRB_CHAIN);
+
+ if (hw_ccs != pep->ring.ccs)
+ trb->control |= cpu_to_le32(TRB_TOGGLE);
+
+ set_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
+ writel(pep->ring.dma + (TRBS_PER_SEGMENT * TRB_SIZE),
+ &pdev->adma_regs->ep_traddr);
+
+ return 0;
+}
+
+/* Prepare and start a transfer on a non-default endpoint. */
+static int cdns2_ep_run_transfer(struct cdns2_endpoint *pep,
+ struct cdns2_request *preq)
+{
+ struct cdns2_device *pdev = pep->pdev;
+ struct cdns2_ring *ring;
+ u32 togle_pcs = 1;
+ int num_trbs;
+ int ret;
+
+ cdns2_select_ep(pdev, pep->endpoint.address);
+
+ if (preq->request.sg)
+ num_trbs = cdns2_count_sg_trbs(pep, &preq->request);
+ else
+ num_trbs = cdns2_count_trbs(pep, preq->request.dma,
+ preq->request.length);
+
+ ret = cdns2_prepare_ring(pdev, pep, num_trbs);
+ if (ret)
+ return ret;
+
+ ring = &pep->ring;
+ preq->start_trb = ring->enqueue;
+ preq->trb = ring->trbs + ring->enqueue;
+
+ if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
+ cdns2_ep_tx_isoc(pep, preq, num_trbs);
+ } else {
+ togle_pcs = cdns2_wa1_update_guard(pep, ring->trbs + ring->enqueue);
+ cdns2_ep_tx_bulk(pep, preq, num_trbs);
+ }
+
+ preq->num_of_trb = num_trbs;
+
+ /*
+ * Memory barrier - cycle bit must be set as the last operation.
+ */
+ dma_wmb();
+
+ /* Give the TD to the consumer. */
+ if (togle_pcs)
+ preq->trb->control = preq->trb->control ^ cpu_to_le32(1);
+
+ cdns2_wa1_tray_restore_cycle_bit(pdev, pep);
+ cdns2_dbg_request_trbs(pep, preq);
+
+ if (!pep->wa1_set && !(pep->ep_state & EP_STALLED) && !pep->skip) {
+ if (pep->type == USB_ENDPOINT_XFER_ISOC) {
+ ret = cdns2_prepare_first_isoc_transfer(pdev, pep);
+ if (ret)
+ return 0;
+ }
+
+ cdns2_set_drdy(pdev, pep);
+ }
+
+ return 0;
+}
+
+/* Prepare and start transfers for all requests that have not been started. */
+static int cdns2_start_all_request(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep)
+{
+ struct cdns2_request *preq;
+ int ret;
+
+ while (!list_empty(&pep->deferred_list)) {
+ preq = cdns2_next_preq(&pep->deferred_list);
+
+ ret = cdns2_ep_run_transfer(pep, preq);
+ if (ret)
+ return ret;
+
+ list_move_tail(&preq->list, &pep->pending_list);
+ }
+
+ pep->ep_state &= ~EP_RING_FULL;
+
+ return 0;
+}
+
+/*
+ * Check whether trb has been handled by DMA.
+ *
+ * Endpoint must be selected before invoking this function.
+ *
+ * Returns false if request has not been handled by DMA, else returns true.
+ *
+ * SR - start ring
+ * ER - end ring
+ * DQ = ring->dequeue - dequeue position
+ * EQ = ring->enqueue - enqueue position
+ * ST = preq->start_trb - index of first TRB in transfer ring
+ * ET = preq->end_trb - index of last TRB in transfer ring
+ * CI = current_index - index of processed TRB by DMA.
+ *
+ * As a first step, we check whether the TRB is between ST and ET.
+ * Then, we check whether the cycle bit for index ring->dequeue
+ * is correct.
+ *
+ * Some rules:
+ * 1. ring->dequeue never equals current_index.
+ * 2. ring->enqueue never exceeds ring->dequeue.
+ * 3. Exception: ring->enqueue == ring->dequeue
+ *    and ring->free_trbs is zero.
+ *    This case indicates that the TR is full.
+ *
+ * In the two cases below, the request has been handled.
+ * Case 1 - ring->dequeue < current_index
+ *      SR ... EQ ... DQ ... CI ... ER
+ *      SR ... DQ ... CI ... EQ ... ER
+ *
+ * Case 2 - ring->dequeue > current_index
+ * This situation occurs when CI passes the LINK TRB at the end of
+ * the transfer ring.
+ *      SR ... CI ... EQ ... DQ ... ER
+ */
+static bool cdns2_trb_handled(struct cdns2_endpoint *pep,
+ struct cdns2_request *preq)
+{
+ struct cdns2_device *pdev = pep->pdev;
+ struct cdns2_ring *ring;
+ struct cdns2_trb *trb;
+ int current_index = 0;
+ int handled = 0;
+ int doorbell;
+
+ ring = &pep->ring;
+ current_index = cdns2_get_dma_pos(pdev, pep);
+ doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
+
+ /*
+	 * Only ISO transfers can use the two entries outside the standard
+	 * Transfer Ring. The first of them is used as a zero-length packet
+	 * and the second as a LINK TRB.
+ */
+ if (current_index >= TRBS_PER_SEGMENT)
+ goto finish;
+
+ /* Current trb doesn't belong to this request. */
+ if (preq->start_trb < preq->end_trb) {
+ if (ring->dequeue > preq->end_trb)
+ goto finish;
+
+ if (ring->dequeue < preq->start_trb)
+ goto finish;
+ }
+
+ if (preq->start_trb > preq->end_trb && ring->dequeue > preq->end_trb &&
+ ring->dequeue < preq->start_trb)
+ goto finish;
+
+ if (preq->start_trb == preq->end_trb && ring->dequeue != preq->end_trb)
+ goto finish;
+
+ trb = &ring->trbs[ring->dequeue];
+
+ if ((le32_to_cpu(trb->control) & TRB_CYCLE) != ring->ccs)
+ goto finish;
+
+ if (doorbell == 1 && current_index == ring->dequeue)
+ goto finish;
+
+	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
+ if (TRBS_PER_SEGMENT == 2 && pep->type != USB_ENDPOINT_XFER_ISOC) {
+ handled = 1;
+ goto finish;
+ }
+
+ if (ring->enqueue == ring->dequeue &&
+ ring->free_trbs == 0) {
+ handled = 1;
+ } else if (ring->dequeue < current_index) {
+ if ((current_index == (TRBS_PER_SEGMENT - 1)) &&
+ !ring->dequeue)
+ goto finish;
+
+ handled = 1;
+ } else if (ring->dequeue > current_index) {
+ handled = 1;
+ }
+
+finish:
+ trace_cdns2_request_handled(preq, current_index, handled);
+
+ return handled;
+}
+
+static void cdns2_skip_isoc_td(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep,
+ struct cdns2_request *preq)
+{
+ struct cdns2_trb *trb;
+ int i;
+
+ trb = pep->ring.trbs + pep->ring.dequeue;
+
+ for (i = preq->finished_trb ; i < preq->num_of_trb; i++) {
+ preq->finished_trb++;
+ trace_cdns2_complete_trb(pep, trb);
+ cdns2_ep_inc_deq(&pep->ring);
+ trb = cdns2_next_trb(pep, trb);
+ }
+
+ cdns2_gadget_giveback(pep, preq, 0);
+ cdns2_prepare_first_isoc_transfer(pdev, pep);
+ pep->skip = false;
+ cdns2_set_drdy(pdev, pep);
+}
+
+static void cdns2_transfer_completed(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep)
+{
+ struct cdns2_request *preq = NULL;
+ bool request_handled = false;
+ struct cdns2_trb *trb;
+
+ while (!list_empty(&pep->pending_list)) {
+ preq = cdns2_next_preq(&pep->pending_list);
+ trb = pep->ring.trbs + pep->ring.dequeue;
+
+ /*
+		 * The TRB was changed into a Link TRB, and the request
+		 * was already handled in ep_dequeue.
+ */
+ while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK &&
+ le32_to_cpu(trb->length)) {
+ trace_cdns2_complete_trb(pep, trb);
+ cdns2_ep_inc_deq(&pep->ring);
+ trb = pep->ring.trbs + pep->ring.dequeue;
+ }
+
+ /*
+		 * Re-select the endpoint. It could have been changed by another
+		 * CPU while handling usb_gadget_giveback_request.
+ */
+ cdns2_select_ep(pdev, pep->endpoint.address);
+
+ while (cdns2_trb_handled(pep, preq)) {
+ preq->finished_trb++;
+
+ if (preq->finished_trb >= preq->num_of_trb)
+ request_handled = true;
+
+ trb = pep->ring.trbs + pep->ring.dequeue;
+ trace_cdns2_complete_trb(pep, trb);
+
+ if (pep->dir && pep->type == USB_ENDPOINT_XFER_ISOC)
+ /*
+				 * For ISOC IN the controller doesn't update
+				 * trb->length.
+ */
+ preq->request.actual = preq->request.length;
+ else
+ preq->request.actual +=
+ TRB_LEN(le32_to_cpu(trb->length));
+
+ cdns2_ep_inc_deq(&pep->ring);
+ }
+
+ if (request_handled) {
+ cdns2_gadget_giveback(pep, preq, 0);
+ request_handled = false;
+ } else {
+ goto prepare_next_td;
+ }
+
+ if (pep->type != USB_ENDPOINT_XFER_ISOC &&
+ TRBS_PER_SEGMENT == 2)
+ break;
+ }
+
+prepare_next_td:
+ if (pep->skip && preq)
+ cdns2_skip_isoc_td(pdev, pep, preq);
+
+ if (!(pep->ep_state & EP_STALLED) &&
+ !(pep->ep_state & EP_STALL_PENDING))
+ cdns2_start_all_request(pdev, pep);
+}
+
+static void cdns2_wakeup(struct cdns2_device *pdev)
+{
+ if (!pdev->may_wakeup)
+ return;
+
+ /* Start driving resume signaling to indicate remote wakeup. */
+ set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_SIGRSUME);
+}
+
+static void cdns2_rearm_transfer(struct cdns2_endpoint *pep, u8 rearm)
+{
+ struct cdns2_device *pdev = pep->pdev;
+
+ cdns2_wa1_restore_cycle_bit(pep);
+
+ if (rearm) {
+ trace_cdns2_ring(pep);
+
+ /* Cycle Bit must be updated before arming DMA. */
+ dma_wmb();
+
+ writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);
+
+ cdns2_wakeup(pdev);
+
+ trace_cdns2_doorbell_epx(pep,
+ readl(&pdev->adma_regs->ep_traddr));
+ }
+}
+
+static void cdns2_handle_epx_interrupt(struct cdns2_endpoint *pep)
+{
+ struct cdns2_device *pdev = pep->pdev;
+ u8 isoerror = 0;
+ u32 ep_sts_reg;
+ u32 val;
+
+ cdns2_select_ep(pdev, pep->endpoint.address);
+
+ trace_cdns2_epx_irq(pdev, pep);
+
+ ep_sts_reg = readl(&pdev->adma_regs->ep_sts);
+ writel(ep_sts_reg, &pdev->adma_regs->ep_sts);
+
+ if (pep->type == USB_ENDPOINT_XFER_ISOC) {
+ u8 mult;
+ u8 cs;
+
+ mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
+ cs = pep->dir ? readb(&pdev->epx_regs->ep[pep->num - 1].txcs) :
+ readb(&pdev->epx_regs->ep[pep->num - 1].rxcs);
+ if (mult > 0)
+ isoerror = EPX_CS_ERR(cs);
+ }
+
+ /*
+	 * Sometimes an ISO error for mult=1 or mult=2 is not propagated in time
+	 * from the USB module to the DMA module. To protect against this, the
+	 * driver also checks the txcs/rxcs registers.
+ */
+ if ((ep_sts_reg & DMA_EP_STS_ISOERR) || isoerror) {
+ clear_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
+
+ /* Wait for DBUSY cleared. */
+ readl_poll_timeout_atomic(&pdev->adma_regs->ep_sts, val,
+ !(val & DMA_EP_STS_DBUSY), 1, 125);
+
+ writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);
+
+ /* Wait for DFLUSH cleared. */
+ readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
+ !(val & DMA_EP_CMD_DFLUSH), 1, 10);
+
+ pep->skip = true;
+ }
+
+ if (ep_sts_reg & DMA_EP_STS_TRBERR || pep->skip) {
+ if (pep->ep_state & EP_STALL_PENDING &&
+ !(ep_sts_reg & DMA_EP_STS_DESCMIS))
+ cdns2_ep_stall_flush(pep);
+
+ /*
+		 * For isochronous transfers the driver completes the request on
+		 * IOC or on TRBERR. IOC appears only when the device receives an
+		 * OUT data packet. If the host disables the stream or loses some
+		 * packets, the only way to finish all queued transfers is on the
+		 * TRBERR event.
+ */
+ if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->wa1_set) {
+ if (!pep->dir)
+ clear_reg_bit_32(&pdev->adma_regs->ep_cfg,
+ DMA_EP_CFG_ENABLE);
+
+ cdns2_transfer_completed(pdev, pep);
+ if (pep->ep_state & EP_DEFERRED_DRDY) {
+ pep->ep_state &= ~EP_DEFERRED_DRDY;
+ cdns2_set_drdy(pdev, pep);
+ }
+
+ return;
+ }
+
+ cdns2_transfer_completed(pdev, pep);
+
+ if (!(pep->ep_state & EP_STALLED) &&
+ !(pep->ep_state & EP_STALL_PENDING)) {
+ if (pep->ep_state & EP_DEFERRED_DRDY) {
+ pep->ep_state &= ~EP_DEFERRED_DRDY;
+ cdns2_start_all_request(pdev, pep);
+ } else {
+ cdns2_rearm_transfer(pep, pep->wa1_set);
+ }
+ }
+
+ return;
+ }
+
+ if ((ep_sts_reg & DMA_EP_STS_IOC) || (ep_sts_reg & DMA_EP_STS_ISP))
+ cdns2_transfer_completed(pdev, pep);
+}
+
+static void cdns2_disconnect_gadget(struct cdns2_device *pdev)
+{
+ if (pdev->gadget_driver && pdev->gadget_driver->disconnect)
+ pdev->gadget_driver->disconnect(&pdev->gadget);
+}
+
+static irqreturn_t cdns2_usb_irq_handler(int irq, void *data)
+{
+ struct cdns2_device *pdev = data;
+ unsigned long reg_ep_ists;
+ u8 reg_usb_irq_m;
+ u8 reg_ext_irq_m;
+ u8 reg_usb_irq;
+ u8 reg_ext_irq;
+
+ if (pdev->in_lpm)
+ return IRQ_NONE;
+
+ reg_usb_irq_m = readb(&pdev->interrupt_regs->usbien);
+ reg_ext_irq_m = readb(&pdev->interrupt_regs->extien);
+
+ /* Mask all sources of interrupt. */
+ writeb(0, &pdev->interrupt_regs->usbien);
+ writeb(0, &pdev->interrupt_regs->extien);
+ writel(0, &pdev->adma_regs->ep_ien);
+
+ /* Clear interrupt sources. */
+ writel(0, &pdev->adma_regs->ep_sts);
+ writeb(0, &pdev->interrupt_regs->usbirq);
+ writeb(0, &pdev->interrupt_regs->extirq);
+
+ reg_ep_ists = readl(&pdev->adma_regs->ep_ists);
+ reg_usb_irq = readb(&pdev->interrupt_regs->usbirq);
+ reg_ext_irq = readb(&pdev->interrupt_regs->extirq);
+
+ if (reg_ep_ists || (reg_usb_irq & reg_usb_irq_m) ||
+ (reg_ext_irq & reg_ext_irq_m))
+ return IRQ_WAKE_THREAD;
+
+ writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
+ writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
+ writel(~0, &pdev->adma_regs->ep_ien);
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t cdns2_thread_usb_irq_handler(struct cdns2_device *pdev)
+{
+ u8 usb_irq, ext_irq;
+ int speed;
+ int i;
+
+ ext_irq = readb(&pdev->interrupt_regs->extirq) & EXTIRQ_WAKEUP;
+ writeb(ext_irq, &pdev->interrupt_regs->extirq);
+
+ usb_irq = readb(&pdev->interrupt_regs->usbirq) & USB_IEN_INIT;
+ writeb(usb_irq, &pdev->interrupt_regs->usbirq);
+
+ if (!ext_irq && !usb_irq)
+ return IRQ_NONE;
+
+ trace_cdns2_usb_irq(usb_irq, ext_irq);
+
+ if (ext_irq & EXTIRQ_WAKEUP) {
+ if (pdev->gadget_driver && pdev->gadget_driver->resume) {
+ spin_unlock(&pdev->lock);
+ pdev->gadget_driver->resume(&pdev->gadget);
+ spin_lock(&pdev->lock);
+ }
+ }
+
+ if (usb_irq & USBIRQ_LPM) {
+ u8 reg = readb(&pdev->usb_regs->lpmctrl);
+
+ /* LPM1 enter */
+ if (!(reg & LPMCTRLLH_LPMNYET))
+ writeb(0, &pdev->usb_regs->sleep_clkgate);
+ }
+
+ if (usb_irq & USBIRQ_SUSPEND) {
+ if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
+ spin_unlock(&pdev->lock);
+ pdev->gadget_driver->suspend(&pdev->gadget);
+ spin_lock(&pdev->lock);
+ }
+ }
+
+ if (usb_irq & USBIRQ_URESET) {
+ if (pdev->gadget_driver) {
+ pdev->dev_address = 0;
+
+ spin_unlock(&pdev->lock);
+ usb_gadget_udc_reset(&pdev->gadget,
+ pdev->gadget_driver);
+ spin_lock(&pdev->lock);
+
+ /*
+			 * USBIRQ_URESET is reported at the beginning of the
+			 * reset signal. 100ms is enough time to finish the reset
+			 * process. For high speed, the reset procedure is complete
+			 * when the controller detects HS mode.
+ */
+ for (i = 0; i < 100; i++) {
+ mdelay(1);
+ speed = cdns2_get_speed(pdev);
+ if (speed == USB_SPEED_HIGH)
+ break;
+ }
+
+ pdev->gadget.speed = speed;
+ cdns2_enable_l1(pdev, 0);
+ cdns2_ep0_config(pdev);
+ pdev->may_wakeup = 0;
+ }
+ }
+
+ if (usb_irq & USBIRQ_SUDAV) {
+ pdev->ep0_stage = CDNS2_SETUP_STAGE;
+ cdns2_handle_setup_packet(pdev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Deferred USB interrupt handler. */
+static irqreturn_t cdns2_thread_irq_handler(int irq, void *data)
+{
+ struct cdns2_device *pdev = data;
+ unsigned long dma_ep_ists;
+ unsigned long flags;
+ unsigned int bit;
+
+ local_bh_disable();
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ cdns2_thread_usb_irq_handler(pdev);
+
+ dma_ep_ists = readl(&pdev->adma_regs->ep_ists);
+ if (!dma_ep_ists)
+ goto unlock;
+
+ trace_cdns2_dma_ep_ists(dma_ep_ists);
+
+ /* Handle default endpoint OUT. */
+ if (dma_ep_ists & DMA_EP_ISTS_EP_OUT0)
+ cdns2_handle_ep0_interrupt(pdev, USB_DIR_OUT);
+
+ /* Handle default endpoint IN. */
+ if (dma_ep_ists & DMA_EP_ISTS_EP_IN0)
+ cdns2_handle_ep0_interrupt(pdev, USB_DIR_IN);
+
+ dma_ep_ists &= ~(DMA_EP_ISTS_EP_OUT0 | DMA_EP_ISTS_EP_IN0);
+
+ for_each_set_bit(bit, &dma_ep_ists, sizeof(u32) * BITS_PER_BYTE) {
+ u8 ep_idx = bit > 16 ? (bit - 16) * 2 : (bit * 2) - 1;
+
+ /*
+		 * Endpoints in pdev->eps[] are held in the order:
+		 * ep0, ep1out, ep1in, ep2out, ep2in ... ep15out, ep15in,
+		 * but the bits in dma_ep_ists are ordered:
+		 * ep0out, ep1out ... ep15out, ep0in, ep1in ... ep15in.
+ */
+ cdns2_handle_epx_interrupt(&pdev->eps[ep_idx]);
+ }
+
+unlock:
+ writel(~0, &pdev->adma_regs->ep_ien);
+ writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
+ writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ local_bh_enable();
+
+ return IRQ_HANDLED;
+}
+
+/* Calculates and assigns on-chip memory to endpoints. */
+static void cdns2_eps_onchip_buffer_init(struct cdns2_device *pdev)
+{
+ struct cdns2_endpoint *pep;
+ int min_buf_tx = 0;
+ int min_buf_rx = 0;
+ u16 tx_offset = 0;
+ u16 rx_offset = 0;
+ int free;
+ int i;
+
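+	/* First pass: total up the buffering already requested by all claimed endpoints. */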
+ for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
+ pep = &pdev->eps[i];
+
+ if (!(pep->ep_state & EP_CLAIMED))
+ continue;
+
+ if (pep->dir)
+ min_buf_tx += pep->buffering;
+ else
+ min_buf_rx += pep->buffering;
+ }
+
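+	/*
+	 * Second pass: top up each claimed endpoint to at most quad buffering
+	 * when spare on-chip memory allows, then program its buffer start
+	 * address in the on-chip memory.
+	 */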
+ for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
+ pep = &pdev->eps[i];
+
+ if (!(pep->ep_state & EP_CLAIMED))
+ continue;
+
+ if (pep->dir) {
+ free = pdev->onchip_tx_buf - min_buf_tx;
+
+ if (free + pep->buffering >= 4)
+ free = 4;
+ else
+ free = free + pep->buffering;
+
+ min_buf_tx = min_buf_tx - pep->buffering + free;
+
+ pep->buffering = free;
+
+ writel(tx_offset,
+ &pdev->epx_regs->txstaddr[pep->num - 1]);
+
+ dev_dbg(pdev->dev, "%s onchip address %04x, buffering: %d\n",
+ pep->name, tx_offset, pep->buffering);
+
+ tx_offset += pep->buffering * 1024;
+ } else {
+ free = pdev->onchip_rx_buf - min_buf_rx;
+
+ if (free + pep->buffering >= 4)
+ free = 4;
+ else
+ free = free + pep->buffering;
+
+ min_buf_rx = min_buf_rx - pep->buffering + free;
+
+ pep->buffering = free;
+ writel(rx_offset,
+ &pdev->epx_regs->rxstaddr[pep->num - 1]);
+
+ dev_dbg(pdev->dev, "%s onchip address %04x, buffering: %d\n",
+ pep->name, rx_offset, pep->buffering);
+
+ rx_offset += pep->buffering * 1024;
+ }
+ }
+}
+
+/* Configure hardware endpoint. */
+static int cdns2_ep_config(struct cdns2_endpoint *pep, bool enable)
+{
+ bool is_iso_ep = (pep->type == USB_ENDPOINT_XFER_ISOC);
+ struct cdns2_device *pdev = pep->pdev;
+ u32 max_packet_size;
+ u8 dir = 0;
+ u8 ep_cfg;
+ u8 mult;
+ u32 val;
+ int ret;
+
+ switch (pep->type) {
+ case USB_ENDPOINT_XFER_INT:
+ ep_cfg = EPX_CON_TYPE_INT;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ ep_cfg = EPX_CON_TYPE_BULK;
+ break;
+ default:
+ mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
+ ep_cfg = mult << EPX_CON_ISOD_SHIFT;
+ ep_cfg |= EPX_CON_TYPE_ISOC;
+
+ if (pep->dir) {
+ set_reg_bit_8(&pdev->epx_regs->isoautoarm, BIT(pep->num));
+ set_reg_bit_8(&pdev->epx_regs->isoautodump, BIT(pep->num));
+ set_reg_bit_8(&pdev->epx_regs->isodctrl, BIT(pep->num));
+ }
+ }
+
+ switch (pdev->gadget.speed) {
+ case USB_SPEED_FULL:
+ max_packet_size = is_iso_ep ? 1023 : 64;
+ break;
+ case USB_SPEED_HIGH:
+ max_packet_size = is_iso_ep ? 1024 : 512;
+ break;
+ default:
+		/* Other speeds are not supported. */
+ return -EINVAL;
+ }
+
+ ep_cfg |= (EPX_CON_VAL | (pep->buffering - 1));
+
+ if (pep->dir) {
+ dir = FIFOCTRL_IO_TX;
+ writew(max_packet_size, &pdev->epx_regs->txmaxpack[pep->num - 1]);
+ writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].txcon);
+ } else {
+ writew(max_packet_size, &pdev->epx_regs->rxmaxpack[pep->num - 1]);
+ writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].rxcon);
+ }
+
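+	/* Enable automatic FIFO mode and reset the endpoint's FIFO and data toggle. */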
+ writeb(pep->num | dir | FIFOCTRL_FIFOAUTO,
+ &pdev->usb_regs->fifoctrl);
+ writeb(pep->num | dir, &pdev->epx_regs->endprst);
+ writeb(pep->num | ENDPRST_FIFORST | ENDPRST_TOGRST | dir,
+ &pdev->epx_regs->endprst);
+
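+	/* Select the TRB burst size based on the endpoint's max packet size. */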
+ if (max_packet_size == 1024)
+ pep->trb_burst_size = 128;
+ else if (max_packet_size >= 512)
+ pep->trb_burst_size = 64;
+ else
+ pep->trb_burst_size = 16;
+
+ cdns2_select_ep(pdev, pep->num | pep->dir);
+ writel(DMA_EP_CMD_EPRST | DMA_EP_CMD_DFLUSH, &pdev->adma_regs->ep_cmd);
+
+ ret = readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
+ !(val & (DMA_EP_CMD_DFLUSH |
+ DMA_EP_CMD_EPRST)),
+ 1, 1000);
+
+ if (ret)
+ return ret;
+
+ writel(DMA_EP_STS_TRBERR | DMA_EP_STS_ISOERR, &pdev->adma_regs->ep_sts_en);
+
+ if (enable)
+ writel(DMA_EP_CFG_ENABLE, &pdev->adma_regs->ep_cfg);
+
+ trace_cdns2_epx_hw_cfg(pdev, pep);
+
+ dev_dbg(pdev->dev, "Configure %s: with MPS: %08x, ep con: %02x\n",
+ pep->name, max_packet_size, ep_cfg);
+
+ return 0;
+}
+
+struct usb_request *cdns2_gadget_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
+ struct cdns2_request *preq;
+
+ preq = kzalloc(sizeof(*preq), gfp_flags);
+ if (!preq)
+ return NULL;
+
+ preq->pep = pep;
+
+ trace_cdns2_alloc_request(preq);
+
+ return &preq->request;
+}
+
+void cdns2_gadget_ep_free_request(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct cdns2_request *preq = to_cdns2_request(request);
+
+ trace_cdns2_free_request(preq);
+ kfree(preq);
+}
+
+static int cdns2_gadget_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ u32 reg = DMA_EP_STS_EN_TRBERREN;
+ struct cdns2_endpoint *pep;
+ struct cdns2_device *pdev;
+ unsigned long flags;
+ int enable = 1;
+ int ret = 0;
+
+ if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
+ !desc->wMaxPacketSize) {
+ return -EINVAL;
+ }
+
+ pep = ep_to_cdns2_ep(ep);
+ pdev = pep->pdev;
+
+ if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
+ "%s is already enabled\n", pep->name))
+ return 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ pep->type = usb_endpoint_type(desc);
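+	/*
+	 * For HS endpoints bInterval is an exponent (period = 2^(bInterval-1)
+	 * (micro)frames); full-speed interrupt endpoints use bInterval
+	 * directly, handled below.
+	 */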
+ pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
+
+ if (pdev->gadget.speed == USB_SPEED_FULL)
+ if (pep->type == USB_ENDPOINT_XFER_INT)
+ pep->interval = desc->bInterval;
+
+ if (pep->interval > ISO_MAX_INTERVAL &&
+ pep->type == USB_ENDPOINT_XFER_ISOC) {
+ dev_err(pdev->dev, "ISO period is limited to %d (current: %d)\n",
+ ISO_MAX_INTERVAL, pep->interval);
+
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /*
+	 * During ISO OUT traffic the DMA may read the transfer ring of an
+	 * endpoint that has never received a doorbell.
+	 * The issue was observed only in simulation, but the driver protects
+	 * against it anyway: the ISO OUT endpoint is enabled only just before
+	 * setting DRBL. This special treatment of ISO OUT endpoints is
+	 * recommended by the controller specification.
+ */
+ if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->dir)
+ enable = 0;
+
+ ret = cdns2_alloc_tr_segment(pep);
+ if (ret)
+ goto exit;
+
+ ret = cdns2_ep_config(pep, enable);
+ if (ret) {
+ cdns2_free_tr_segment(pep);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ trace_cdns2_gadget_ep_enable(pep);
+
+ pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);
+ pep->ep_state |= EP_ENABLED;
+ pep->wa1_set = 0;
+ pep->ring.enqueue = 0;
+ pep->ring.dequeue = 0;
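+	/* Start the ring with the cycle state currently reported by the DMA. */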
+ reg = readl(&pdev->adma_regs->ep_sts);
+ pep->ring.pcs = !!DMA_EP_STS_CCS(reg);
+ pep->ring.ccs = !!DMA_EP_STS_CCS(reg);
+
+ writel(pep->ring.dma, &pdev->adma_regs->ep_traddr);
+
+	/* One TRB is reserved for the link TRB used in DMULT mode. */
+ pep->ring.free_trbs = TRBS_PER_SEGMENT - 1;
+
+exit:
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static int cdns2_gadget_ep_disable(struct usb_ep *ep)
+{
+ struct cdns2_endpoint *pep;
+ struct cdns2_request *preq;
+ struct cdns2_device *pdev;
+ unsigned long flags;
+ int val;
+
+ if (!ep)
+ return -EINVAL;
+
+ pep = ep_to_cdns2_ep(ep);
+ pdev = pep->pdev;
+
+ if (dev_WARN_ONCE(pdev->dev, !(pep->ep_state & EP_ENABLED),
+ "%s is already disabled\n", pep->name))
+ return 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ trace_cdns2_gadget_ep_disable(pep);
+
+ cdns2_select_ep(pdev, ep->desc->bEndpointAddress);
+
+ clear_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
+
+ /*
+	 * The driver needs some time before resetting the endpoint.
+	 * It waits for the DBUSY bit to clear or for the timeout to expire;
+	 * 10 us is enough time for the controller to stop the transfer.
+ */
+ readl_poll_timeout_atomic(&pdev->adma_regs->ep_sts, val,
+ !(val & DMA_EP_STS_DBUSY), 1, 10);
+ writel(DMA_EP_CMD_EPRST, &pdev->adma_regs->ep_cmd);
+
+ readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
+ !(val & (DMA_EP_CMD_DFLUSH | DMA_EP_CMD_EPRST)),
+ 1, 1000);
+
+ while (!list_empty(&pep->pending_list)) {
+ preq = cdns2_next_preq(&pep->pending_list);
+ cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
+ }
+
+ while (!list_empty(&pep->deferred_list)) {
+ preq = cdns2_next_preq(&pep->deferred_list);
+ cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
+ }
+
+ ep->desc = NULL;
+ pep->ep_state &= ~EP_ENABLED;
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdns2_ep_enqueue(struct cdns2_endpoint *pep,
+ struct cdns2_request *preq,
+ gfp_t gfp_flags)
+{
+ struct cdns2_device *pdev = pep->pdev;
+ struct usb_request *request;
+ int ret;
+
+ request = &preq->request;
+ request->actual = 0;
+ request->status = -EINPROGRESS;
+
+ ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->dir);
+ if (ret) {
+ trace_cdns2_request_enqueue_error(preq);
+ return ret;
+ }
+
+ list_add_tail(&preq->list, &pep->deferred_list);
+ trace_cdns2_request_enqueue(preq);
+
+ if (!(pep->ep_state & EP_STALLED) && !(pep->ep_state & EP_STALL_PENDING))
+ cdns2_start_all_request(pdev, pep);
+
+ return 0;
+}
+
+static int cdns2_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct usb_request *zlp_request;
+ struct cdns2_request *preq;
+ struct cdns2_endpoint *pep;
+ struct cdns2_device *pdev;
+ unsigned long flags;
+ int ret;
+
+ if (!request || !ep)
+ return -EINVAL;
+
+ pep = ep_to_cdns2_ep(ep);
+ pdev = pep->pdev;
+
+ if (!(pep->ep_state & EP_ENABLED)) {
+ dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
+ pep->name);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ preq = to_cdns2_request(request);
+ ret = cdns2_ep_enqueue(pep, preq, gfp_flags);
+
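+	/*
+	 * If the request fills a whole number of packets and the zero flag is
+	 * set, queue an extra zero-length packet to terminate the transfer.
+	 */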
+ if (ret == 0 && request->zero && request->length &&
+ (request->length % ep->maxpacket == 0)) {
+ struct cdns2_request *preq;
+
+ zlp_request = cdns2_gadget_ep_alloc_request(ep, GFP_ATOMIC);
+ zlp_request->buf = pdev->zlp_buf;
+ zlp_request->length = 0;
+
+ preq = to_cdns2_request(zlp_request);
+ ret = cdns2_ep_enqueue(pep, preq, gfp_flags);
+ }
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ return ret;
+}
+
+int cdns2_gadget_ep_dequeue(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct cdns2_request *preq, *preq_temp, *cur_preq;
+ struct cdns2_endpoint *pep;
+ struct cdns2_trb *link_trb;
+ u8 req_on_hw_ring = 0;
+ unsigned long flags;
+ u32 buffer;
+ int val, i;
+
+ if (!ep || !request || !ep->desc)
+ return -EINVAL;
+
+ pep = ep_to_cdns2_ep(ep);
+ if (!pep->endpoint.desc) {
+		dev_err(pep->pdev->dev, "%s: can't dequeue from disabled endpoint\n",
+ pep->name);
+ return -ESHUTDOWN;
+ }
+
+	/* Requests have already been dequeued while disabling the endpoint. */
+ if (!(pep->ep_state & EP_ENABLED))
+ return 0;
+
+ spin_lock_irqsave(&pep->pdev->lock, flags);
+
+ cur_preq = to_cdns2_request(request);
+ trace_cdns2_request_dequeue(cur_preq);
+
+ list_for_each_entry_safe(preq, preq_temp, &pep->pending_list, list) {
+ if (cur_preq == preq) {
+ req_on_hw_ring = 1;
+ goto found;
+ }
+ }
+
+ list_for_each_entry_safe(preq, preq_temp, &pep->deferred_list, list) {
+ if (cur_preq == preq)
+ goto found;
+ }
+
+ goto not_found;
+
+found:
+ link_trb = preq->trb;
+
+	/* Update the ring only if the removed request is on the pending list. */
+ if (req_on_hw_ring && link_trb) {
+ /* Stop DMA */
+ writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);
+
+ /* Wait for DFLUSH cleared. */
+ readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
+ !(val & DMA_EP_CMD_DFLUSH), 1, 1000);
+
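+		/*
+		 * Convert every TRB of the removed request into a LINK TRB that
+		 * points just past the request, so the DMA skips over it while
+		 * the ring stays consistent.
+		 */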
+ buffer = cpu_to_le32(TRB_BUFFER(pep->ring.dma +
+ ((preq->end_trb + 1) * TRB_SIZE)));
+
+ for (i = 0; i < preq->num_of_trb; i++) {
+ link_trb->buffer = buffer;
+ link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control)
+ & TRB_CYCLE) | TRB_CHAIN |
+ TRB_TYPE(TRB_LINK));
+
+ trace_cdns2_queue_trb(pep, link_trb);
+ link_trb = cdns2_next_trb(pep, link_trb);
+ }
+
+ if (pep->wa1_trb == preq->trb)
+ cdns2_wa1_restore_cycle_bit(pep);
+ }
+
+ cdns2_gadget_giveback(pep, cur_preq, -ECONNRESET);
+
+ preq = cdns2_next_preq(&pep->pending_list);
+ if (preq)
+ cdns2_rearm_transfer(pep, 1);
+
+not_found:
+ spin_unlock_irqrestore(&pep->pdev->lock, flags);
+ return 0;
+}
+
+int cdns2_halt_endpoint(struct cdns2_device *pdev,
+ struct cdns2_endpoint *pep,
+ int value)
+{
+ u8 __iomem *conf;
+ int dir = 0;
+
+ if (!(pep->ep_state & EP_ENABLED))
+ return -EPERM;
+
+ if (pep->dir) {
+ dir = ENDPRST_IO_TX;
+ conf = &pdev->epx_regs->ep[pep->num - 1].txcon;
+ } else {
+ conf = &pdev->epx_regs->ep[pep->num - 1].rxcon;
+ }
+
+ if (!value) {
+ struct cdns2_trb *trb = NULL;
+ struct cdns2_request *preq;
+ struct cdns2_trb trb_tmp;
+
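+		/*
+		 * Temporarily flip the cycle bit of the first pending TRB so the
+		 * DMA doesn't start it while the stall is being cleared; the
+		 * original TRB is restored before re-arming the transfer.
+		 */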
+ preq = cdns2_next_preq(&pep->pending_list);
+ if (preq) {
+ trb = preq->trb;
+ if (trb) {
+ trb_tmp = *trb;
+ trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
+ }
+ }
+
+ trace_cdns2_ep_halt(pep, 0, 0);
+
+ /* Resets Sequence Number */
+ writeb(dir | pep->num, &pdev->epx_regs->endprst);
+ writeb(dir | ENDPRST_TOGRST | pep->num,
+ &pdev->epx_regs->endprst);
+
+ clear_reg_bit_8(conf, EPX_CON_STALL);
+
+ pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);
+
+ if (preq) {
+ if (trb)
+ *trb = trb_tmp;
+
+ cdns2_rearm_transfer(pep, 1);
+ }
+
+ cdns2_start_all_request(pdev, pep);
+ } else {
+ trace_cdns2_ep_halt(pep, 1, 0);
+ set_reg_bit_8(conf, EPX_CON_STALL);
+ writeb(dir | pep->num, &pdev->epx_regs->endprst);
+ writeb(dir | ENDPRST_FIFORST | pep->num,
+ &pdev->epx_regs->endprst);
+ pep->ep_state |= EP_STALLED;
+ }
+
+ return 0;
+}
+
+/* Sets/clears stall on selected endpoint. */
+static int cdns2_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
+ struct cdns2_device *pdev = pep->pdev;
+ struct cdns2_request *preq;
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ preq = cdns2_next_preq(&pep->pending_list);
+ if (value && preq) {
+ trace_cdns2_ep_busy_try_halt_again(pep);
+ ret = -EAGAIN;
+ goto done;
+ }
+
+ if (!value)
+ pep->ep_state &= ~EP_WEDGE;
+
+ ret = cdns2_halt_endpoint(pdev, pep, value);
+
+done:
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ return ret;
+}
+
+static int cdns2_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+ struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
+
+ cdns2_gadget_ep_set_halt(ep, 1);
+ pep->ep_state |= EP_WEDGE;
+
+ return 0;
+}
+
+static struct
+cdns2_endpoint *cdns2_find_available_ep(struct cdns2_device *pdev,
+ struct usb_endpoint_descriptor *desc)
+{
+ struct cdns2_endpoint *pep;
+ struct usb_ep *ep;
+ int ep_correct;
+
+ list_for_each_entry(ep, &pdev->gadget.ep_list, ep_list) {
+ unsigned long num;
+ int ret;
+		/* The endpoint name follows the pattern epXin or epXout. */
+ char c[2] = {ep->name[2], '\0'};
+
+ ret = kstrtoul(c, 10, &num);
+ if (ret)
+ return ERR_PTR(ret);
+ pep = ep_to_cdns2_ep(ep);
+
+ if (pep->num != num)
+ continue;
+
+ ep_correct = (pep->endpoint.caps.dir_in &&
+ usb_endpoint_dir_in(desc)) ||
+ (pep->endpoint.caps.dir_out &&
+ usb_endpoint_dir_out(desc));
+
+ if (ep_correct && !(pep->ep_state & EP_CLAIMED))
+ return pep;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+/*
+ * Called by the gadget core to match endpoints; it lets the driver learn
+ * which endpoints will be used so on-chip memory usage can be optimized.
+ */
+static struct
+usb_ep *cdns2_gadget_match_ep(struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *comp_desc)
+{
+ struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
+ struct cdns2_endpoint *pep;
+ unsigned long flags;
+
+ pep = cdns2_find_available_ep(pdev, desc);
+ if (IS_ERR(pep)) {
+ dev_err(pdev->dev, "no available ep\n");
+ return NULL;
+ }
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
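+	/* Isochronous endpoints get quad buffering; all others start with a single buffer. */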
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
+ pep->buffering = 4;
+ else
+ pep->buffering = 1;
+
+ pep->ep_state |= EP_CLAIMED;
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return &pep->endpoint;
+}
+
+static const struct usb_ep_ops cdns2_gadget_ep_ops = {
+ .enable = cdns2_gadget_ep_enable,
+ .disable = cdns2_gadget_ep_disable,
+ .alloc_request = cdns2_gadget_ep_alloc_request,
+ .free_request = cdns2_gadget_ep_free_request,
+ .queue = cdns2_gadget_ep_queue,
+ .dequeue = cdns2_gadget_ep_dequeue,
+ .set_halt = cdns2_gadget_ep_set_halt,
+ .set_wedge = cdns2_gadget_ep_set_wedge,
+};
+
+static int cdns2_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
+
+ return readw(&pdev->usb_regs->frmnr);
+}
+
+static int cdns2_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ cdns2_wakeup(pdev);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdns2_gadget_set_selfpowered(struct usb_gadget *gadget,
+ int is_selfpowered)
+{
+ struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ pdev->is_selfpowered = !!is_selfpowered;
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ return 0;
+}
+
+/* Disable interrupts and begin the controller halting process. */
+static void cdns2_quiesce(struct cdns2_device *pdev)
+{
+ set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
+
+	/* Disable interrupts. */
+	writeb(0, &pdev->interrupt_regs->extien);
+	writeb(0, &pdev->interrupt_regs->usbien);
+	writew(0, &pdev->adma_regs->ep_ien);
+
+ /* Clear interrupt line. */
+ writeb(0x0, &pdev->interrupt_regs->usbirq);
+}
+
+static void cdns2_gadget_config(struct cdns2_device *pdev)
+{
+ cdns2_ep0_config(pdev);
+
+ /* Enable DMA interrupts for all endpoints. */
+ writel(~0x0, &pdev->adma_regs->ep_ien);
+ cdns2_enable_l1(pdev, 0);
+ writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
+ writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
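+	/* Use the multiple-TRB (DMULT) DMA transfer mode. */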
+ writel(DMA_CONF_DMULT, &pdev->adma_regs->conf);
+}
+
+static int cdns2_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
+ unsigned long flags;
+
+ trace_cdns2_pullup(is_on);
+
+ /*
+ * Disable events handling while controller is being
+ * enabled/disabled.
+ */
+ disable_irq(pdev->irq);
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ if (is_on) {
+ cdns2_gadget_config(pdev);
+ clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
+ } else {
+ cdns2_quiesce(pdev);
+ }
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ enable_irq(pdev->irq);
+
+ return 0;
+}
+
+static int cdns2_gadget_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
+ enum usb_device_speed max_speed = driver->max_speed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ pdev->gadget_driver = driver;
+
+ /* Limit speed if necessary. */
+ max_speed = min(driver->max_speed, gadget->max_speed);
+
+ switch (max_speed) {
+ case USB_SPEED_FULL:
+ writeb(SPEEDCTRL_HSDISABLE, &pdev->usb_regs->speedctrl);
+ break;
+ case USB_SPEED_HIGH:
+ writeb(0, &pdev->usb_regs->speedctrl);
+ break;
+ default:
+ dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
+ max_speed);
+ fallthrough;
+ case USB_SPEED_UNKNOWN:
+		/* Default to high-speed. */
+ max_speed = USB_SPEED_HIGH;
+ break;
+ }
+
+ /* Reset all USB endpoints. */
+ writeb(ENDPRST_IO_TX, &pdev->usb_regs->endprst);
+ writeb(ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX,
+ &pdev->usb_regs->endprst);
+ writeb(ENDPRST_FIFORST | ENDPRST_TOGRST, &pdev->usb_regs->endprst);
+
+ cdns2_eps_onchip_buffer_init(pdev);
+
+ cdns2_gadget_config(pdev);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdns2_gadget_udc_stop(struct usb_gadget *gadget)
+{
+ struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
+ struct cdns2_endpoint *pep;
+ u32 bEndpointAddress;
+ struct usb_ep *ep;
+ int val;
+
+ pdev->gadget_driver = NULL;
+ pdev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ list_for_each_entry(ep, &pdev->gadget.ep_list, ep_list) {
+ pep = ep_to_cdns2_ep(ep);
+ bEndpointAddress = pep->num | pep->dir;
+ cdns2_select_ep(pdev, bEndpointAddress);
+ writel(DMA_EP_CMD_EPRST, &pdev->adma_regs->ep_cmd);
+ readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
+ !(val & DMA_EP_CMD_EPRST), 1, 100);
+ }
+
+ cdns2_quiesce(pdev);
+
+ writeb(ENDPRST_IO_TX, &pdev->usb_regs->endprst);
+ writeb(ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX,
+ &pdev->epx_regs->endprst);
+ writeb(ENDPRST_FIFORST | ENDPRST_TOGRST, &pdev->epx_regs->endprst);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops cdns2_gadget_ops = {
+ .get_frame = cdns2_gadget_get_frame,
+ .wakeup = cdns2_gadget_wakeup,
+ .set_selfpowered = cdns2_gadget_set_selfpowered,
+ .pullup = cdns2_gadget_pullup,
+ .udc_start = cdns2_gadget_udc_start,
+ .udc_stop = cdns2_gadget_udc_stop,
+ .match_ep = cdns2_gadget_match_ep,
+};
+
+static void cdns2_free_all_eps(struct cdns2_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++)
+ cdns2_free_tr_segment(&pdev->eps[i]);
+}
+
+/* Initializes software endpoints of gadget. */
+static int cdns2_init_eps(struct cdns2_device *pdev)
+{
+ struct cdns2_endpoint *pep;
+ int i;
+
+ for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
+ bool direction = !(i & 1); /* Start from OUT endpoint. */
+ u8 epnum = ((i + 1) >> 1);
+
+ /*
+		 * Endpoints are held in pdev->eps[] in the order:
+ * ep0, ep1out, ep1in ... ep15out, ep15in.
+ */
+ if (!CDNS2_IF_EP_EXIST(pdev, epnum, direction))
+ continue;
+
+ pep = &pdev->eps[i];
+ pep->pdev = pdev;
+ pep->num = epnum;
+ /* 0 for OUT, 1 for IN. */
+ pep->dir = direction ? USB_DIR_IN : USB_DIR_OUT;
+ pep->idx = i;
+
+ /* Ep0in and ep0out are represented by pdev->eps[0]. */
+ if (!epnum) {
+ int ret;
+
+ snprintf(pep->name, sizeof(pep->name), "ep%d%s",
+ epnum, "BiDir");
+
+ cdns2_init_ep0(pdev, pep);
+
+ ret = cdns2_alloc_tr_segment(pep);
+ if (ret) {
+ dev_err(pdev->dev, "Failed to init ep0\n");
+ return ret;
+ }
+ } else {
+ snprintf(pep->name, sizeof(pep->name), "ep%d%s",
+				 epnum, direction ? "in" : "out");
+ pep->endpoint.name = pep->name;
+
+ usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);
+ pep->endpoint.ops = &cdns2_gadget_ep_ops;
+ list_add_tail(&pep->endpoint.ep_list, &pdev->gadget.ep_list);
+
+ pep->endpoint.caps.dir_in = direction;
+ pep->endpoint.caps.dir_out = !direction;
+
+ pep->endpoint.caps.type_iso = 1;
+ pep->endpoint.caps.type_bulk = 1;
+ pep->endpoint.caps.type_int = 1;
+ }
+
+ pep->endpoint.name = pep->name;
+ pep->ep_state = 0;
+
+ dev_dbg(pdev->dev, "Init %s, SupType: CTRL: %s, INT: %s, "
+ "BULK: %s, ISOC %s, SupDir IN: %s, OUT: %s\n",
+ pep->name,
+ (pep->endpoint.caps.type_control) ? "yes" : "no",
+ (pep->endpoint.caps.type_int) ? "yes" : "no",
+ (pep->endpoint.caps.type_bulk) ? "yes" : "no",
+ (pep->endpoint.caps.type_iso) ? "yes" : "no",
+ (pep->endpoint.caps.dir_in) ? "yes" : "no",
+ (pep->endpoint.caps.dir_out) ? "yes" : "no");
+
+ INIT_LIST_HEAD(&pep->pending_list);
+ INIT_LIST_HEAD(&pep->deferred_list);
+ }
+
+ return 0;
+}
+
+static int cdns2_gadget_start(struct cdns2_device *pdev)
+{
+ u32 max_speed;
+ void *buf;
+ int val;
+ int ret;
+
+ pdev->usb_regs = pdev->regs;
+ pdev->ep0_regs = pdev->regs;
+ pdev->epx_regs = pdev->regs;
+ pdev->interrupt_regs = pdev->regs;
+ pdev->adma_regs = pdev->regs + CDNS2_ADMA_REGS_OFFSET;
+
+ /* Reset controller. */
+ set_reg_bit_8(&pdev->usb_regs->cpuctrl, CPUCTRL_SW_RST);
+
+ ret = readl_poll_timeout_atomic(&pdev->usb_regs->cpuctrl, val,
+ !(val & CPUCTRL_SW_RST), 1, 10000);
+ if (ret) {
+ dev_err(pdev->dev, "Error: reset controller timeout\n");
+ return -EINVAL;
+ }
+
+ usb_initialize_gadget(pdev->dev, &pdev->gadget, NULL);
+
+ device_property_read_u16(pdev->dev, "cdns,on-chip-tx-buff-size",
+ &pdev->onchip_tx_buf);
+ device_property_read_u16(pdev->dev, "cdns,on-chip-rx-buff-size",
+ &pdev->onchip_rx_buf);
+ device_property_read_u32(pdev->dev, "cdns,avail-endpoints",
+ &pdev->eps_supported);
+
+ /*
+	 * The driver assumes that each USBHS controller has at least
+	 * one IN and one OUT non-control endpoint.
+ */
+ if (!pdev->onchip_tx_buf && !pdev->onchip_rx_buf) {
+ ret = -EINVAL;
+ dev_err(pdev->dev, "Invalid on-chip memory configuration\n");
+ goto put_gadget;
+ }
+
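+	/* Bits 0 and 16 of eps_supported describe ep0 (see CDNS2_IF_EP_EXIST); at least one other endpoint must exist. */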
+ if (!(pdev->eps_supported & ~0x00010001)) {
+ ret = -EINVAL;
+ dev_err(pdev->dev, "No hardware endpoints available\n");
+ goto put_gadget;
+ }
+
+ max_speed = usb_get_maximum_speed(pdev->dev);
+
+ switch (max_speed) {
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ break;
+ default:
+ dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
+ max_speed);
+ fallthrough;
+ case USB_SPEED_UNKNOWN:
+ max_speed = USB_SPEED_HIGH;
+ break;
+ }
+
+ pdev->gadget.max_speed = max_speed;
+ pdev->gadget.speed = USB_SPEED_UNKNOWN;
+ pdev->gadget.ops = &cdns2_gadget_ops;
+ pdev->gadget.name = "usbhs-gadget";
+ pdev->gadget.quirk_avoids_skb_reserve = 1;
+ pdev->gadget.irq = pdev->irq;
+
+ spin_lock_init(&pdev->lock);
+ INIT_WORK(&pdev->pending_status_wq, cdns2_pending_setup_status_handler);
+
+ /* Initialize endpoint container. */
+ INIT_LIST_HEAD(&pdev->gadget.ep_list);
+ pdev->eps_dma_pool = dma_pool_create("cdns2_eps_dma_pool", pdev->dev,
+ TR_SEG_SIZE, 8, 0);
+ if (!pdev->eps_dma_pool) {
+ dev_err(pdev->dev, "Failed to create TRB dma pool\n");
+ ret = -ENOMEM;
+ goto put_gadget;
+ }
+
+ ret = cdns2_init_eps(pdev);
+ if (ret) {
+ dev_err(pdev->dev, "Failed to create endpoints\n");
+ goto destroy_dma_pool;
+ }
+
+ pdev->gadget.sg_supported = 1;
+
+ pdev->zlp_buf = kzalloc(CDNS2_EP_ZLP_BUF_SIZE, GFP_KERNEL);
+ if (!pdev->zlp_buf) {
+ ret = -ENOMEM;
+ goto destroy_dma_pool;
+ }
+
+ /* Allocate memory for setup packet buffer. */
+ buf = dma_alloc_coherent(pdev->dev, 8, &pdev->ep0_preq.request.dma,
+ GFP_DMA);
+ pdev->ep0_preq.request.buf = buf;
+
+ if (!pdev->ep0_preq.request.buf) {
+ ret = -ENOMEM;
+ goto free_zlp_buf;
+ }
+
+ /* Add USB gadget device. */
+ ret = usb_add_gadget(&pdev->gadget);
+ if (ret < 0) {
+ dev_err(pdev->dev, "Failed to add gadget\n");
+ goto free_ep0_buf;
+ }
+
+ return 0;
+
+free_ep0_buf:
+ dma_free_coherent(pdev->dev, 8, pdev->ep0_preq.request.buf,
+ pdev->ep0_preq.request.dma);
+free_zlp_buf:
+ kfree(pdev->zlp_buf);
+destroy_dma_pool:
+ dma_pool_destroy(pdev->eps_dma_pool);
+put_gadget:
+ usb_put_gadget(&pdev->gadget);
+
+ return ret;
+}
+
+int cdns2_gadget_suspend(struct cdns2_device *pdev)
+{
+ unsigned long flags;
+
+ cdns2_disconnect_gadget(pdev);
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ pdev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ trace_cdns2_device_state("notattached");
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
+ cdns2_enable_l1(pdev, 0);
+
+ /* Disable interrupt for device. */
+ writeb(0, &pdev->interrupt_regs->usbien);
+ writel(0, &pdev->adma_regs->ep_ien);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+int cdns2_gadget_resume(struct cdns2_device *pdev, bool hibernated)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ if (!pdev->gadget_driver) {
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ return 0;
+ }
+
+ cdns2_gadget_config(pdev);
+
+ if (hibernated)
+ clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+void cdns2_gadget_remove(struct cdns2_device *pdev)
+{
+ pm_runtime_mark_last_busy(pdev->dev);
+ pm_runtime_put_autosuspend(pdev->dev);
+
+ usb_del_gadget(&pdev->gadget);
+ cdns2_free_all_eps(pdev);
+
+ dma_pool_destroy(pdev->eps_dma_pool);
+ kfree(pdev->zlp_buf);
+ usb_put_gadget(&pdev->gadget);
+}
+
+int cdns2_gadget_init(struct cdns2_device *pdev)
+{
+ int ret;
+
+ /* Ensure 32-bit DMA Mask. */
+ ret = dma_set_mask_and_coherent(pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(pdev->dev, "Failed to set dma mask: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_get_sync(pdev->dev);
+
+ cdsn2_isoc_burst_opt(pdev);
+
+ ret = cdns2_gadget_start(pdev);
+ if (ret) {
+ pm_runtime_put_sync(pdev->dev);
+ return ret;
+ }
+
+ /*
+	 * Because the interrupt line can be shared with other components, the
+	 * driver can't use the IRQF_ONESHOT flag here.
+ */
+ ret = devm_request_threaded_irq(pdev->dev, pdev->irq,
+ cdns2_usb_irq_handler,
+ cdns2_thread_irq_handler,
+ IRQF_SHARED,
+ dev_name(pdev->dev),
+ pdev);
+ if (ret)
+ goto err0;
+
+ return 0;
+
+err0:
+ cdns2_gadget_remove(pdev);
+
+ return ret;
+}
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
new file mode 100644
index 000000000000..71e2f62d653a
--- /dev/null
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
@@ -0,0 +1,707 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * USBHS-DEV device controller driver header file
+ *
+ * Copyright (C) 2023 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#ifndef __LINUX_CDNS2_GADGET
+#define __LINUX_CDNS2_GADGET
+
+#include <linux/usb/gadget.h>
+#include <linux/dma-direction.h>
+
+/*
+ * USBHS register interface.
+ * This corresponds to the USBHS Device Controller Interface.
+ */
+
+/**
+ * struct cdns2_ep0_regs - endpoint 0 related registers.
+ * @rxbc: receive (OUT) 0 endpoint byte count register.
+ * @txbc: transmit (IN) 0 endpoint byte count register.
+ * @cs: 0 endpoint control and status register.
+ * @reserved1: reserved.
+ * @fifo: 0 endpoint fifo register.
+ * @reserved2: reserved.
+ * @setupdat: SETUP data register.
+ * @reserved4: reserved.
+ * @maxpack: 0 endpoint max packet size.
+ */
+struct cdns2_ep0_regs {
+ __u8 rxbc;
+ __u8 txbc;
+ __u8 cs;
+ __u8 reserved1[4];
+ __u8 fifo;
+ __le32 reserved2[94];
+ __u8 setupdat[8];
+ __u8 reserved4[88];
+ __u8 maxpack;
+} __packed __aligned(4);
+
+/* EP0CS - bitmasks. */
+/* Endpoint 0 stall bit for status stage. */
+#define EP0CS_STALL BIT(0)
+/* HSNAK bit. */
+#define EP0CS_HSNAK BIT(1)
+/* IN 0 endpoint busy bit. */
+#define EP0CS_TXBSY_MSK BIT(2)
+/* OUT 0 endpoint busy bit. */
+#define EP0CS_RXBSY_MSK BIT(3)
+/* Send STALL in the data stage phase. */
+#define EP0CS_DSTALL BIT(4)
+/* SETUP buffer content was changed. */
+#define EP0CS_CHGSET BIT(7)
+
+/* EP0FIFO - bitmasks. */
+/* Direction. */
+#define EP0_FIFO_IO_TX BIT(4)
+/* FIFO auto bit. */
+#define EP0_FIFO_AUTO BIT(5)
+/* FIFO commit bit. */
+#define EP0_FIFO_COMMIT BIT(6)
+/* FIFO access bit. */
+#define EP0_FIFO_ACCES BIT(7)
+
+/**
+ * struct cdns2_epx_base - base endpoint registers.
+ * @rxbc: OUT endpoint byte count register.
+ * @rxcon: OUT endpoint control register.
+ * @rxcs: OUT endpoint control and status register.
+ * @txbc: IN endpoint byte count register.
+ * @txcon: IN endpoint control register.
+ * @txcs: IN endpoint control and status register.
+ */
+struct cdns2_epx_base {
+ __le16 rxbc;
+ __u8 rxcon;
+ __u8 rxcs;
+ __le16 txbc;
+ __u8 txcon;
+ __u8 txcs;
+} __packed __aligned(4);
+
+/* rxcon/txcon - endpoint control register bitmasks. */
+/* Endpoint buffering: 0 - single buffering ... 3 - quad buffering. */
+#define EPX_CON_BUF GENMASK(1, 0)
+/* Endpoint type. */
+#define EPX_CON_TYPE GENMASK(3, 2)
+/* Endpoint type: isochronous. */
+#define EPX_CON_TYPE_ISOC 0x4
+/* Endpoint type: bulk. */
+#define EPX_CON_TYPE_BULK 0x8
+/* Endpoint type: interrupt. */
+#define EPX_CON_TYPE_INT 0xC
+/* Number of packets per microframe. */
+#define EPX_CON_ISOD GENMASK(5, 4)
+#define EPX_CON_ISOD_SHIFT 0x4
+/* Endpoint stall bit. */
+#define EPX_CON_STALL BIT(6)
+/* Endpoint enable bit. */
+#define EPX_CON_VAL BIT(7)
+
+/* rxcs/txcs - endpoint control and status bitmasks. */
+/* Data sequence error for the ISO endpoint. */
+#define EPX_CS_ERR(p) ((p) & BIT(0))
+
+/**
+ * struct cdns2_epx_regs - endpoint 1..15 related registers.
+ * @reserved: reserved.
+ * @ep: non-control endpoints array.
+ * @reserved2: reserved.
+ * @endprst: endpoint reset register.
+ * @reserved3: reserved.
+ * @isoautoarm: ISO auto-arm register.
+ * @reserved4: reserved.
+ * @isodctrl: ISO control register.
+ * @reserved5: reserved.
+ * @isoautodump: ISO auto dump enable register.
+ * @reserved6: reserved.
+ * @rxmaxpack: receive (OUT) Max packet size register.
+ * @reserved7: reserved.
+ * @rxstaddr: receive (OUT) start address endpoint buffer register.
+ * @reserved8: reserved.
+ * @txstaddr: transmit (IN) start address endpoint buffer register.
+ * @reserved9: reserved.
+ * @txmaxpack: transmit (IN) Max packet size register.
+ */
+struct cdns2_epx_regs {
+ __le32 reserved[2];
+ struct cdns2_epx_base ep[15];
+ __u8 reserved2[290];
+ __u8 endprst;
+ __u8 reserved3[41];
+ __le16 isoautoarm;
+ __u8 reserved4[10];
+ __le16 isodctrl;
+ __le16 reserved5;
+ __le16 isoautodump;
+ __le32 reserved6;
+ __le16 rxmaxpack[15];
+ __le32 reserved7[65];
+ __le32 rxstaddr[15];
+ __u8 reserved8[4];
+ __le32 txstaddr[15];
+ __u8 reserved9[98];
+ __le16 txmaxpack[15];
+} __packed __aligned(4);
+
+/* ENDPRST - bitmasks. */
+/* Endpoint number. */
+#define ENDPRST_EP GENMASK(3, 0)
+/* IN direction bit. */
+#define ENDPRST_IO_TX BIT(4)
+/* Toggle reset bit. */
+#define ENDPRST_TOGRST BIT(5)
+/* FIFO reset bit. */
+#define ENDPRST_FIFORST BIT(6)
+/* Toggle status and reset bit. */
+#define ENDPRST_TOGSETQ BIT(7)
+
+/**
+ * struct cdns2_interrupt_regs - USB interrupt related registers.
+ * @reserved: reserved.
+ * @usbirq: USB interrupt request register.
+ * @extirq: external interrupt request register.
+ * @rxpngirq: external interrupt request register.
+ * @reserved1: reserved.
+ * @usbien: USB interrupt enable register.
+ * @extien: external interrupt enable register.
+ * @reserved2: reserved.
+ * @usbivect: USB interrupt vector register.
+ */
+struct cdns2_interrupt_regs {
+ __u8 reserved[396];
+ __u8 usbirq;
+ __u8 extirq;
+ __le16 rxpngirq;
+ __le16 reserved1[4];
+ __u8 usbien;
+ __u8 extien;
+ __le16 reserved2[3];
+ __u8 usbivect;
+} __packed __aligned(4);
+
+/* EXTIRQ and EXTIEN - bitmasks. */
+/* VBUS fault fall interrupt. */
+#define EXTIRQ_VBUSFAULT_FALL BIT(0)
+/* VBUS fault rise interrupt. */
+#define EXTIRQ_VBUSFAULT_RISE BIT(1)
+/* Wake up interrupt bit. */
+#define EXTIRQ_WAKEUP BIT(7)
+
+/* USBIEN and USBIRQ - bitmasks. */
+/* SETUP data valid interrupt bit. */
+#define USBIRQ_SUDAV BIT(0)
+/* Start-of-frame interrupt bit. */
+#define USBIRQ_SOF BIT(1)
+/* SETUP token interrupt bit. */
+#define USBIRQ_SUTOK BIT(2)
+/* USB suspend interrupt bit. */
+#define USBIRQ_SUSPEND BIT(3)
+/* USB reset interrupt bit. */
+#define USBIRQ_URESET BIT(4)
+/* USB high-speed mode interrupt bit. */
+#define USBIRQ_HSPEED BIT(5)
+/* Link Power Management interrupt bit. */
+#define USBIRQ_LPM BIT(7)
+
+#define USB_IEN_INIT (USBIRQ_SUDAV | USBIRQ_SUSPEND | USBIRQ_URESET \
+ | USBIRQ_HSPEED | USBIRQ_LPM)
+/**
+ * struct cdns2_usb_regs - USB controller registers.
+ * @reserved: reserved.
+ * @lpmctrl: LPM control register.
+ * @lpmclock: LPM clock register.
+ * @reserved2: reserved.
+ * @endprst: endpoint reset register.
+ * @usbcs: USB control and status register.
+ * @frmnr: USB frame counter register.
+ * @fnaddr: function address register.
+ * @clkgate: clock gate register.
+ * @fifoctrl: FIFO control register.
+ * @speedctrl: speed control register.
+ * @sleep_clkgate: sleep clock gate register.
+ * @reserved3: reserved.
+ * @cpuctrl: microprocessor control register.
+ */
+struct cdns2_usb_regs {
+ __u8 reserved[4];
+ __u16 lpmctrl;
+ __u8 lpmclock;
+ __u8 reserved2[411];
+ __u8 endprst;
+ __u8 usbcs;
+ __le16 frmnr;
+ __u8 fnaddr;
+ __u8 clkgate;
+ __u8 fifoctrl;
+ __u8 speedctrl;
+ __u8 sleep_clkgate;
+ __u8 reserved3[533];
+ __u8 cpuctrl;
+} __packed __aligned(4);
+
+/* LPMCTRL - bitmasks. */
+/* BESL (Best Effort Service Latency). */
+#define LPMCTRLLL_HIRD GENMASK(7, 4)
+/* Last received Remote Wakeup field from LPM Extended Token packet. */
+#define LPMCTRLLH_BREMOTEWAKEUP BIT(8)
+/* Reflects value of the lpmnyet bit located in the usbcs[1] register. */
+#define LPMCTRLLH_LPMNYET BIT(16)
+
+/* LPMCLOCK - bitmasks. */
+/*
+ * If this bit is 1, the controller automatically turns off the clock
+ * (utmisleepm goes low); otherwise the microprocessor should use the
+ * sleep clock gate register to turn off the clock.
+ */
+#define LPMCLOCK_SLEEP_ENTRY BIT(7)
+
+/* USBCS - bitmasks. */
+/* Send NYET handshake for the LPM transaction. */
+#define USBCS_LPMNYET BIT(2)
+/* Remote wake-up bit. */
+#define USBCS_SIGRSUME BIT(5)
+/* Software disconnect bit. */
+#define USBCS_DISCON BIT(6)
+/* Indicates that a wakeup pin resumed the controller. */
+#define USBCS_WAKESRC BIT(7)
+
+/* FIFOCTRL - bitmasks. */
+/* Endpoint number. */
+#define FIFOCTRL_EP GENMASK(3, 0)
+/* Direction bit. */
+#define FIFOCTRL_IO_TX BIT(4)
+/* FIFO auto bit. */
+#define FIFOCTRL_FIFOAUTO BIT(5)
+/* FIFO commit bit. */
+#define FIFOCTRL_FIFOCMIT BIT(6)
+/* FIFO access bit. */
+#define FIFOCTRL_FIFOACC BIT(7)
+
+/* SPEEDCTRL - bitmasks. */
+/* Device works in Full Speed. */
+#define SPEEDCTRL_FS BIT(1)
+/* Device works in High Speed. */
+#define SPEEDCTRL_HS BIT(2)
+/* Force FS mode. */
+#define SPEEDCTRL_HSDISABLE BIT(7)
+
+/* CPUCTRL- bitmasks. */
+/* Controller reset bit. */
+#define CPUCTRL_SW_RST BIT(1)
+
+/**
+ * struct cdns2_adma_regs - ADMA controller registers.
+ * @conf: DMA global configuration register.
+ * @sts: DMA global Status register.
+ * @reserved1: reserved.
+ * @ep_sel: DMA endpoint select register.
+ * @ep_traddr: DMA endpoint transfer ring address register.
+ * @ep_cfg: DMA endpoint configuration register.
+ * @ep_cmd: DMA endpoint command register.
+ * @ep_sts: DMA endpoint status register.
+ * @reserved2: reserved.
+ * @ep_sts_en: DMA endpoint status enable register.
+ * @drbl: DMA doorbell register.
+ * @ep_ien: DMA endpoint interrupt enable register.
+ * @ep_ists: DMA endpoint interrupt status register.
+ * @axim_ctrl: AXI Master Control register.
+ * @axim_id: AXI Master ID register.
+ * @reserved3: reserved.
+ * @axim_cap: AXI Master Wrapper Extended Capability.
+ * @reserved4: reserved.
+ * @axim_ctrl0: AXI Master Wrapper Extended Capability Control Register 0.
+ * @axim_ctrl1: AXI Master Wrapper Extended Capability Control Register 1.
+ */
+struct cdns2_adma_regs {
+ __le32 conf;
+ __le32 sts;
+ __le32 reserved1[5];
+ __le32 ep_sel;
+ __le32 ep_traddr;
+ __le32 ep_cfg;
+ __le32 ep_cmd;
+ __le32 ep_sts;
+ __le32 reserved2;
+ __le32 ep_sts_en;
+ __le32 drbl;
+ __le32 ep_ien;
+ __le32 ep_ists;
+ __le32 axim_ctrl;
+ __le32 axim_id;
+ __le32 reserved3;
+ __le32 axim_cap;
+ __le32 reserved4;
+ __le32 axim_ctrl0;
+ __le32 axim_ctrl1;
+};
+
+#define CDNS2_ADMA_REGS_OFFSET 0x400
+
+/* DMA_CONF - bitmasks. */
+/* Reset USB device configuration. */
+#define DMA_CONF_CFGRST BIT(0)
+/* Singular DMA transfer mode. */
+#define DMA_CONF_DSING BIT(8)
+/* Multiple DMA transfers mode. */
+#define DMA_CONF_DMULT BIT(9)
+
+/* DMA_EP_CFG - bitmasks. */
+/* Endpoint enable. */
+#define DMA_EP_CFG_ENABLE BIT(0)
+
+/* DMA_EP_CMD - bitmasks. */
+/* Endpoint reset. */
+#define DMA_EP_CMD_EPRST BIT(0)
+/* Transfer descriptor ready. */
+#define DMA_EP_CMD_DRDY BIT(6)
+/* Data flush. */
+#define DMA_EP_CMD_DFLUSH BIT(7)
+
+/* DMA_EP_STS - bitmasks. */
+/* Interrupt On Complete. */
+#define DMA_EP_STS_IOC BIT(2)
+/* Interrupt on Short Packet. */
+#define DMA_EP_STS_ISP BIT(3)
+/* Transfer descriptor missing. */
+#define DMA_EP_STS_DESCMIS BIT(4)
+/* TRB error. */
+#define DMA_EP_STS_TRBERR BIT(7)
+/* DMA busy bit. */
+#define DMA_EP_STS_DBUSY BIT(9)
+/* Current Cycle Status. */
+#define DMA_EP_STS_CCS(p) ((p) & BIT(11))
+/* OUT size mismatch. */
+#define DMA_EP_STS_OUTSMM BIT(14)
+/* ISO transmission error. */
+#define DMA_EP_STS_ISOERR BIT(15)
+
+/* DMA_EP_STS_EN - bitmasks. */
+/* OUT transfer missing descriptor enable. */
+#define DMA_EP_STS_EN_DESCMISEN BIT(4)
+/* TRB enable. */
+#define DMA_EP_STS_EN_TRBERREN BIT(7)
+/* OUT size mismatch enable. */
+#define DMA_EP_STS_EN_OUTSMMEN BIT(14)
+/* ISO transmission error enable. */
+#define DMA_EP_STS_EN_ISOERREN BIT(15)
+
+/* DMA_EP_IEN - bitmasks. */
+#define DMA_EP_IEN(index) (1 << (index))
+#define DMA_EP_IEN_EP_OUT0 BIT(0)
+#define DMA_EP_IEN_EP_IN0 BIT(16)
+
+/* DMA_EP_ISTS - bitmasks. */
+#define DMA_EP_ISTS(index) (1 << (index))
+#define DMA_EP_ISTS_EP_OUT0 BIT(0)
+#define DMA_EP_ISTS_EP_IN0 BIT(16)
+
+#define gadget_to_cdns2_device(g) (container_of(g, struct cdns2_device, gadget))
+#define ep_to_cdns2_ep(ep) (container_of(ep, struct cdns2_endpoint, endpoint))
+
+/*-------------------------------------------------------------------------*/
+#define TRBS_PER_SEGMENT 600
+#define ISO_MAX_INTERVAL 8
+#define MAX_TRB_LENGTH BIT(16)
+#define MAX_ISO_SIZE 3076
+/*
+ * To improve performance the TRB buffer pointers can't cross
+ * 4KB boundaries.
+ */
+#define TRB_MAX_ISO_BUFF_SHIFT 12
+#define TRB_MAX_ISO_BUFF_SIZE BIT(TRB_MAX_ISO_BUFF_SHIFT)
+/* How much data is left before the 4KB boundary? */
+#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_ISO_BUFF_SIZE - \
+ ((addr) & (TRB_MAX_ISO_BUFF_SIZE - 1)))
+
+#if TRBS_PER_SEGMENT < 2
+#error "Incorrect TRBS_PER_SEGMENT. Minimal Transfer Ring size is 2."
+#endif
+
+/**
+ * struct cdns2_trb - represent Transfer Descriptor block.
+ * @buffer: pointer to buffer data.
+ * @length: length of data.
+ * @control: control flags.
+ *
+ * This structure describes a transfer block handled by the DMA module.
+ */
+struct cdns2_trb {
+ __le32 buffer;
+ __le32 length;
+ __le32 control;
+};
+
+#define TRB_SIZE (sizeof(struct cdns2_trb))
+/*
+ * Two extra TRBs are reserved for isochronous transfers: one to inject a
+ * zero-length packet and an extra LINK TRB to synchronize the ISO transfer.
+ */
+#define TRB_ISO_RESERVED 2
+#define TR_SEG_SIZE (TRB_SIZE * (TRBS_PER_SEGMENT + TRB_ISO_RESERVED))
+
+/* TRB bit mask. */
+#define TRB_TYPE_BITMASK GENMASK(15, 10)
+#define TRB_TYPE(p) ((p) << 10)
+#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10)
+
+/* TRB type IDs. */
+/* Used for Bulk, Interrupt, ISOC, and control data stage. */
+#define TRB_NORMAL 1
+/* TRB for linking ring segments. */
+#define TRB_LINK 6
+
+/* Cycle bit - indicates TRB ownership by driver or hw. */
+#define TRB_CYCLE BIT(0)
+/*
+ * When set to '1', the device will toggle its interpretation of the Cycle bit.
+ */
+#define TRB_TOGGLE BIT(1)
+/* Interrupt on short packet. */
+#define TRB_ISP BIT(2)
+/* Chain bit associates this TRB with the next TRB. */
+#define TRB_CHAIN BIT(4)
+/* Interrupt on completion. */
+#define TRB_IOC BIT(5)
+
+/* Transfer_len bitmasks. */
+#define TRB_LEN(p) ((p) & GENMASK(16, 0))
+#define TRB_BURST(p) (((p) << 24) & GENMASK(31, 24))
+#define TRB_FIELD_TO_BURST(p) (((p) & GENMASK(31, 24)) >> 24)
+
+/* Data buffer pointer bitmasks. */
+#define TRB_BUFFER(p) ((p) & GENMASK(31, 0))
+
+/*-------------------------------------------------------------------------*/
+/* Driver numeric constants. */
+
+/* Maximum address that can be assigned to device. */
+#define USB_DEVICE_MAX_ADDRESS 127
+
+/* One control and 15 IN and 15 OUT endpoints. */
+#define CDNS2_ENDPOINTS_NUM 31
+
+#define CDNS2_EP_ZLP_BUF_SIZE 512
+
+/*-------------------------------------------------------------------------*/
+/* Used structures. */
+
+struct cdns2_device;
+
+/**
+ * struct cdns2_ring - transfer ring representation.
+ * @trbs: pointer to transfer ring.
+ * @dma: dma address of transfer ring.
+ * @free_trbs: number of free TRBs in transfer ring.
+ * @pcs: producer cycle state.
+ * @ccs: consumer cycle state.
+ * @enqueue: enqueue index in transfer ring.
+ * @dequeue: dequeue index in transfer ring.
+ */
+struct cdns2_ring {
+ struct cdns2_trb *trbs;
+ dma_addr_t dma;
+ int free_trbs;
+ u8 pcs;
+ u8 ccs;
+ int enqueue;
+ int dequeue;
+};
+
+/**
+ * struct cdns2_endpoint - extended device side representation of USB endpoint.
+ * @endpoint: usb endpoint.
+ * @pending_list: list of requests queuing on transfer ring.
+ * @deferred_list: list of requests waiting for queuing on transfer ring.
+ * @pdev: device associated with endpoint.
+ * @name: a human readable name e.g. ep1out.
+ * @ring: transfer ring associated with endpoint.
+ * @ep_state: state of endpoint.
+ * @idx: index of endpoint in pdev->eps table.
+ * @dir: endpoint direction.
+ * @num: endpoint number (1 - 15).
+ * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK.
+ * @interval: interval between packets used for ISOC and Interrupt endpoint.
+ * @buffering: on-chip buffers assigned to endpoint.
+ * @trb_burst_size: number of burst used in TRB.
+ * @skip: Sometimes the controller cannot process the isochronous endpoint
+ *        ring quickly enough; it then misses some isoc TDs on the ring and
+ *        generates an ISO transmission error.
+ *        The driver sets the skip flag when it receives an ISO transmission
+ *        error and processes the missed TDs on the endpoint ring.
+ * @wa1_set: use WA1.
+ * @wa1_trb: TRB assigned to WA1.
+ * @wa1_trb_index: TRB index for WA1.
+ * @wa1_cycle_bit: correct cycle bit for WA1.
+ */
+struct cdns2_endpoint {
+ struct usb_ep endpoint;
+ struct list_head pending_list;
+ struct list_head deferred_list;
+
+ struct cdns2_device *pdev;
+ char name[20];
+
+ struct cdns2_ring ring;
+
+#define EP_ENABLED BIT(0)
+#define EP_STALLED BIT(1)
+#define EP_STALL_PENDING BIT(2)
+#define EP_WEDGE BIT(3)
+#define EP_CLAIMED BIT(4)
+#define EP_RING_FULL BIT(5)
+#define EP_DEFERRED_DRDY BIT(6)
+
+ u32 ep_state;
+
+ u8 idx;
+ u8 dir;
+ u8 num;
+ u8 type;
+ int interval;
+ u8 buffering;
+ u8 trb_burst_size;
+ bool skip;
+
+ unsigned int wa1_set:1;
+ struct cdns2_trb *wa1_trb;
+ unsigned int wa1_trb_index;
+ unsigned int wa1_cycle_bit:1;
+};
+
+/**
+ * struct cdns2_request - extended device side representation of usb_request
+ * object.
+ * @request: generic usb_request object describing single I/O request.
+ * @pep: extended representation of usb_ep object.
+ * @trb: the first TRB associated with this request.
+ * @start_trb: number of the first TRB in transfer ring.
+ * @end_trb: number of the last TRB in transfer ring.
+ * @list: used for queuing request in lists.
+ * @finished_trb: number of TRBs already completed for this request.
+ * @num_of_trb: number of TRBs associated with this request.
+ */
+struct cdns2_request {
+ struct usb_request request;
+ struct cdns2_endpoint *pep;
+ struct cdns2_trb *trb;
+ int start_trb;
+ int end_trb;
+ struct list_head list;
+ int finished_trb;
+ int num_of_trb;
+};
+
+#define to_cdns2_request(r) (container_of(r, struct cdns2_request, request))
+
+/* Stages used during enumeration process.*/
+#define CDNS2_SETUP_STAGE 0x0
+#define CDNS2_DATA_STAGE 0x1
+#define CDNS2_STATUS_STAGE 0x2
+
+/**
+ * struct cdns2_device - represent USB device.
+ * @dev: pointer to device structure associated with this controller.
+ * @gadget: device side representation of the peripheral controller.
+ * @gadget_driver: pointer to the gadget driver.
+ * @lock: for synchronizing.
+ * @irq: interrupt line number.
+ * @regs: base address for registers.
+ * @usb_regs: base address for common USB registers.
+ * @ep0_regs: base address for endpoint 0 related registers.
+ * @epx_regs: base address for all non-control endpoint registers.
+ * @interrupt_regs: base address for interrupt handling related registers.
+ * @adma_regs: base address for ADMA registers.
+ * @eps_dma_pool: endpoint Transfer Ring pool.
+ * @setup: used while processing usb control requests.
+ * @ep0_preq: private request used while handling EP0.
+ * @ep0_stage: ep0 stage during enumeration process.
+ * @zlp_buf: zlp buffer.
+ * @dev_address: device address assigned by host.
+ * @eps: array of objects describing endpoints.
+ * @selected_ep: currently selected endpoint. It's used only to improve
+ * performance by limiting access to dma_ep_sel register.
+ * @is_selfpowered: device is self powered.
+ * @may_wakeup: allows device to remote wakeup the host.
+ * @status_completion_no_call: indicates that the driver is waiting for the
+ *	status stage completion. It's used for deferred SET_CONFIGURATION requests.
+ * @in_lpm: indicates that the controller is in low power mode.
+ * @pending_status_wq: workqueue handling status stage for deferred requests.
+ * @pending_status_request: request for which status stage was deferred.
+ * @eps_supported: endpoints supported by controller in form:
+ * bit: 0 - ep0, 1 - epOut1, 2 - epIn1, 3 - epOut2 ...
+ * @burst_opt: array with the best burst size value for different TRB size.
+ * @onchip_tx_buf: size of transmit on-chip buffer in KB.
+ * @onchip_rx_buf: size of receive on-chip buffer in KB.
+ */
+struct cdns2_device {
+ struct device *dev;
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *gadget_driver;
+
+ /* generic spin-lock for drivers */
+ spinlock_t lock;
+ int irq;
+ void __iomem *regs;
+ struct cdns2_usb_regs __iomem *usb_regs;
+ struct cdns2_ep0_regs __iomem *ep0_regs;
+ struct cdns2_epx_regs __iomem *epx_regs;
+ struct cdns2_interrupt_regs __iomem *interrupt_regs;
+ struct cdns2_adma_regs __iomem *adma_regs;
+ struct dma_pool *eps_dma_pool;
+ struct usb_ctrlrequest setup;
+ struct cdns2_request ep0_preq;
+ u8 ep0_stage;
+ void *zlp_buf;
+ u8 dev_address;
+ struct cdns2_endpoint eps[CDNS2_ENDPOINTS_NUM];
+ u32 selected_ep;
+ bool is_selfpowered;
+ bool may_wakeup;
+ bool status_completion_no_call;
+ bool in_lpm;
+ struct work_struct pending_status_wq;
+ struct usb_request *pending_status_request;
+ u32 eps_supported;
+ u8 burst_opt[MAX_ISO_SIZE + 1];
+
+	/* In KB. */
+ u16 onchip_tx_buf;
+ u16 onchip_rx_buf;
+};
+
+#define CDNS2_IF_EP_EXIST(pdev, ep_num, dir) \
+ ((pdev)->eps_supported & \
+ (BIT(ep_num) << ((dir) ? 0 : 16)))
+
+dma_addr_t cdns2_trb_virt_to_dma(struct cdns2_endpoint *pep,
+ struct cdns2_trb *trb);
+void cdns2_pending_setup_status_handler(struct work_struct *work);
+void cdns2_select_ep(struct cdns2_device *pdev, u32 ep);
+struct cdns2_request *cdns2_next_preq(struct list_head *list);
+struct usb_request *cdns2_gadget_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags);
+void cdns2_gadget_ep_free_request(struct usb_ep *ep,
+ struct usb_request *request);
+int cdns2_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request);
+void cdns2_gadget_giveback(struct cdns2_endpoint *pep,
+ struct cdns2_request *priv_req,
+ int status);
+void cdns2_init_ep0(struct cdns2_device *pdev, struct cdns2_endpoint *pep);
+void cdns2_ep0_config(struct cdns2_device *pdev);
+void cdns2_handle_ep0_interrupt(struct cdns2_device *pdev, int dir);
+void cdns2_handle_setup_packet(struct cdns2_device *pdev);
+int cdns2_gadget_resume(struct cdns2_device *pdev, bool hibernated);
+int cdns2_gadget_suspend(struct cdns2_device *pdev);
+void cdns2_gadget_remove(struct cdns2_device *pdev);
+int cdns2_gadget_init(struct cdns2_device *pdev);
+void set_reg_bit_8(void __iomem *ptr, u8 mask);
+int cdns2_halt_endpoint(struct cdns2_device *pdev, struct cdns2_endpoint *pep,
+ int value);
+
+#endif /* __LINUX_CDNS2_GADGET */
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-pci.c b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
new file mode 100644
index 000000000000..1691541c9413
--- /dev/null
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBHS-DEV controller - PCI Glue driver.
+ *
+ * Copyright (C) 2023 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include "cdns2-gadget.h"
+
+#define PCI_DRIVER_NAME "cdns-pci-usbhs"
+#define CDNS_VENDOR_ID 0x17cd
+#define CDNS_DEVICE_ID 0x0120
+#define PCI_BAR_DEV 0
+#define PCI_DEV_FN_DEVICE 0
+
+static int cdns2_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ resource_size_t rsrc_start, rsrc_len;
+ struct device *dev = &pdev->dev;
+ struct cdns2_device *priv_dev;
+ struct resource *res;
+ int ret;
+
+	/* For the GADGET function, the PCI function number (devfn) is 0. */
+ if (!id || pdev->devfn != PCI_DEV_FN_DEVICE ||
+ pdev->class != PCI_CLASS_SERIAL_USB_DEVICE)
+ return -EINVAL;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ priv_dev = devm_kzalloc(&pdev->dev, sizeof(*priv_dev), GFP_KERNEL);
+ if (!priv_dev)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Initialize resources\n");
+ rsrc_start = pci_resource_start(pdev, PCI_BAR_DEV);
+ rsrc_len = pci_resource_len(pdev, PCI_BAR_DEV);
+
+ res = devm_request_mem_region(dev, rsrc_start, rsrc_len, "dev");
+ if (!res) {
+ dev_dbg(dev, "controller already in use\n");
+ return -EBUSY;
+ }
+
+ priv_dev->regs = devm_ioremap(dev, rsrc_start, rsrc_len);
+ if (!priv_dev->regs) {
+ dev_dbg(dev, "error mapping memory\n");
+ return -EFAULT;
+ }
+
+ priv_dev->irq = pdev->irq;
+ dev_dbg(dev, "USBSS-DEV physical base addr: %pa\n",
+ &rsrc_start);
+
+ priv_dev->dev = dev;
+
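+	/* Fixed configuration for this PCI function: supported-endpoint bitmap and 16 KB of on-chip buffer in each direction. */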
+ priv_dev->eps_supported = 0x000f000f;
+ priv_dev->onchip_tx_buf = 16;
+ priv_dev->onchip_rx_buf = 16;
+
+ ret = cdns2_gadget_init(priv_dev);
+ if (ret)
+ return ret;
+
+ pci_set_drvdata(pdev, priv_dev);
+
+ device_wakeup_enable(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+}
+
+static void cdns2_pci_remove(struct pci_dev *pdev)
+{
+ struct cdns2_device *priv_dev = pci_get_drvdata(pdev);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ cdns2_gadget_remove(priv_dev);
+}
+
+static int cdns2_pci_suspend(struct device *dev)
+{
+ struct cdns2_device *priv_dev = dev_get_drvdata(dev);
+
+ return cdns2_gadget_suspend(priv_dev);
+}
+
+static int cdns2_pci_resume(struct device *dev)
+{
+ struct cdns2_device *priv_dev = dev_get_drvdata(dev);
+
+ return cdns2_gadget_resume(priv_dev, 1);
+}
+
+static const struct dev_pm_ops cdns2_pci_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(cdns2_pci_suspend, cdns2_pci_resume)
+};
+
+static const struct pci_device_id cdns2_pci_ids[] = {
+ { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
+ { 0, }
+};
+
+static struct pci_driver cdns2_pci_driver = {
+ .name = "cdns2-pci",
+ .id_table = &cdns2_pci_ids[0],
+ .probe = cdns2_pci_probe,
+ .remove = cdns2_pci_remove,
+ .driver = {
+ .pm = pm_ptr(&cdns2_pci_pm_ops),
+ }
+};
+
+module_pci_driver(cdns2_pci_driver);
+MODULE_DEVICE_TABLE(pci, cdns2_pci_ids);
+
+MODULE_ALIAS("pci:cdns2");
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence CDNS2 PCI driver");
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-trace.c b/drivers/usb/gadget/udc/cdns2/cdns2-trace.c
new file mode 100644
index 000000000000..de6b8cc3d071
--- /dev/null
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-trace.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USBHS device controller driver Trace Support
+ *
+ * Copyright (C) 2023 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "cdns2-trace.h"
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-trace.h b/drivers/usb/gadget/udc/cdns2/cdns2-trace.h
new file mode 100644
index 000000000000..61f241634ea5
--- /dev/null
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-trace.h
@@ -0,0 +1,605 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * USBHS-DEV device controller driver.
+ * Trace support header file.
+ *
+ * Copyright (C) 2023 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cdns2-dev
+
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR cdns2_dev
+
+#if !defined(__LINUX_CDNS2_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __LINUX_CDNS2_TRACE
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <asm/byteorder.h>
+#include <linux/usb/ch9.h>
+#include "cdns2-gadget.h"
+#include "cdns2-debug.h"
+
+#define CDNS2_MSG_MAX 500
+
+DECLARE_EVENT_CLASS(cdns2_log_enable_disable,
+ TP_PROTO(int set),
+ TP_ARGS(set),
+ TP_STRUCT__entry(
+ __field(int, set)
+ ),
+ TP_fast_assign(
+ __entry->set = set;
+ ),
+ TP_printk("%s", __entry->set ? "enabled" : "disabled")
+);
+
+DEFINE_EVENT(cdns2_log_enable_disable, cdns2_pullup,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdns2_log_enable_disable, cdns2_lpm,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdns2_log_enable_disable, cdns2_may_wakeup,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_simple,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(text, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(text, msg);
+ ),
+ TP_printk("%s", __get_str(text))
+);
+
+DEFINE_EVENT(cdns2_log_simple, cdns2_no_room_on_ring,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdns2_log_simple, cdns2_ep0_status_stage,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdns2_log_simple, cdns2_ep0_set_config,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdns2_log_simple, cdns2_ep0_setup,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdns2_log_simple, cdns2_device_state,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+TRACE_EVENT(cdns2_ep_halt,
+ TP_PROTO(struct cdns2_endpoint *ep_priv, u8 halt, u8 flush),
+ TP_ARGS(ep_priv, halt, flush),
+ TP_STRUCT__entry(
+ __string(name, ep_priv->name)
+ __field(u8, halt)
+ __field(u8, flush)
+ ),
+ TP_fast_assign(
+ __assign_str(name, ep_priv->name);
+ __entry->halt = halt;
+ __entry->flush = flush;
+ ),
+ TP_printk("Halt %s for %s: %s", __entry->flush ? " and flush" : "",
+ __get_str(name), __entry->halt ? "set" : "cleared")
+);
+
+TRACE_EVENT(cdns2_wa1,
+ TP_PROTO(struct cdns2_endpoint *ep_priv, char *msg),
+ TP_ARGS(ep_priv, msg),
+ TP_STRUCT__entry(
+ __string(ep_name, ep_priv->name)
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(ep_name, ep_priv->name);
+ __assign_str(msg, msg);
+ ),
+ TP_printk("WA1: %s %s", __get_str(ep_name), __get_str(msg))
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_doorbell,
+ TP_PROTO(struct cdns2_endpoint *pep, u32 ep_trbaddr),
+ TP_ARGS(pep, ep_trbaddr),
+ TP_STRUCT__entry(
+ __string(name, pep->num ? pep->name :
+ (pep->dir ? "ep0in" : "ep0out"))
+ __field(u32, ep_trbaddr)
+ ),
+ TP_fast_assign(
+ __assign_str(name, pep->name);
+ __entry->ep_trbaddr = ep_trbaddr;
+ ),
+ TP_printk("%s, ep_trbaddr %08x", __get_str(name),
+ __entry->ep_trbaddr)
+);
+
+DEFINE_EVENT(cdns2_log_doorbell, cdns2_doorbell_ep0,
+ TP_PROTO(struct cdns2_endpoint *pep, u32 ep_trbaddr),
+ TP_ARGS(pep, ep_trbaddr)
+);
+
+DEFINE_EVENT(cdns2_log_doorbell, cdns2_doorbell_epx,
+ TP_PROTO(struct cdns2_endpoint *pep, u32 ep_trbaddr),
+ TP_ARGS(pep, ep_trbaddr)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_usb_irq,
+ TP_PROTO(u32 usb_irq, u32 ext_irq),
+ TP_ARGS(usb_irq, ext_irq),
+ TP_STRUCT__entry(
+ __field(u32, usb_irq)
+ __field(u32, ext_irq)
+ ),
+ TP_fast_assign(
+ __entry->usb_irq = usb_irq;
+ __entry->ext_irq = ext_irq;
+ ),
+ TP_printk("%s", cdns2_decode_usb_irq(__get_buf(CDNS2_MSG_MAX),
+ CDNS2_MSG_MAX,
+ __entry->usb_irq,
+ __entry->ext_irq))
+);
+
+DEFINE_EVENT(cdns2_log_usb_irq, cdns2_usb_irq,
+ TP_PROTO(u32 usb_irq, u32 ext_irq),
+ TP_ARGS(usb_irq, ext_irq)
+);
+
+TRACE_EVENT(cdns2_dma_ep_ists,
+ TP_PROTO(u32 dma_ep_ists),
+ TP_ARGS(dma_ep_ists),
+ TP_STRUCT__entry(
+ __field(u32, dma_ep_ists)
+ ),
+ TP_fast_assign(
+ __entry->dma_ep_ists = dma_ep_ists;
+ ),
+ TP_printk("OUT: 0x%04x, IN: 0x%04x", (u16)__entry->dma_ep_ists,
+ __entry->dma_ep_ists >> 16)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_epx_irq,
+ TP_PROTO(struct cdns2_device *pdev, struct cdns2_endpoint *pep),
+ TP_ARGS(pdev, pep),
+ TP_STRUCT__entry(
+ __string(ep_name, pep->name)
+ __field(u32, ep_sts)
+ __field(u32, ep_ists)
+ __field(u32, ep_traddr)
+ ),
+ TP_fast_assign(
+ __assign_str(ep_name, pep->name);
+ __entry->ep_sts = readl(&pdev->adma_regs->ep_sts);
+ __entry->ep_ists = readl(&pdev->adma_regs->ep_ists);
+ __entry->ep_traddr = readl(&pdev->adma_regs->ep_traddr);
+ ),
+ TP_printk("%s, ep_traddr: %08x",
+ cdns2_decode_epx_irq(__get_buf(CDNS2_MSG_MAX), CDNS2_MSG_MAX,
+ __get_str(ep_name),
+ __entry->ep_ists, __entry->ep_sts),
+ __entry->ep_traddr)
+);
+
+DEFINE_EVENT(cdns2_log_epx_irq, cdns2_epx_irq,
+ TP_PROTO(struct cdns2_device *pdev, struct cdns2_endpoint *pep),
+ TP_ARGS(pdev, pep)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_ep0_irq,
+ TP_PROTO(struct cdns2_device *pdev),
+ TP_ARGS(pdev),
+ TP_STRUCT__entry(
+ __field(int, ep_dir)
+ __field(u32, ep_ists)
+ __field(u32, ep_sts)
+ ),
+ TP_fast_assign(
+ __entry->ep_dir = pdev->selected_ep;
+ __entry->ep_ists = readl(&pdev->adma_regs->ep_ists);
+ __entry->ep_sts = readl(&pdev->adma_regs->ep_sts);
+ ),
+ TP_printk("%s", cdns2_decode_ep0_irq(__get_buf(CDNS2_MSG_MAX),
+ CDNS2_MSG_MAX,
+ __entry->ep_ists, __entry->ep_sts,
+ __entry->ep_dir))
+);
+
+DEFINE_EVENT(cdns2_log_ep0_irq, cdns2_ep0_irq,
+ TP_PROTO(struct cdns2_device *pdev),
+ TP_ARGS(pdev)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_ctrl,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl),
+ TP_STRUCT__entry(
+ __field(u8, bRequestType)
+ __field(u8, bRequest)
+ __field(u16, wValue)
+ __field(u16, wIndex)
+ __field(u16, wLength)
+ ),
+ TP_fast_assign(
+ __entry->bRequestType = ctrl->bRequestType;
+ __entry->bRequest = ctrl->bRequest;
+ __entry->wValue = le16_to_cpu(ctrl->wValue);
+ __entry->wIndex = le16_to_cpu(ctrl->wIndex);
+ __entry->wLength = le16_to_cpu(ctrl->wLength);
+ ),
+ TP_printk("%s", usb_decode_ctrl(__get_buf(CDNS2_MSG_MAX), CDNS2_MSG_MAX,
+ __entry->bRequestType,
+ __entry->bRequest, __entry->wValue,
+ __entry->wIndex, __entry->wLength)
+ )
+);
+
+DEFINE_EVENT(cdns2_log_ctrl, cdns2_ctrl_req,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_request,
+ TP_PROTO(struct cdns2_request *preq),
+ TP_ARGS(preq),
+ TP_STRUCT__entry(
+ __string(name, preq->pep->name)
+ __field(struct usb_request *, request)
+ __field(struct cdns2_request *, preq)
+ __field(void *, buf)
+ __field(unsigned int, actual)
+ __field(unsigned int, length)
+ __field(int, status)
+ __field(dma_addr_t, dma)
+ __field(int, zero)
+ __field(int, short_not_ok)
+ __field(int, no_interrupt)
+ __field(struct scatterlist*, sg)
+ __field(unsigned int, num_sgs)
+ __field(unsigned int, num_mapped_sgs)
+ __field(int, start_trb)
+ __field(int, end_trb)
+ ),
+ TP_fast_assign(
+ __assign_str(name, preq->pep->name);
+ __entry->request = &preq->request;
+ __entry->preq = preq;
+ __entry->buf = preq->request.buf;
+ __entry->actual = preq->request.actual;
+ __entry->length = preq->request.length;
+ __entry->status = preq->request.status;
+ __entry->dma = preq->request.dma;
+ __entry->zero = preq->request.zero;
+ __entry->short_not_ok = preq->request.short_not_ok;
+ __entry->no_interrupt = preq->request.no_interrupt;
+ __entry->sg = preq->request.sg;
+ __entry->num_sgs = preq->request.num_sgs;
+ __entry->num_mapped_sgs = preq->request.num_mapped_sgs;
+ __entry->start_trb = preq->start_trb;
+ __entry->end_trb = preq->end_trb;
+ ),
+ TP_printk("%s: req: %p, preq: %p, req buf: %p, length: %u/%u, status: %d,"
+ "buf dma: (%pad), %s%s%s, sg: %p, num_sgs: %d, num_m_sgs: %d,"
+ "trb: [start: %d, end: %d]",
+ __get_str(name), __entry->request, __entry->preq,
+ __entry->buf, __entry->actual, __entry->length,
+ __entry->status, &__entry->dma,
+ __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "I" : "i",
+ __entry->sg, __entry->num_sgs, __entry->num_mapped_sgs,
+ __entry->start_trb,
+ __entry->end_trb
+ )
+);
+
+DEFINE_EVENT(cdns2_log_request, cdns2_request_enqueue,
+ TP_PROTO(struct cdns2_request *preq),
+ TP_ARGS(preq)
+);
+
+DEFINE_EVENT(cdns2_log_request, cdns2_request_enqueue_error,
+ TP_PROTO(struct cdns2_request *preq),
+ TP_ARGS(preq)
+);
+
+DEFINE_EVENT(cdns2_log_request, cdns2_alloc_request,
+ TP_PROTO(struct cdns2_request *preq),
+ TP_ARGS(preq)
+);
+
+DEFINE_EVENT(cdns2_log_request, cdns2_free_request,
+ TP_PROTO(struct cdns2_request *preq),
+ TP_ARGS(preq)
+);
+
+DEFINE_EVENT(cdns2_log_request, cdns2_ep_queue,
+ TP_PROTO(struct cdns2_request *preq),
+ TP_ARGS(preq)
+);
+
+DEFINE_EVENT(cdns2_log_request, cdns2_request_dequeue,
+ TP_PROTO(struct cdns2_request *preq),
+ TP_ARGS(preq)
+);
+
+DEFINE_EVENT(cdns2_log_request, cdns2_request_giveback,
+ TP_PROTO(struct cdns2_request *preq),
+ TP_ARGS(preq)
+);
+
+TRACE_EVENT(cdns2_ep0_enqueue,
+ TP_PROTO(struct cdns2_device *dev_priv, struct usb_request *request),
+ TP_ARGS(dev_priv, request),
+ TP_STRUCT__entry(
+ __field(int, dir)
+ __field(int, length)
+ ),
+ TP_fast_assign(
+ __entry->dir = dev_priv->eps[0].dir;
+ __entry->length = request->length;
+ ),
+ TP_printk("Queue to ep0%s length: %u", __entry->dir ? "in" : "out",
+ __entry->length)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_map_request,
+ TP_PROTO(struct cdns2_request *priv_req),
+ TP_ARGS(priv_req),
+ TP_STRUCT__entry(
+ __string(name, priv_req->pep->name)
+ __field(struct usb_request *, req)
+ __field(void *, buf)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __assign_str(name, priv_req->pep->name);
+ __entry->req = &priv_req->request;
+ __entry->buf = priv_req->request.buf;
+ __entry->dma = priv_req->request.dma;
+ ),
+ TP_printk("%s: req: %p, req buf %p, dma %p",
+ __get_str(name), __entry->req, __entry->buf, &__entry->dma
+ )
+);
+
+DEFINE_EVENT(cdns2_log_map_request, cdns2_map_request,
+ TP_PROTO(struct cdns2_request *req),
+ TP_ARGS(req)
+);
+DEFINE_EVENT(cdns2_log_map_request, cdns2_mapped_request,
+ TP_PROTO(struct cdns2_request *req),
+ TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_trb,
+ TP_PROTO(struct cdns2_endpoint *pep, struct cdns2_trb *trb),
+ TP_ARGS(pep, trb),
+ TP_STRUCT__entry(
+ __string(name, pep->name)
+ __field(struct cdns2_trb *, trb)
+ __field(u32, buffer)
+ __field(u32, length)
+ __field(u32, control)
+ __field(u32, type)
+ ),
+ TP_fast_assign(
+ __assign_str(name, pep->name);
+ __entry->trb = trb;
+ __entry->buffer = le32_to_cpu(trb->buffer);
+ __entry->length = le32_to_cpu(trb->length);
+ __entry->control = le32_to_cpu(trb->control);
+ __entry->type = usb_endpoint_type(pep->endpoint.desc);
+ ),
+ TP_printk("%s: trb V: %p, dma buf: P: 0x%08x, %s",
+ __get_str(name), __entry->trb, __entry->buffer,
+ cdns2_decode_trb(__get_buf(CDNS2_MSG_MAX), CDNS2_MSG_MAX,
+ __entry->control, __entry->length,
+ __entry->buffer))
+);
+
+DEFINE_EVENT(cdns2_log_trb, cdns2_queue_trb,
+ TP_PROTO(struct cdns2_endpoint *pep, struct cdns2_trb *trb),
+ TP_ARGS(pep, trb)
+);
+
+DEFINE_EVENT(cdns2_log_trb, cdns2_complete_trb,
+ TP_PROTO(struct cdns2_endpoint *pep, struct cdns2_trb *trb),
+ TP_ARGS(pep, trb)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_ring,
+ TP_PROTO(struct cdns2_endpoint *pep),
+ TP_ARGS(pep),
+ TP_STRUCT__entry(
+ __dynamic_array(u8, tr_seg, TR_SEG_SIZE)
+ __dynamic_array(u8, pep, sizeof(struct cdns2_endpoint))
+ __dynamic_array(char, buffer,
+ (TRBS_PER_SEGMENT * 65) + CDNS2_MSG_MAX)
+ ),
+ TP_fast_assign(
+ memcpy(__get_dynamic_array(pep), pep,
+ sizeof(struct cdns2_endpoint));
+ memcpy(__get_dynamic_array(tr_seg), pep->ring.trbs,
+ TR_SEG_SIZE);
+ ),
+
+ TP_printk("%s",
+ cdns2_raw_ring((struct cdns2_endpoint *)__get_str(pep),
+ (struct cdns2_trb *)__get_str(tr_seg),
+ __get_str(buffer),
+ (TRBS_PER_SEGMENT * 65) + CDNS2_MSG_MAX))
+);
+
+DEFINE_EVENT(cdns2_log_ring, cdns2_ring,
+ TP_PROTO(struct cdns2_endpoint *pep),
+ TP_ARGS(pep)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_ep,
+ TP_PROTO(struct cdns2_endpoint *pep),
+ TP_ARGS(pep),
+ TP_STRUCT__entry(
+ __string(name, pep->name)
+ __field(unsigned int, maxpacket)
+ __field(unsigned int, maxpacket_limit)
+ __field(unsigned int, flags)
+ __field(unsigned int, dir)
+ __field(u8, enqueue)
+ __field(u8, dequeue)
+ ),
+ TP_fast_assign(
+ __assign_str(name, pep->name);
+ __entry->maxpacket = pep->endpoint.maxpacket;
+ __entry->maxpacket_limit = pep->endpoint.maxpacket_limit;
+ __entry->flags = pep->ep_state;
+ __entry->dir = pep->dir;
+ __entry->enqueue = pep->ring.enqueue;
+ __entry->dequeue = pep->ring.dequeue;
+ ),
+ TP_printk("%s: mps: %d/%d, enq idx: %d, deq idx: %d, "
+ "flags: %s%s%s%s, dir: %s",
+ __get_str(name), __entry->maxpacket,
+ __entry->maxpacket_limit, __entry->enqueue,
+ __entry->dequeue,
+ __entry->flags & EP_ENABLED ? "EN | " : "",
+ __entry->flags & EP_STALLED ? "STALLED | " : "",
+ __entry->flags & EP_WEDGE ? "WEDGE | " : "",
+ __entry->flags & EP_RING_FULL ? "RING FULL |" : "",
+ __entry->dir ? "IN" : "OUT"
+ )
+);
+
+DEFINE_EVENT(cdns2_log_ep, cdns2_gadget_ep_enable,
+ TP_PROTO(struct cdns2_endpoint *pep),
+ TP_ARGS(pep)
+);
+
+DEFINE_EVENT(cdns2_log_ep, cdns2_gadget_ep_disable,
+ TP_PROTO(struct cdns2_endpoint *pep),
+ TP_ARGS(pep)
+);
+
+DEFINE_EVENT(cdns2_log_ep, cdns2_iso_out_ep_disable,
+ TP_PROTO(struct cdns2_endpoint *pep),
+ TP_ARGS(pep)
+);
+
+DEFINE_EVENT(cdns2_log_ep, cdns2_ep_busy_try_halt_again,
+ TP_PROTO(struct cdns2_endpoint *pep),
+ TP_ARGS(pep)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_request_handled,
+ TP_PROTO(struct cdns2_request *priv_req, int current_index,
+ int handled),
+ TP_ARGS(priv_req, current_index, handled),
+ TP_STRUCT__entry(
+ __field(struct cdns2_request *, priv_req)
+ __field(unsigned int, dma_position)
+ __field(unsigned int, handled)
+ __field(unsigned int, dequeue_idx)
+ __field(unsigned int, enqueue_idx)
+ __field(unsigned int, start_trb)
+ __field(unsigned int, end_trb)
+ ),
+ TP_fast_assign(
+ __entry->priv_req = priv_req;
+ __entry->dma_position = current_index;
+ __entry->handled = handled;
+ __entry->dequeue_idx = priv_req->pep->ring.dequeue;
+ __entry->enqueue_idx = priv_req->pep->ring.enqueue;
+ __entry->start_trb = priv_req->start_trb;
+ __entry->end_trb = priv_req->end_trb;
+ ),
+ TP_printk("Req: %p %s, DMA pos: %d, ep deq: %d, ep enq: %d,"
+ " start trb: %d, end trb: %d",
+ __entry->priv_req,
+ __entry->handled ? "handled" : "not handled",
+ __entry->dma_position, __entry->dequeue_idx,
+ __entry->enqueue_idx, __entry->start_trb,
+ __entry->end_trb
+ )
+);
+
+DEFINE_EVENT(cdns2_log_request_handled, cdns2_request_handled,
+ TP_PROTO(struct cdns2_request *priv_req, int current_index,
+ int handled),
+ TP_ARGS(priv_req, current_index, handled)
+);
+
+DECLARE_EVENT_CLASS(cdns2_log_epx_reg_config,
+ TP_PROTO(struct cdns2_device *pdev, struct cdns2_endpoint *pep),
+ TP_ARGS(pdev, pep),
+ TP_STRUCT__entry(
+ __string(ep_name, pep->name)
+ __field(u8, burst_size)
+ __field(__le16, maxpack_reg)
+ __field(__u8, con_reg)
+ __field(u32, ep_sel_reg)
+ __field(u32, ep_sts_en_reg)
+ __field(u32, ep_cfg_reg)
+ ),
+ TP_fast_assign(
+ __assign_str(ep_name, pep->name);
+ __entry->burst_size = pep->trb_burst_size;
+ __entry->maxpack_reg = pep->dir ? readw(&pdev->epx_regs->txmaxpack[pep->num - 1]) :
+ readw(&pdev->epx_regs->rxmaxpack[pep->num - 1]);
+ __entry->con_reg = pep->dir ? readb(&pdev->epx_regs->ep[pep->num - 1].txcon) :
+ readb(&pdev->epx_regs->ep[pep->num - 1].rxcon);
+ __entry->ep_sel_reg = readl(&pdev->adma_regs->ep_sel);
+ __entry->ep_sts_en_reg = readl(&pdev->adma_regs->ep_sts_en);
+ __entry->ep_cfg_reg = readl(&pdev->adma_regs->ep_cfg);
+ ),
+
+ TP_printk("%s, maxpack: %d, con: %02x, dma_ep_sel: %08x, dma_ep_sts_en: %08x"
+ " dma_ep_cfg %08x",
+ __get_str(ep_name), __entry->maxpack_reg, __entry->con_reg,
+ __entry->ep_sel_reg, __entry->ep_sts_en_reg,
+ __entry->ep_cfg_reg
+ )
+);
+
+DEFINE_EVENT(cdns2_log_epx_reg_config, cdns2_epx_hw_cfg,
+ TP_PROTO(struct cdns2_device *pdev, struct cdns2_endpoint *pep),
+ TP_ARGS(pdev, pep)
+);
+
+#endif /* __LINUX_CDNS2_TRACE */
+
+/* This part must be outside header guard. */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cdns2-trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 83fd1de14784..59188ea065e0 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -61,7 +61,7 @@ struct usb_udc {
struct mutex connect_lock;
};
-static struct class *udc_class;
+static const struct class udc_class;
static LIST_HEAD(udc_list);
/* Protects udc_list, udc->driver, driver->is_bound, and related calls */
@@ -1378,7 +1378,7 @@ int usb_add_gadget(struct usb_gadget *gadget)
device_initialize(&udc->dev);
udc->dev.release = usb_udc_release;
- udc->dev.class = udc_class;
+ udc->dev.class = &udc_class;
udc->dev.groups = usb_udc_attr_groups;
udc->dev.parent = gadget->dev.parent;
ret = dev_set_name(&udc->dev, "%s",
@@ -1856,6 +1856,11 @@ static int usb_udc_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static const struct class udc_class = {
+ .name = "udc",
+ .dev_uevent = usb_udc_uevent,
+};
+
static const struct bus_type gadget_bus_type = {
.name = "gadget",
.probe = gadget_bind_driver,
@@ -1867,18 +1872,13 @@ static int __init usb_udc_init(void)
{
int rc;
- udc_class = class_create("udc");
- if (IS_ERR(udc_class)) {
- pr_err("failed to create udc class --> %ld\n",
- PTR_ERR(udc_class));
- return PTR_ERR(udc_class);
- }
-
- udc_class->dev_uevent = usb_udc_uevent;
+ rc = class_register(&udc_class);
+ if (rc)
+ return rc;
rc = bus_register(&gadget_bus_type);
if (rc)
- class_destroy(udc_class);
+ class_unregister(&udc_class);
return rc;
}
subsys_initcall(usb_udc_init);
@@ -1886,7 +1886,7 @@ subsys_initcall(usb_udc_init);
static void __exit usb_udc_exit(void)
{
bus_unregister(&gadget_bus_type);
- class_destroy(udc_class);
+ class_unregister(&udc_class);
}
module_exit(usb_udc_exit);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 899ac9f9c279..0953e1b5c030 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1108,13 +1108,12 @@ err_udc:
return rc;
}
-static int dummy_udc_remove(struct platform_device *pdev)
+static void dummy_udc_remove(struct platform_device *pdev)
{
struct dummy *dum = platform_get_drvdata(pdev);
device_remove_file(&dum->gadget.dev, &dev_attr_function);
usb_del_gadget_udc(&dum->gadget);
- return 0;
}
static void dummy_udc_pm(struct dummy *dum, struct dummy_hcd *dum_hcd,
@@ -1150,7 +1149,7 @@ static int dummy_udc_resume(struct platform_device *pdev)
static struct platform_driver dummy_udc_driver = {
.probe = dummy_udc_probe,
- .remove = dummy_udc_remove,
+ .remove_new = dummy_udc_remove,
.suspend = dummy_udc_suspend,
.resume = dummy_udc_resume,
.driver = {
@@ -2701,7 +2700,7 @@ put_usb2_hcd:
return retval;
}
-static int dummy_hcd_remove(struct platform_device *pdev)
+static void dummy_hcd_remove(struct platform_device *pdev)
{
struct dummy *dum;
@@ -2717,8 +2716,6 @@ static int dummy_hcd_remove(struct platform_device *pdev)
dum->hs_hcd = NULL;
dum->ss_hcd = NULL;
-
- return 0;
}
static int dummy_hcd_suspend(struct platform_device *pdev, pm_message_t state)
@@ -2753,7 +2750,7 @@ static int dummy_hcd_resume(struct platform_device *pdev)
static struct platform_driver dummy_hcd_driver = {
.probe = dummy_hcd_probe,
- .remove = dummy_hcd_remove,
+ .remove_new = dummy_hcd_remove,
.suspend = dummy_hcd_suspend,
.resume = dummy_hcd_resume,
.driver = {
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 3b1cc8fa30c8..9c5dc1c1a68e 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2628,7 +2628,7 @@ static int qe_udc_resume(struct platform_device *dev)
}
#endif
-static int qe_udc_remove(struct platform_device *ofdev)
+static void qe_udc_remove(struct platform_device *ofdev)
{
struct qe_udc *udc = platform_get_drvdata(ofdev);
struct qe_ep *ep;
@@ -2679,8 +2679,6 @@ static int qe_udc_remove(struct platform_device *ofdev)
/* wait for release() of gadget.dev to free udc */
wait_for_completion(&done);
-
- return 0;
}
/*-------------------------------------------------------------------------*/
@@ -2708,7 +2706,7 @@ static struct platform_driver udc_driver = {
.of_match_table = qe_udc_match,
},
.probe = qe_udc_probe,
- .remove = qe_udc_remove,
+ .remove_new = qe_udc_remove,
#ifdef CONFIG_PM
.suspend = qe_udc_suspend,
.resume = qe_udc_resume,
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 08ba9c8c1e67..bd03d475f927 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1338,7 +1338,7 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
.udc_stop = fusb300_udc_stop,
};
-static int fusb300_remove(struct platform_device *pdev)
+static void fusb300_remove(struct platform_device *pdev)
{
struct fusb300 *fusb300 = platform_get_drvdata(pdev);
int i;
@@ -1352,8 +1352,6 @@ static int fusb300_remove(struct platform_device *pdev)
for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
kfree(fusb300->ep[i]);
kfree(fusb300);
-
- return 0;
}
static int fusb300_probe(struct platform_device *pdev)
@@ -1508,7 +1506,7 @@ clean_up:
}
static struct platform_driver fusb300_driver = {
- .remove = fusb300_remove,
+ .remove_new = fusb300_remove,
.driver = {
.name = udc_name,
},
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 06e21cee431b..e05f45a4b56b 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1512,7 +1512,7 @@ static const struct usb_gadget_ops m66592_gadget_ops = {
.pullup = m66592_pullup,
};
-static int m66592_remove(struct platform_device *pdev)
+static void m66592_remove(struct platform_device *pdev)
{
struct m66592 *m66592 = platform_get_drvdata(pdev);
@@ -1527,7 +1527,6 @@ static int m66592_remove(struct platform_device *pdev)
clk_put(m66592->clk);
}
kfree(m66592);
- return 0;
}
static void nop_completion(struct usb_ep *ep, struct usb_request *r)
@@ -1688,7 +1687,7 @@ clean_up:
/*-------------------------------------------------------------------------*/
static struct platform_driver m66592_driver = {
- .remove = m66592_remove,
+ .remove_new = m66592_remove,
.driver = {
.name = udc_name,
},
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 411b6179782c..3473048a85f5 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1746,7 +1746,7 @@ static irqreturn_t mv_u3d_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int mv_u3d_remove(struct platform_device *dev)
+static void mv_u3d_remove(struct platform_device *dev)
{
struct mv_u3d *u3d = platform_get_drvdata(dev);
@@ -1775,8 +1775,6 @@ static int mv_u3d_remove(struct platform_device *dev)
clk_put(u3d->clk);
kfree(u3d);
-
- return 0;
}
static int mv_u3d_probe(struct platform_device *dev)
@@ -2049,7 +2047,7 @@ static void mv_u3d_shutdown(struct platform_device *dev)
static struct platform_driver mv_u3d_driver = {
.probe = mv_u3d_probe,
- .remove = mv_u3d_remove,
+ .remove_new = mv_u3d_remove,
.shutdown = mv_u3d_shutdown,
.driver = {
.name = "mv-u3d",
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 08474c08d874..79db74e2040b 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2077,7 +2077,7 @@ static void gadget_release(struct device *_dev)
complete(udc->done);
}
-static int mv_udc_remove(struct platform_device *pdev)
+static void mv_udc_remove(struct platform_device *pdev)
{
struct mv_udc *udc;
@@ -2099,8 +2099,6 @@ static int mv_udc_remove(struct platform_device *pdev)
/* free dev, wait for the release() finished */
wait_for_completion(udc->done);
-
- return 0;
}
static int mv_udc_probe(struct platform_device *pdev)
@@ -2411,7 +2409,7 @@ static void mv_udc_shutdown(struct platform_device *pdev)
static struct platform_driver udc_driver = {
.probe = mv_udc_probe,
- .remove = mv_udc_remove,
+ .remove_new = mv_udc_remove,
.shutdown = mv_udc_shutdown,
.driver = {
.name = "mv-udc",
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 538c1b9a2883..12e76bb62c20 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2670,7 +2670,7 @@ net2272_plat_probe(struct platform_device *pdev)
return ret;
}
-static int
+static void
net2272_plat_remove(struct platform_device *pdev)
{
struct net2272 *dev = platform_get_drvdata(pdev);
@@ -2681,13 +2681,11 @@ net2272_plat_remove(struct platform_device *pdev)
resource_size(&pdev->resource[0]));
usb_put_gadget(&dev->gadget);
-
- return 0;
}
static struct platform_driver net2272_plat_driver = {
.probe = net2272_plat_probe,
- .remove = net2272_plat_remove,
+ .remove_new = net2272_plat_remove,
.driver = {
.name = driver_name,
},
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 2d87c7cd5f7e..10c5d7f726a1 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2927,7 +2927,7 @@ cleanup0:
return status;
}
-static int omap_udc_remove(struct platform_device *pdev)
+static void omap_udc_remove(struct platform_device *pdev)
{
DECLARE_COMPLETION_ONSTACK(done);
@@ -2939,8 +2939,6 @@ static int omap_udc_remove(struct platform_device *pdev)
release_mem_region(pdev->resource[0].start,
resource_size(&pdev->resource[0]));
-
- return 0;
}
/* suspend/resume/wakeup from sysfs (echo > power/state) or when the
@@ -2985,7 +2983,7 @@ static int omap_udc_resume(struct platform_device *dev)
static struct platform_driver udc_driver = {
.probe = omap_udc_probe,
- .remove = omap_udc_remove,
+ .remove_new = omap_udc_remove,
.suspend = omap_udc_suspend,
.resume = omap_udc_resume,
.driver = {
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index fdf9cd4506b0..c4e1d957f913 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2445,7 +2445,7 @@ err:
* pxa_udc_remove - removes the udc device driver
* @_dev: platform device
*/
-static int pxa_udc_remove(struct platform_device *_dev)
+static void pxa_udc_remove(struct platform_device *_dev)
{
struct pxa_udc *udc = platform_get_drvdata(_dev);
@@ -2460,8 +2460,6 @@ static int pxa_udc_remove(struct platform_device *_dev)
udc->transceiver = NULL;
the_controller = NULL;
clk_unprepare(udc->clk);
-
- return 0;
}
static void pxa_udc_shutdown(struct platform_device *_dev)
@@ -2541,7 +2539,7 @@ static struct platform_driver udc_driver = {
.of_match_table = of_match_ptr(udc_pxa_dt_ids),
},
.probe = pxa_udc_probe,
- .remove = pxa_udc_remove,
+ .remove_new = pxa_udc_remove,
.shutdown = pxa_udc_shutdown,
#ifdef CONFIG_PM
.suspend = pxa_udc_suspend,
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 38e4d6b505a0..51b665f15c8e 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1805,7 +1805,7 @@ static const struct usb_gadget_ops r8a66597_gadget_ops = {
.set_selfpowered = r8a66597_set_selfpowered,
};
-static int r8a66597_remove(struct platform_device *pdev)
+static void r8a66597_remove(struct platform_device *pdev)
{
struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
@@ -1816,8 +1816,6 @@ static int r8a66597_remove(struct platform_device *pdev)
if (r8a66597->pdata->on_chip) {
clk_disable_unprepare(r8a66597->clk);
}
-
- return 0;
}
static void nop_completion(struct usb_ep *ep, struct usb_request *r)
@@ -1966,7 +1964,7 @@ clean_up2:
/*-------------------------------------------------------------------------*/
static struct platform_driver r8a66597_driver = {
- .remove = r8a66597_remove,
+ .remove_new = r8a66597_remove,
.driver = {
.name = udc_name,
},
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index eb008e873361..59bb25de2015 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2653,7 +2653,7 @@ static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
}
/*------- platform_driver ------------------------------------------------*/
-static int renesas_usb3_remove(struct platform_device *pdev)
+static void renesas_usb3_remove(struct platform_device *pdev)
{
struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
@@ -2669,8 +2669,6 @@ static int renesas_usb3_remove(struct platform_device *pdev)
__renesas_usb3_ep_free_request(usb3->ep0_req);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
@@ -3015,7 +3013,7 @@ static SIMPLE_DEV_PM_OPS(renesas_usb3_pm_ops, renesas_usb3_suspend,
static struct platform_driver renesas_usb3_driver = {
.probe = renesas_usb3_probe,
- .remove = renesas_usb3_remove,
+ .remove_new = renesas_usb3_remove,
.driver = {
.name = udc_name,
.pm = &renesas_usb3_pm_ops,
diff --git a/drivers/usb/gadget/udc/renesas_usbf.c b/drivers/usb/gadget/udc/renesas_usbf.c
index 84ac9fe4ce7f..6cd0af83e91e 100644
--- a/drivers/usb/gadget/udc/renesas_usbf.c
+++ b/drivers/usb/gadget/udc/renesas_usbf.c
@@ -3361,15 +3361,13 @@ static int usbf_probe(struct platform_device *pdev)
return 0;
}
-static int usbf_remove(struct platform_device *pdev)
+static void usbf_remove(struct platform_device *pdev)
{
struct usbf_udc *udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
pm_runtime_put(&pdev->dev);
-
- return 0;
}
static const struct of_device_id usbf_match[] = {
@@ -3385,7 +3383,7 @@ static struct platform_driver udc_driver = {
.of_match_table = usbf_match,
},
.probe = usbf_probe,
- .remove = usbf_remove,
+ .remove_new = usbf_remove,
};
module_platform_driver(udc_driver);
diff --git a/drivers/usb/gadget/udc/rzv2m_usb3drd.c b/drivers/usb/gadget/udc/rzv2m_usb3drd.c
index 589c7252e014..36f4ff00d22f 100644
--- a/drivers/usb/gadget/udc/rzv2m_usb3drd.c
+++ b/drivers/usb/gadget/udc/rzv2m_usb3drd.c
@@ -58,7 +58,7 @@ void rzv2m_usb3drd_reset(struct device *dev, bool host)
}
EXPORT_SYMBOL_GPL(rzv2m_usb3drd_reset);
-static int rzv2m_usb3drd_remove(struct platform_device *pdev)
+static void rzv2m_usb3drd_remove(struct platform_device *pdev)
{
struct rzv2m_usb3drd *usb3 = platform_get_drvdata(pdev);
@@ -66,8 +66,6 @@ static int rzv2m_usb3drd_remove(struct platform_device *pdev)
pm_runtime_put(usb3->dev);
pm_runtime_disable(&pdev->dev);
reset_control_assert(usb3->drd_rstc);
-
- return 0;
}
static int rzv2m_usb3drd_probe(struct platform_device *pdev)
@@ -129,7 +127,7 @@ static struct platform_driver rzv2m_usb3drd_driver = {
.of_match_table = rzv2m_usb3drd_of_match,
},
.probe = rzv2m_usb3drd_probe,
- .remove = rzv2m_usb3drd_remove,
+ .remove_new = rzv2m_usb3drd_remove,
};
module_platform_driver(rzv2m_usb3drd_driver);
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 0d3e705655b9..0ed685db149d 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -225,7 +225,7 @@ exit_phy:
return ret;
}
-static int udc_plat_remove(struct platform_device *pdev)
+static void udc_plat_remove(struct platform_device *pdev)
{
struct udc *dev;
@@ -234,7 +234,7 @@ static int udc_plat_remove(struct platform_device *pdev)
usb_del_gadget_udc(&dev->gadget);
/* gadget driver must not be registered */
if (WARN_ON(dev->driver))
- return 0;
+ return;
/* dma pool cleanup */
free_dma_pools(dev);
@@ -248,8 +248,6 @@ static int udc_plat_remove(struct platform_device *pdev)
extcon_unregister_notifier(dev->edev, EXTCON_USB, &dev->nb);
dev_info(&pdev->dev, "Synopsys UDC platform driver removed\n");
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -315,7 +313,7 @@ MODULE_DEVICE_TABLE(of, of_udc_match);
static struct platform_driver udc_plat_driver = {
.probe = udc_plat_probe,
- .remove = udc_plat_remove,
+ .remove_new = udc_plat_remove,
.driver = {
.name = "snps-udc-plat",
.of_match_table = of_match_ptr(of_udc_match),
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 34e9c1df54c7..83eaa65ddde3 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -3906,7 +3906,7 @@ put_padctl:
return err;
}
-static int tegra_xudc_remove(struct platform_device *pdev)
+static void tegra_xudc_remove(struct platform_device *pdev)
{
struct tegra_xudc *xudc = platform_get_drvdata(pdev);
unsigned int i;
@@ -3936,8 +3936,6 @@ static int tegra_xudc_remove(struct platform_device *pdev)
pm_runtime_put(xudc->dev);
tegra_xusb_padctl_put(xudc->padctl);
-
- return 0;
}
static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
@@ -4063,7 +4061,7 @@ static const struct dev_pm_ops tegra_xudc_pm_ops = {
static struct platform_driver tegra_xudc_driver = {
.probe = tegra_xudc_probe,
- .remove = tegra_xudc_remove,
+ .remove_new = tegra_xudc_remove,
.driver = {
.name = "tegra-xudc",
.pm = &tegra_xudc_pm_ops,
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 4827e3cd3834..a4a7b90a97e7 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -192,7 +192,7 @@ struct xusb_udc {
bool dma_enabled;
struct clk *clk;
- unsigned int (*read_fn)(void __iomem *);
+ unsigned int (*read_fn)(void __iomem *reg);
void (*write_fn)(void __iomem *, u32, u32);
};
@@ -2178,14 +2178,12 @@ fail:
*
* Return: 0 always
*/
-static int xudc_remove(struct platform_device *pdev)
+static void xudc_remove(struct platform_device *pdev)
{
struct xusb_udc *udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
clk_disable_unprepare(udc->clk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2257,7 +2255,7 @@ static struct platform_driver xudc_driver = {
.pm = &xudc_pm_ops,
},
.probe = xudc_probe,
- .remove = xudc_remove,
+ .remove_new = xudc_remove,
};
module_platform_driver(xudc_driver);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index c170672f847e..4448d0ab06f0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -376,7 +376,7 @@ config USB_ISP116X_HCD
config USB_ISP1362_HCD
tristate "ISP1362 HCD support"
- depends on HAS_IOMEM
+ depends on HAS_IOPORT
depends on COMPILE_TEST # nothing uses this
help
Supports the Philips ISP1362 chip as a host controller
@@ -578,7 +578,7 @@ endif # USB_OHCI_HCD
config USB_UHCI_HCD
tristate "UHCI HCD (most Intel and VIA) support"
- depends on USB_PCI || USB_UHCI_SUPPORT_NON_PCI_HC
+ depends on (USB_PCI && HAS_IOPORT) || USB_UHCI_SUPPORT_NON_PCI_HC
help
The Universal Host Controller Interface is a standard by Intel for
accessing the USB hardware in the PC (which is also called the USB
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 8b775e7bab06..61808c51e702 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -173,7 +173,7 @@ fail_create_hcd:
return retval;
}
-static int ehci_atmel_drv_remove(struct platform_device *pdev)
+static void ehci_atmel_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
@@ -181,8 +181,6 @@ static int ehci_atmel_drv_remove(struct platform_device *pdev)
usb_put_hcd(hcd);
atmel_stop_ehci(pdev);
-
- return 0;
}
static int __maybe_unused ehci_atmel_drv_suspend(struct device *dev)
@@ -223,7 +221,7 @@ static SIMPLE_DEV_PM_OPS(ehci_atmel_pm_ops, ehci_atmel_drv_suspend,
static struct platform_driver ehci_atmel_driver = {
.probe = ehci_atmel_drv_probe,
- .remove = ehci_atmel_drv_remove,
+ .remove_new = ehci_atmel_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "atmel-ehci",
diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c
index 6a0f64c9e5e8..0362a082abb4 100644
--- a/drivers/usb/host/ehci-brcm.c
+++ b/drivers/usb/host/ehci-brcm.c
@@ -188,7 +188,7 @@ err_hcd:
return err;
}
-static int ehci_brcm_remove(struct platform_device *dev)
+static void ehci_brcm_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
@@ -196,7 +196,6 @@ static int ehci_brcm_remove(struct platform_device *dev)
usb_remove_hcd(hcd);
clk_disable_unprepare(priv->clk);
usb_put_hcd(hcd);
- return 0;
}
static int __maybe_unused ehci_brcm_suspend(struct device *dev)
@@ -250,7 +249,7 @@ static const struct of_device_id brcm_ehci_of_match[] = {
static struct platform_driver ehci_brcm_driver = {
.probe = ehci_brcm_probe,
- .remove = ehci_brcm_remove,
+ .remove_new = ehci_brcm_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ehci-brcm",
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 47c9f06c3d84..20f8c0ec6810 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -230,7 +230,7 @@ fail_clk:
return err;
}
-static int exynos_ehci_remove(struct platform_device *pdev)
+static void exynos_ehci_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
@@ -244,8 +244,6 @@ static int exynos_ehci_remove(struct platform_device *pdev)
clk_disable_unprepare(exynos_ehci->clk);
usb_put_hcd(hcd);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -311,7 +309,7 @@ MODULE_DEVICE_TABLE(of, exynos_ehci_match);
static struct platform_driver exynos_ehci_driver = {
.probe = exynos_ehci_probe,
- .remove = exynos_ehci_remove,
+ .remove_new = exynos_ehci_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "exynos-ehci",
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index d74fa5ba845b..81d60a695510 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -684,7 +684,7 @@ static const struct ehci_driver_overrides ehci_fsl_overrides __initconst = {
*
* Reverses the effect of usb_hcd_fsl_probe().
*/
-static int fsl_ehci_drv_remove(struct platform_device *pdev)
+static void fsl_ehci_drv_remove(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct usb_hcd *hcd = platform_get_drvdata(pdev);
@@ -703,13 +703,11 @@ static int fsl_ehci_drv_remove(struct platform_device *pdev)
if (pdata->exit)
pdata->exit(pdev);
usb_put_hcd(hcd);
-
- return 0;
}
static struct platform_driver ehci_fsl_driver = {
.probe = fsl_ehci_drv_probe,
- .remove = fsl_ehci_drv_remove,
+ .remove_new = fsl_ehci_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index 0717f2ccf49d..14150e4d3382 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -140,7 +140,7 @@ err_irq:
}
-static int ehci_hcd_grlib_remove(struct platform_device *op)
+static void ehci_hcd_grlib_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
@@ -151,8 +151,6 @@ static int ehci_hcd_grlib_remove(struct platform_device *op)
irq_dispose_mapping(hcd->irq);
usb_put_hcd(hcd);
-
- return 0;
}
@@ -170,7 +168,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match);
static struct platform_driver ehci_grlib_driver = {
.probe = ehci_hcd_grlib_probe,
- .remove = ehci_hcd_grlib_remove,
+ .remove_new = ehci_hcd_grlib_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "grlib-ehci",
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index fa46d217dd10..9320cf0e5bc7 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -235,7 +235,7 @@ err_put_hcd:
return retval;
}
-static int mv_ehci_remove(struct platform_device *pdev)
+static void mv_ehci_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
@@ -254,8 +254,6 @@ static int mv_ehci_remove(struct platform_device *pdev)
}
usb_put_hcd(hcd);
-
- return 0;
}
static const struct platform_device_id ehci_id_table[] = {
@@ -282,7 +280,7 @@ static const struct of_device_id ehci_mv_dt_ids[] = {
static struct platform_driver ehci_mv_driver = {
.probe = mv_ehci_probe,
- .remove = mv_ehci_remove,
+ .remove_new = mv_ehci_remove,
.shutdown = mv_ehci_shutdown,
.driver = {
.name = "mv-ehci",
diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c
index 63af1a827fcb..ad79ce63afcf 100644
--- a/drivers/usb/host/ehci-npcm7xx.c
+++ b/drivers/usb/host/ehci-npcm7xx.c
@@ -106,15 +106,13 @@ fail:
return retval;
}
-static int npcm7xx_ehci_hcd_drv_remove(struct platform_device *pdev)
+static void npcm7xx_ehci_hcd_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
-
- return 0;
}
static const struct of_device_id npcm7xx_ehci_id_table[] = {
@@ -125,7 +123,7 @@ MODULE_DEVICE_TABLE(of, npcm7xx_ehci_id_table);
static struct platform_driver npcm7xx_ehci_hcd_driver = {
.probe = npcm7xx_ehci_hcd_drv_probe,
- .remove = npcm7xx_ehci_hcd_drv_remove,
+ .remove_new = npcm7xx_ehci_hcd_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "npcm7xx-ehci",
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 7dd984722a7f..cb6509a735ac 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -237,7 +237,7 @@ err_phy:
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
-static int ehci_hcd_omap_remove(struct platform_device *pdev)
+static void ehci_hcd_omap_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -254,8 +254,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
usb_put_hcd(hcd);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static const struct of_device_id omap_ehci_dt_ids[] = {
@@ -267,7 +265,7 @@ MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids);
static struct platform_driver ehci_hcd_omap_driver = {
.probe = ehci_hcd_omap_probe,
- .remove = ehci_hcd_omap_remove,
+ .remove_new = ehci_hcd_omap_remove,
.shutdown = usb_hcd_platform_shutdown,
/*.suspend = ehci_hcd_omap_suspend, */
/*.resume = ehci_hcd_omap_resume, */
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index a3454a3ea4e0..2cfb27dc943a 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -321,7 +321,7 @@ err:
return err;
}
-static int ehci_orion_drv_remove(struct platform_device *pdev)
+static void ehci_orion_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct orion_ehci_hcd *priv = hcd_to_orion_priv(hcd);
@@ -332,8 +332,6 @@ static int ehci_orion_drv_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
usb_put_hcd(hcd);
-
- return 0;
}
static const struct of_device_id ehci_orion_dt_ids[] = {
@@ -345,7 +343,7 @@ MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
static struct platform_driver ehci_orion_driver = {
.probe = ehci_orion_drv_probe,
- .remove = ehci_orion_drv_remove,
+ .remove_new = ehci_orion_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "orion-ehci",
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 4b148fe5e43b..889dc4426271 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -354,10 +354,11 @@ done:
* Also they depend on separate root hub suspend/resume.
*/
-static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+static int ehci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ bool hibernated = (msg.event == PM_EVENT_RESTORE);
if (ehci_resume(hcd, hibernated) != 0)
(void) ehci_pci_reinit(ehci, pdev);
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index fe497c876d76..83bf56c9424f 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -400,7 +400,7 @@ err_put_clks:
return err;
}
-static int ehci_platform_remove(struct platform_device *dev)
+static void ehci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
@@ -424,8 +424,6 @@ static int ehci_platform_remove(struct platform_device *dev)
if (pdata == &ehci_platform_defaults)
dev->dev.platform_data = NULL;
-
- return 0;
}
static int __maybe_unused ehci_platform_suspend(struct device *dev)
@@ -511,7 +509,7 @@ static SIMPLE_DEV_PM_OPS(ehci_platform_pm_ops, ehci_platform_suspend,
static struct platform_driver ehci_platform_driver = {
.id_table = ehci_platform_table,
.probe = ehci_platform_probe,
- .remove = ehci_platform_remove,
+ .remove_new = ehci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ehci-platform",
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index b3aa464e9d2c..7fd83e806ae4 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -184,7 +184,7 @@ err_irq:
}
-static int ehci_hcd_ppc_of_remove(struct platform_device *op)
+static void ehci_hcd_ppc_of_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -216,8 +216,6 @@ static int ehci_hcd_ppc_of_remove(struct platform_device *op)
}
}
usb_put_hcd(hcd);
-
- return 0;
}
@@ -232,7 +230,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
static struct platform_driver ehci_hcd_ppc_of_driver = {
.probe = ehci_hcd_ppc_of_probe,
- .remove = ehci_hcd_ppc_of_remove,
+ .remove_new = ehci_hcd_ppc_of_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ehci",
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index c25c51d26f26..0520e762801d 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -147,7 +147,7 @@ fail_create_hcd:
return ret;
}
-static int ehci_hcd_sh_remove(struct platform_device *pdev)
+static void ehci_hcd_sh_remove(struct platform_device *pdev)
{
struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
struct usb_hcd *hcd = priv->hcd;
@@ -157,8 +157,6 @@ static int ehci_hcd_sh_remove(struct platform_device *pdev)
clk_disable(priv->fclk);
clk_disable(priv->iclk);
-
- return 0;
}
static void ehci_hcd_sh_shutdown(struct platform_device *pdev)
@@ -172,7 +170,7 @@ static void ehci_hcd_sh_shutdown(struct platform_device *pdev)
static struct platform_driver ehci_hcd_sh_driver = {
.probe = ehci_hcd_sh_probe,
- .remove = ehci_hcd_sh_remove,
+ .remove_new = ehci_hcd_sh_remove,
.shutdown = ehci_hcd_sh_shutdown,
.driver = {
.name = "sh_ehci",
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index c4ddd1022f60..1407703649be 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -124,7 +124,7 @@ fail:
return retval ;
}
-static int spear_ehci_hcd_drv_remove(struct platform_device *pdev)
+static void spear_ehci_hcd_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct spear_ehci *sehci = to_spear_ehci(hcd);
@@ -134,8 +134,6 @@ static int spear_ehci_hcd_drv_remove(struct platform_device *pdev)
if (sehci->clk)
clk_disable_unprepare(sehci->clk);
usb_put_hcd(hcd);
-
- return 0;
}
static const struct of_device_id spear_ehci_id_table[] = {
@@ -146,7 +144,7 @@ MODULE_DEVICE_TABLE(of, spear_ehci_id_table);
static struct platform_driver spear_ehci_hcd_driver = {
.probe = spear_ehci_hcd_drv_probe,
- .remove = spear_ehci_hcd_drv_remove,
+ .remove_new = spear_ehci_hcd_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "spear-ehci",
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index f731dc98c533..ee0976b815b4 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -252,7 +252,7 @@ err_put_hcd:
return err;
}
-static int st_ehci_platform_remove(struct platform_device *dev)
+static void st_ehci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
@@ -271,8 +271,6 @@ static int st_ehci_platform_remove(struct platform_device *dev)
if (pdata == &ehci_platform_defaults)
dev->dev.platform_data = NULL;
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -328,7 +326,7 @@ MODULE_DEVICE_TABLE(of, st_ehci_ids);
static struct platform_driver ehci_platform_driver = {
.probe = st_ehci_platform_probe,
- .remove = st_ehci_platform_remove,
+ .remove_new = st_ehci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "st-ehci",
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 3d7893747835..a2112c28f631 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -201,7 +201,7 @@ err_irq:
*
* Return: Always return 0
*/
-static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
+static void ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
@@ -210,8 +210,6 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
-
- return 0;
}
static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
@@ -222,7 +220,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
static struct platform_driver ehci_hcd_xilinx_of_driver = {
.probe = ehci_hcd_xilinx_of_probe,
- .remove = ehci_hcd_xilinx_of_remove,
+ .remove_new = ehci_hcd_xilinx_of_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xilinx-of-ehci",
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 92794ffc25c8..66a045e01dad 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -757,7 +757,7 @@ err_regs:
return ret;
}
-static int fhci_remove(struct device *dev)
+static void fhci_remove(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
@@ -771,12 +771,11 @@ static int fhci_remove(struct device *dev)
qe_pin_free(fhci->pins[j]);
fhci_dfs_destroy(fhci);
usb_put_hcd(hcd);
- return 0;
}
-static int of_fhci_remove(struct platform_device *ofdev)
+static void of_fhci_remove(struct platform_device *ofdev)
{
- return fhci_remove(&ofdev->dev);
+ fhci_remove(&ofdev->dev);
}
static const struct of_device_id of_fhci_match[] = {
@@ -791,7 +790,7 @@ static struct platform_driver of_fhci_driver = {
.of_match_table = of_fhci_match,
},
.probe = of_fhci_probe,
- .remove = of_fhci_remove,
+ .remove_new = of_fhci_remove,
};
module_platform_driver(of_fhci_driver);
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 9db909d12354..a9877f2569f4 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -265,10 +265,9 @@ static int __unregister_subdev(struct device *dev, void *d)
return 0;
}
-static int fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
+static void fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
{
device_for_each_child(&ofdev->dev, NULL, __unregister_subdev);
- return 0;
}
#ifdef CONFIG_PPC_MPC512x
@@ -362,7 +361,7 @@ static struct platform_driver fsl_usb2_mph_dr_driver = {
.of_match_table = fsl_usb2_mph_dr_of_match,
},
.probe = fsl_usb2_mph_dr_of_probe,
- .remove = fsl_usb2_mph_dr_of_remove,
+ .remove_new = fsl_usb2_mph_dr_of_remove,
};
module_platform_driver(fsl_usb2_mph_dr_driver);
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 49ae01487af4..a82d8926e922 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1526,14 +1526,14 @@ static const struct hc_driver isp116x_hc_driver = {
/*----------------------------------------------------------------*/
-static int isp116x_remove(struct platform_device *pdev)
+static void isp116x_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct isp116x *isp116x;
struct resource *res;
if (!hcd)
- return 0;
+ return;
isp116x = hcd_to_isp116x(hcd);
remove_debug_file(isp116x);
usb_remove_hcd(hcd);
@@ -1548,7 +1548,6 @@ static int isp116x_remove(struct platform_device *pdev)
release_mem_region(res->start, 2);
usb_put_hcd(hcd);
- return 0;
}
static int isp116x_probe(struct platform_device *pdev)
@@ -1685,7 +1684,7 @@ MODULE_ALIAS("platform:isp116x-hcd");
static struct platform_driver isp116x_driver = {
.probe = isp116x_probe,
- .remove = isp116x_remove,
+ .remove_new = isp116x_remove,
.suspend = isp116x_suspend,
.resume = isp116x_resume,
.driver = {
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index b0da143ef4be..606f0a64f3b7 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2606,7 +2606,7 @@ static const struct hc_driver isp1362_hc_driver = {
/*-------------------------------------------------------------------------*/
-static int isp1362_remove(struct platform_device *pdev)
+static void isp1362_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
@@ -2617,8 +2617,6 @@ static int isp1362_remove(struct platform_device *pdev)
DBG(0, "%s: put_hcd\n", __func__);
usb_put_hcd(hcd);
DBG(0, "%s: Done\n", __func__);
-
- return 0;
}
static int isp1362_probe(struct platform_device *pdev)
@@ -2760,7 +2758,7 @@ static int isp1362_resume(struct platform_device *pdev)
static struct platform_driver isp1362_driver = {
.probe = isp1362_probe,
- .remove = isp1362_remove,
+ .remove_new = isp1362_remove,
.suspend = isp1362_suspend,
.resume = isp1362_resume,
diff --git a/drivers/usb/host/octeon-hcd.c b/drivers/usb/host/octeon-hcd.c
index a1cd81d4a114..19d5777f5db2 100644
--- a/drivers/usb/host/octeon-hcd.c
+++ b/drivers/usb/host/octeon-hcd.c
@@ -3680,7 +3680,7 @@ static int octeon_usb_probe(struct platform_device *pdev)
return 0;
}
-static int octeon_usb_remove(struct platform_device *pdev)
+static void octeon_usb_remove(struct platform_device *pdev)
{
int status;
struct device *dev = &pdev->dev;
@@ -3696,8 +3696,6 @@ static int octeon_usb_remove(struct platform_device *pdev)
dev_dbg(dev, "USB shutdown failed with %d\n", status);
usb_put_hcd(hcd);
-
- return 0;
}
static const struct of_device_id octeon_usb_match[] = {
@@ -3714,7 +3712,7 @@ static struct platform_driver octeon_usb_driver = {
.of_match_table = octeon_usb_match,
},
.probe = octeon_usb_probe,
- .remove = octeon_usb_remove,
+ .remove_new = octeon_usb_remove,
};
static int __init octeon_usb_driver_init(void)
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 533537ef3c21..b9ce8d80f20b 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -599,7 +599,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
}
-static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
+static void ohci_hcd_at91_drv_remove(struct platform_device *pdev)
{
struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
int i;
@@ -611,7 +611,6 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
usb_hcd_at91_remove(platform_get_drvdata(pdev), pdev);
- return 0;
}
static int __maybe_unused
@@ -683,7 +682,7 @@ static SIMPLE_DEV_PM_OPS(ohci_hcd_at91_pm_ops, ohci_hcd_at91_drv_suspend,
static struct platform_driver ohci_hcd_at91_driver = {
.probe = ohci_hcd_at91_drv_probe,
- .remove = ohci_hcd_at91_drv_remove,
+ .remove_new = ohci_hcd_at91_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "at91_ohci",
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index d4818e8d652b..e4191a868944 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -469,14 +469,12 @@ err:
return error;
}
-static int ohci_da8xx_remove(struct platform_device *pdev)
+static void ohci_da8xx_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -533,7 +531,7 @@ static const struct ohci_driver_overrides da8xx_overrides __initconst = {
*/
static struct platform_driver ohci_hcd_da8xx_driver = {
.probe = ohci_da8xx_probe,
- .remove = ohci_da8xx_remove,
+ .remove_new = ohci_da8xx_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_da8xx_suspend,
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 8af17c1ee5cc..ab31c459b32d 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -198,7 +198,7 @@ fail_clk:
return err;
}
-static int exynos_ohci_remove(struct platform_device *pdev)
+static void exynos_ohci_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
@@ -212,8 +212,6 @@ static int exynos_ohci_remove(struct platform_device *pdev)
clk_disable_unprepare(exynos_ohci->clk);
usb_put_hcd(hcd);
-
- return 0;
}
static void exynos_ohci_shutdown(struct platform_device *pdev)
@@ -285,7 +283,7 @@ MODULE_DEVICE_TABLE(of, exynos_ohci_match);
static struct platform_driver exynos_ohci_driver = {
.probe = exynos_ohci_probe,
- .remove = exynos_ohci_remove,
+ .remove_new = exynos_ohci_remove,
.shutdown = exynos_ohci_shutdown,
.driver = {
.name = "exynos-ohci",
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 5b32e683e367..c04b2af5c766 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -237,7 +237,7 @@ fail_disable:
return ret;
}
-static int ohci_hcd_nxp_remove(struct platform_device *pdev)
+static void ohci_hcd_nxp_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
@@ -246,8 +246,6 @@ static int ohci_hcd_nxp_remove(struct platform_device *pdev)
usb_put_hcd(hcd);
clk_disable_unprepare(usb_host_clk);
isp1301_i2c_client = NULL;
-
- return 0;
}
/* work with hotplug and coldplug */
@@ -267,7 +265,7 @@ static struct platform_driver ohci_hcd_nxp_driver = {
.of_match_table = of_match_ptr(ohci_hcd_nxp_match),
},
.probe = ohci_hcd_nxp_probe,
- .remove = ohci_hcd_nxp_remove,
+ .remove_new = ohci_hcd_nxp_remove,
};
static int __init ohci_nxp_init(void)
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index c82121602511..21a6f6c55e07 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -321,7 +321,7 @@ err_put_hcd:
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
-static int ohci_hcd_omap_remove(struct platform_device *pdev)
+static void ohci_hcd_omap_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_omap_priv *priv = hcd_to_ohci_omap_priv(hcd);
@@ -340,7 +340,6 @@ static int ohci_hcd_omap_remove(struct platform_device *pdev)
clk_unprepare(priv->usb_host_ck);
clk_put(priv->usb_host_ck);
usb_put_hcd(hcd);
- return 0;
}
/*-------------------------------------------------------------------------*/
@@ -391,7 +390,7 @@ static int ohci_omap_resume(struct platform_device *dev)
*/
static struct platform_driver ohci_hcd_omap_driver = {
.probe = ohci_hcd_omap_probe,
- .remove = ohci_hcd_omap_remove,
+ .remove_new = ohci_hcd_omap_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_omap_suspend,
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index d7b4f40f9ff4..900ea0d368e0 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -301,6 +301,12 @@ static struct pci_driver ohci_pci_driver = {
#endif
};
+#ifdef CONFIG_PM
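+/* Wrapper translating the PM message into the "hibernated" flag that ohci_resume() expects */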
+static int ohci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
+{
+ return ohci_resume(hcd, msg.event == PM_EVENT_RESTORE);
+}
+#endif
static int __init ohci_pci_init(void)
{
if (usb_disabled())
@@ -311,7 +317,7 @@ static int __init ohci_pci_init(void)
#ifdef CONFIG_PM
/* Entries for the PCI suspend/resume callbacks are special */
ohci_pci_hc_driver.pci_suspend = ohci_suspend;
- ohci_pci_hc_driver.pci_resume = ohci_resume;
+ ohci_pci_hc_driver.pci_resume = ohci_pci_resume;
#endif
return pci_register_driver(&ohci_pci_driver);
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index a84305091c43..45a2c981319e 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -33,7 +33,7 @@
#include "ohci.h"
#define DRIVER_DESC "OHCI generic platform driver"
-#define OHCI_MAX_CLKS 3
+#define OHCI_MAX_CLKS 4
#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
struct ohci_platform_priv {
@@ -239,7 +239,7 @@ err_put_clks:
return err;
}
-static int ohci_platform_remove(struct platform_device *dev)
+static void ohci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
@@ -264,8 +264,6 @@ static int ohci_platform_remove(struct platform_device *dev)
if (pdata == &ohci_platform_defaults)
dev->dev.platform_data = NULL;
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -347,7 +345,7 @@ static const struct dev_pm_ops ohci_platform_pm_ops = {
static struct platform_driver ohci_platform_driver = {
.id_table = ohci_platform_table,
.probe = ohci_platform_probe,
- .remove = ohci_platform_remove,
+ .remove_new = ohci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ohci-platform",
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index f2f6c832ec98..35a7ad7e2569 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -176,7 +176,7 @@ err_rmr:
return rv;
}
-static int ohci_hcd_ppc_of_remove(struct platform_device *op)
+static void ohci_hcd_ppc_of_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
@@ -187,8 +187,6 @@ static int ohci_hcd_ppc_of_remove(struct platform_device *op)
irq_dispose_mapping(hcd->irq);
usb_put_hcd(hcd);
-
- return 0;
}
static const struct of_device_id ohci_hcd_ppc_of_match[] = {
@@ -224,7 +222,7 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
static struct platform_driver ohci_hcd_ppc_of_driver = {
.probe = ohci_hcd_ppc_of_probe,
- .remove = ohci_hcd_ppc_of_remove,
+ .remove_new = ohci_hcd_ppc_of_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ohci",
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index dcac2938789a..7410442f720f 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -501,7 +501,7 @@ static int ohci_hcd_pxa27x_probe(struct platform_device *pdev)
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
-static int ohci_hcd_pxa27x_remove(struct platform_device *pdev)
+static void ohci_hcd_pxa27x_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
@@ -514,7 +514,6 @@ static int ohci_hcd_pxa27x_remove(struct platform_device *pdev)
pxa27x_ohci_set_vbus_power(pxa_ohci, i, false);
usb_put_hcd(hcd);
- return 0;
}
/*-------------------------------------------------------------------------*/
@@ -572,7 +571,7 @@ static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
static struct platform_driver ohci_hcd_pxa27x_driver = {
.probe = ohci_hcd_pxa27x_probe,
- .remove = ohci_hcd_pxa27x_remove,
+ .remove_new = ohci_hcd_pxa27x_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "pxa27x-ohci",
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 85a0a9ae0095..c5c9b4cbcb9a 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -329,7 +329,7 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc)
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
-static int
+static void
ohci_hcd_s3c2410_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
@@ -337,7 +337,6 @@ ohci_hcd_s3c2410_remove(struct platform_device *dev)
usb_remove_hcd(hcd);
s3c2410_stop_hc(dev);
usb_put_hcd(hcd);
- return 0;
}
/*
@@ -458,7 +457,7 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_s3c2410_dt_ids);
static struct platform_driver ohci_hcd_s3c2410_driver = {
.probe = ohci_hcd_s3c2410_probe,
- .remove = ohci_hcd_s3c2410_remove,
+ .remove_new = ohci_hcd_s3c2410_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "s3c2410-ohci",
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index f5de586454e3..0468eeb4fcfd 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -185,7 +185,7 @@ err0:
return retval;
}
-static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
+static void ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct resource *mem;
@@ -202,8 +202,6 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
sm501_modify_reg(pdev->dev.parent, SM501_IRQ_MASK, 0, 1 << 6);
sm501_unit_power(pdev->dev.parent, SM501_GATE_USB_HOST, 0);
-
- return 0;
}
/*-------------------------------------------------------------------------*/
@@ -255,7 +253,7 @@ static int ohci_sm501_resume(struct platform_device *pdev)
*/
static struct platform_driver ohci_hcd_sm501_driver = {
.probe = ohci_hcd_sm501_drv_probe,
- .remove = ohci_hcd_sm501_drv_remove,
+ .remove_new = ohci_hcd_sm501_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.suspend = ohci_sm501_suspend,
.resume = ohci_sm501_resume,
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 196951a27f3f..f4b2656407dd 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -98,7 +98,7 @@ fail:
return retval;
}
-static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
+static void spear_ohci_hcd_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct spear_ohci *sohci_p = to_spear_ohci(hcd);
@@ -108,7 +108,6 @@ static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
clk_disable_unprepare(sohci_p->clk);
usb_put_hcd(hcd);
- return 0;
}
#if defined(CONFIG_PM)
@@ -159,7 +158,7 @@ MODULE_DEVICE_TABLE(of, spear_ohci_id_table);
/* Driver definition to register with the platform bus */
static struct platform_driver spear_ohci_hcd_driver = {
.probe = spear_ohci_hcd_drv_probe,
- .remove = spear_ohci_hcd_drv_remove,
+ .remove_new = spear_ohci_hcd_drv_remove,
#ifdef CONFIG_PM
.suspend = spear_ohci_hcd_drv_suspend,
.resume = spear_ohci_hcd_drv_resume,
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index 82eef3c62e11..884e447a8098 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -233,7 +233,7 @@ err_put_hcd:
return err;
}
-static int st_ohci_platform_remove(struct platform_device *dev)
+static void st_ohci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
@@ -253,8 +253,6 @@ static int st_ohci_platform_remove(struct platform_device *dev)
if (pdata == &ohci_platform_defaults)
dev->dev.platform_data = NULL;
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -306,7 +304,7 @@ MODULE_DEVICE_TABLE(of, st_ohci_platform_ids);
static struct platform_driver ohci_platform_driver = {
.probe = st_ohci_platform_probe,
- .remove = st_ohci_platform_remove,
+ .remove_new = st_ohci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "st-ohci",
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index f998d3f1a78a..50c1ccabb0f5 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -4278,14 +4278,12 @@ static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
usb_put_hcd(hcd);
}
-static int oxu_drv_remove(struct platform_device *pdev)
+static void oxu_drv_remove(struct platform_device *pdev)
{
struct oxu_info *info = platform_get_drvdata(pdev);
oxu_remove(pdev, info->hcd[0]);
oxu_remove(pdev, info->hcd[1]);
-
- return 0;
}
static void oxu_drv_shutdown(struct platform_device *pdev)
@@ -4317,7 +4315,7 @@ static int oxu_drv_resume(struct device *dev)
static struct platform_driver oxu_driver = {
.probe = oxu_drv_probe,
- .remove = oxu_drv_remove,
+ .remove_new = oxu_drv_remove,
.shutdown = oxu_drv_shutdown,
.suspend = oxu_drv_suspend,
.resume = oxu_drv_resume,
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index abb88dd40d4e..9f4bf8c5f8a5 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2379,7 +2379,7 @@ static const struct dev_pm_ops r8a66597_dev_pm_ops = {
#define R8A66597_DEV_PM_OPS NULL
#endif
-static int r8a66597_remove(struct platform_device *pdev)
+static void r8a66597_remove(struct platform_device *pdev)
{
struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
@@ -2390,7 +2390,6 @@ static int r8a66597_remove(struct platform_device *pdev)
if (r8a66597->pdata->on_chip)
clk_put(r8a66597->clk);
usb_put_hcd(hcd);
- return 0;
}
static int r8a66597_probe(struct platform_device *pdev)
@@ -2511,7 +2510,7 @@ clean_up:
static struct platform_driver r8a66597_driver = {
.probe = r8a66597_probe,
- .remove = r8a66597_remove,
+ .remove_new = r8a66597_remove,
.driver = {
.name = hcd_name,
.pm = R8A66597_DEV_PM_OPS,
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index b8b90eec9107..0956495bba57 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1579,7 +1579,7 @@ static const struct hc_driver sl811h_hc_driver = {
/*-------------------------------------------------------------------------*/
-static int
+static void
sl811h_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
@@ -1599,7 +1599,6 @@ sl811h_remove(struct platform_device *dev)
iounmap(sl811->addr_reg);
usb_put_hcd(hcd);
- return 0;
}
static int
@@ -1783,7 +1782,7 @@ sl811h_resume(struct platform_device *dev)
/* this driver is exported so sl811_cs can depend on it */
struct platform_driver sl811h_driver = {
.probe = sl811h_probe,
- .remove = sl811h_remove,
+ .remove_new = sl811h_remove,
.suspend = sl811h_suspend,
.resume = sl811h_resume,
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index 907d5f01edfd..ac3fc5970315 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -147,7 +147,7 @@ err_usb:
return rv;
}
-static int uhci_hcd_grlib_remove(struct platform_device *op)
+static void uhci_hcd_grlib_remove(struct platform_device *op)
{
struct usb_hcd *hcd = platform_get_drvdata(op);
@@ -157,8 +157,6 @@ static int uhci_hcd_grlib_remove(struct platform_device *op)
irq_dispose_mapping(hcd->irq);
usb_put_hcd(hcd);
-
- return 0;
}
/* Make sure the controller is quiescent and that we're not using it
@@ -185,7 +183,7 @@ MODULE_DEVICE_TABLE(of, uhci_hcd_grlib_of_match);
static struct platform_driver uhci_grlib_driver = {
.probe = uhci_hcd_grlib_probe,
- .remove = uhci_hcd_grlib_remove,
+ .remove_new = uhci_hcd_grlib_remove,
.shutdown = uhci_hcd_grlib_shutdown,
.driver = {
.name = "grlib-uhci",
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 7cdc2fa7c28f..fd2408b553cf 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -841,7 +841,7 @@ static int uhci_count_ports(struct usb_hcd *hcd)
static const char hcd_name[] = "uhci_hcd";
-#ifdef CONFIG_USB_PCI
+#if defined(CONFIG_USB_PCI) && defined(CONFIG_HAS_IOPORT)
#include "uhci-pci.c"
#define PCI_DRIVER uhci_pci_driver
#endif
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 0688c3e5bfe2..13ee2a6144b2 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -505,6 +505,14 @@ static inline bool uhci_is_aspeed(const struct uhci_hcd *uhci)
* we use memory mapped registers.
*/
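+/* Without HAS_IOPORT, stub out port I/O: reads evaluate to 0 and writes are discarded */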
+#ifdef CONFIG_HAS_IOPORT
+#define UHCI_IN(x) x
+#define UHCI_OUT(x) x
+#else
+#define UHCI_IN(x) 0
+#define UHCI_OUT(x) do { } while (0)
+#endif
+
#ifndef CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC
/* Support PCI only */
static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
@@ -539,7 +547,7 @@ static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
#else
/* Support non-PCI host controllers */
-#ifdef CONFIG_USB_PCI
+#if defined(CONFIG_USB_PCI) && defined(CONFIG_HAS_IOPORT)
/* Support PCI and non-PCI host controllers */
#define uhci_has_pci_registers(u) ((u)->io_addr != 0)
#else
@@ -587,7 +595,7 @@ static inline int uhci_aspeed_reg(unsigned int reg)
static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
{
if (uhci_has_pci_registers(uhci))
- return inl(uhci->io_addr + reg);
+ return UHCI_IN(inl(uhci->io_addr + reg));
else if (uhci_is_aspeed(uhci))
return readl(uhci->regs + uhci_aspeed_reg(reg));
#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
@@ -601,7 +609,7 @@ static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg)
{
if (uhci_has_pci_registers(uhci))
- outl(val, uhci->io_addr + reg);
+ UHCI_OUT(outl(val, uhci->io_addr + reg));
else if (uhci_is_aspeed(uhci))
writel(val, uhci->regs + uhci_aspeed_reg(reg));
#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
@@ -615,7 +623,7 @@ static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg)
static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg)
{
if (uhci_has_pci_registers(uhci))
- return inw(uhci->io_addr + reg);
+ return UHCI_IN(inw(uhci->io_addr + reg));
else if (uhci_is_aspeed(uhci))
return readl(uhci->regs + uhci_aspeed_reg(reg));
#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
@@ -629,7 +637,7 @@ static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg)
static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg)
{
if (uhci_has_pci_registers(uhci))
- outw(val, uhci->io_addr + reg);
+ UHCI_OUT(outw(val, uhci->io_addr + reg));
else if (uhci_is_aspeed(uhci))
writel(val, uhci->regs + uhci_aspeed_reg(reg));
#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
@@ -643,7 +651,7 @@ static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg)
static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg)
{
if (uhci_has_pci_registers(uhci))
- return inb(uhci->io_addr + reg);
+ return UHCI_IN(inb(uhci->io_addr + reg));
else if (uhci_is_aspeed(uhci))
return readl(uhci->regs + uhci_aspeed_reg(reg));
#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
@@ -657,7 +665,7 @@ static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg)
static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
{
if (uhci_has_pci_registers(uhci))
- outb(val, uhci->io_addr + reg);
+ UHCI_OUT(outb(val, uhci->io_addr + reg));
else if (uhci_is_aspeed(uhci))
writel(val, uhci->regs + uhci_aspeed_reg(reg));
#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
@@ -668,6 +676,8 @@ static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
writeb(val, uhci->regs + reg);
}
#endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */
+#undef UHCI_IN
+#undef UHCI_OUT
/*
* The GRLIB GRUSBHC controller can use big endian format for its descriptors.
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 7bd2fddde770..5edf6a08cf82 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -169,7 +169,7 @@ static void uhci_shutdown(struct pci_dev *pdev)
#ifdef CONFIG_PM
-static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated);
+static int uhci_pci_resume(struct usb_hcd *hcd, pm_message_t state);
static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
@@ -204,14 +204,15 @@ done_okay:
/* Check for race with a wakeup request */
if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
- uhci_pci_resume(hcd, false);
+ uhci_pci_resume(hcd, PMSG_SUSPEND);
rc = -EBUSY;
}
return rc;
}
-static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+static int uhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
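+ /* Treat only a restore from hibernation (PM_EVENT_RESTORE) as the hibernated case */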
+ bool hibernated = (msg.event == PM_EVENT_RESTORE);
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
dev_dbg(uhci_dev(uhci), "%s\n", __func__);
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index b2049b47a08d..71ca532fc086 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -152,7 +152,7 @@ err_rmr:
return ret;
}
-static int uhci_hcd_platform_remove(struct platform_device *pdev)
+static void uhci_hcd_platform_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
@@ -160,8 +160,6 @@ static int uhci_hcd_platform_remove(struct platform_device *pdev)
clk_disable_unprepare(uhci->clk);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
-
- return 0;
}
/* Make sure the controller is quiescent and that we're not using it
@@ -187,7 +185,7 @@ MODULE_DEVICE_TABLE(of, platform_uhci_ids);
static struct platform_driver uhci_platform_driver = {
.probe = uhci_hcd_platform_probe,
- .remove = uhci_hcd_platform_remove,
+ .remove_new = uhci_hcd_platform_remove,
.shutdown = uhci_hcd_platform_shutdown,
.driver = {
.name = "platform-uhci",
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index 08369857686e..f9a4a4b0eb57 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -164,16 +164,6 @@ static void xhci_histb_host_disable(struct xhci_hcd_histb *histb)
clk_disable_unprepare(histb->bus_clk);
}
-static void xhci_histb_quirks(struct device *dev, struct xhci_hcd *xhci)
-{
- /*
- * As of now platform drivers don't provide MSI support so we ensure
- * here that the generic code does not try to make a pci_dev from our
- * dev struct in order to setup MSI
- */
- xhci->quirks |= XHCI_PLAT;
-}
-
/* called during probe() after chip reset completes */
static int xhci_histb_setup(struct usb_hcd *hcd)
{
@@ -186,7 +176,7 @@ static int xhci_histb_setup(struct usb_hcd *hcd)
return ret;
}
- return xhci_gen_setup(hcd, xhci_histb_quirks);
+ return xhci_gen_setup(hcd, NULL);
}
static const struct xhci_driver_overrides xhci_histb_overrides __initconst = {
@@ -319,7 +309,7 @@ disable_pm:
return ret;
}
-static int xhci_histb_remove(struct platform_device *dev)
+static void xhci_histb_remove(struct platform_device *dev)
{
struct xhci_hcd_histb *histb = platform_get_drvdata(dev);
struct usb_hcd *hcd = histb->hcd;
@@ -339,8 +329,6 @@ static int xhci_histb_remove(struct platform_device *dev)
usb_put_hcd(hcd);
pm_runtime_put_sync(&dev->dev);
pm_runtime_disable(&dev->dev);
-
- return 0;
}
static int __maybe_unused xhci_histb_suspend(struct device *dev)
@@ -367,7 +355,7 @@ static int __maybe_unused xhci_histb_resume(struct device *dev)
if (!device_may_wakeup(dev))
xhci_histb_host_enable(histb);
- return xhci_resume(xhci, 0);
+ return xhci_resume(xhci, PMSG_RESUME);
}
static const struct dev_pm_ops xhci_histb_pm_ops = {
@@ -385,7 +373,7 @@ MODULE_DEVICE_TABLE(of, histb_xhci_of_match);
static struct platform_driver histb_xhci_driver = {
.probe = xhci_histb_probe,
- .remove = xhci_histb_remove,
+ .remove_new = xhci_histb_remove,
.driver = {
.name = "xhci-histb",
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 7e106bd804ca..19a402123de0 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -143,7 +143,6 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
xhci_link_segments(last, next, ring->type, chain_links);
ring->num_segs += num_segs;
- ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
@@ -422,22 +421,14 @@ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
* Allocate a new ring which has same segment numbers and link the two rings.
*/
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
- unsigned int num_trbs, gfp_t flags)
+ unsigned int num_new_segs, gfp_t flags)
{
struct xhci_segment *first;
struct xhci_segment *last;
- unsigned int num_segs;
- unsigned int num_segs_needed;
int ret;
- num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
- (TRBS_PER_SEGMENT - 1);
-
- /* Allocate number of segments we needed, or double the ring size */
- num_segs = max(ring->num_segs, num_segs_needed);
-
ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
- num_segs, ring->cycle_state, ring->type,
+ num_new_segs, ring->cycle_state, ring->type,
ring->bounce_buf_len, flags);
if (ret)
return -ENOMEM;
@@ -457,7 +448,7 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
return ret;
}
- xhci_link_rings(xhci, ring, first, last, num_segs);
+ xhci_link_rings(xhci, ring, first, last, num_new_segs);
trace_xhci_ring_expansion(ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
@@ -1831,13 +1822,15 @@ xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
* low or high 32 bits of ERSTBA immediately causes the controller to
* dereference the partially cleared 64 bit address, causing IOMMU error.
*/
- tmp = readl(&ir->ir_set->erst_size);
- tmp &= ERST_SIZE_MASK;
- writel(tmp, &ir->ir_set->erst_size);
-
- tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- tmp64 &= (u64) ERST_PTR_MASK;
- xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue);
+ if (ir->ir_set) {
+ tmp = readl(&ir->ir_set->erst_size);
+ tmp &= ERST_SIZE_MASK;
+ writel(tmp, &ir->ir_set->erst_size);
+
+ tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ tmp64 &= (u64) ERST_PTR_MASK;
+ xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue);
+ }
/* free interrupter event ring */
if (ir->event_ring)
@@ -1968,7 +1961,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
{
u32 temp, port_offset, port_count;
int i;
- u8 major_revision, minor_revision;
+ u8 major_revision, minor_revision, tmp_minor_revision;
struct xhci_hub *rhub;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_port_cap *port_cap;
@@ -1988,6 +1981,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
*/
if (minor_revision > 0x00 && minor_revision < 0x10)
minor_revision <<= 4;
+ /*
+ * Some Zhaoxin xHCI controllers follow the USB 3.1 spec but
+ * only support Gen1 speeds.
+ */
+ if (xhci->quirks & XHCI_ZHAOXIN_HOST) {
+ tmp_minor_revision = minor_revision;
+ minor_revision = 0;
+ }
+
} else if (major_revision <= 0x02) {
rhub = &xhci->usb2_rhub;
} else {
@@ -1996,10 +1998,6 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
/* Ignoring port protocol we can't understand. FIXME */
return;
}
- rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
-
- if (rhub->min_rev < minor_revision)
- rhub->min_rev = minor_revision;
/* Port offset and count in the third dword, see section 7.2 */
temp = readl(addr + 2);
@@ -2017,8 +2015,6 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
if (xhci->num_port_caps > max_caps)
return;
- port_cap->maj_rev = major_revision;
- port_cap->min_rev = minor_revision;
port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
if (port_cap->psi_count) {
@@ -2039,6 +2035,11 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
port_cap->psi_uid_count++;
+ if (xhci->quirks & XHCI_ZHAOXIN_HOST &&
+ major_revision == 0x03 &&
+ XHCI_EXT_PORT_PSIV(port_cap->psi[i]) >= 5)
+ minor_revision = tmp_minor_revision;
+
xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
@@ -2048,6 +2049,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
}
}
+
+ rhub->maj_rev = major_revision;
+
+ if (rhub->min_rev < minor_revision)
+ rhub->min_rev = minor_revision;
+
+ port_cap->maj_rev = major_revision;
+ port_cap->min_rev = minor_revision;
+
/* cache usb2 port capabilities */
if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
xhci->ext_caps[xhci->num_ext_caps++] = temp;
@@ -2227,43 +2237,50 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
}
static struct xhci_interrupter *
-xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int intr_num, gfp_t flags)
+xhci_alloc_interrupter(struct xhci_hcd *xhci, gfp_t flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_interrupter *ir;
- u64 erst_base;
- u32 erst_size;
int ret;
- if (intr_num > xhci->max_interrupters) {
- xhci_warn(xhci, "Can't allocate interrupter %d, max interrupters %d\n",
- intr_num, xhci->max_interrupters);
- return NULL;
- }
-
- if (xhci->interrupter) {
- xhci_warn(xhci, "Can't allocate already set up interrupter %d\n", intr_num);
- return NULL;
- }
-
ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
if (!ir)
return NULL;
- ir->ir_set = &xhci->run_regs->ir_set[intr_num];
ir->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
0, flags);
if (!ir->event_ring) {
- xhci_warn(xhci, "Failed to allocate interrupter %d event ring\n", intr_num);
- goto fail_ir;
+ xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
+ kfree(ir);
+ return NULL;
}
ret = xhci_alloc_erst(xhci, ir->event_ring, &ir->erst, flags);
if (ret) {
- xhci_warn(xhci, "Failed to allocate interrupter %d erst\n", intr_num);
- goto fail_ev;
+ xhci_warn(xhci, "Failed to allocate interrupter erst\n");
+ xhci_ring_free(xhci, ir->event_ring);
+ kfree(ir);
+ return NULL;
+ }
+ return ir;
+}
+
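+/* Associate an allocated interrupter with interrupt register set "intr_num" and program its ERST registers */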
+static int
+xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ unsigned int intr_num)
+{
+ u64 erst_base;
+ u32 erst_size;
+
+ if (intr_num > xhci->max_interrupters) {
+ xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
+ intr_num, xhci->max_interrupters);
+ return -EINVAL;
}
+
+ ir->ir_set = &xhci->run_regs->ir_set[intr_num];
+
/* set ERST count with the number of entries in the segment table */
erst_size = readl(&ir->ir_set->erst_size);
erst_size &= ERST_SIZE_MASK;
@@ -2278,14 +2295,7 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int intr_num, gfp_t flags
/* Set the event ring dequeue address of this interrupter */
xhci_set_hc_event_deq(xhci, ir);
- return ir;
-
-fail_ev:
- xhci_ring_free(xhci, ir->event_ring);
-fail_ir:
- kfree(ir);
-
- return NULL;
+ return 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
@@ -2352,8 +2362,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
- xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
- TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
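+ /*
+ * Hosts with the Zhaoxin TRB-fetch quirk get double-sized, double-aligned
+ * segments, presumably so the controller's TRB prefetch never runs past
+ * the end of the allocation.
+ */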
+ if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
+ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+ TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
+ else
+ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+ TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
@@ -2407,15 +2421,17 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
"// Doorbell array is located at offset 0x%x from cap regs base addr",
val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
- /* Set ir_set to interrupt register set 0 */
- /* allocate and set up primary interrupter with an event ring. */
+ /* Allocate and set up primary interrupter 0 with an event ring. */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Allocating primary event ring");
- xhci->interrupter = xhci_alloc_interrupter(xhci, 0, flags);
+ xhci->interrupter = xhci_alloc_interrupter(xhci, flags);
if (!xhci->interrupter)
goto fail;
+ if (xhci_add_interrupter(xhci, xhci->interrupter, 0))
+ goto fail;
+
xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
/*
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 90cf40d6d0c3..51d9d4d4f6a5 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -418,12 +418,6 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
- /*
- * As of now platform drivers don't provide MSI support so we ensure
- * here that the generic code does not try to make a pci_dev from our
- * dev struct in order to setup MSI
- */
- xhci->quirks |= XHCI_PLAT;
xhci->quirks |= XHCI_MTK_HOST;
/*
* MTK host controller gives a spurious successful event after a
@@ -673,7 +667,7 @@ disable_pm:
return ret;
}
-static int xhci_mtk_remove(struct platform_device *pdev)
+static void xhci_mtk_remove(struct platform_device *pdev)
{
struct xhci_hcd_mtk *mtk = platform_get_drvdata(pdev);
struct usb_hcd *hcd = mtk->hcd;
@@ -703,8 +697,6 @@ static int xhci_mtk_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
-
- return 0;
}
static int __maybe_unused xhci_mtk_suspend(struct device *dev)
@@ -824,7 +816,7 @@ MODULE_DEVICE_TABLE(of, mtk_xhci_of_match);
static struct platform_driver mtk_xhci_driver = {
.probe = xhci_mtk_probe,
- .remove = xhci_mtk_remove,
+ .remove_new = xhci_mtk_remove,
.driver = {
.name = "xhci-mtk",
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 79b3691f373f..c6742bae41c0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -108,9 +108,6 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- if (xhci->quirks & XHCI_PLAT)
- return;
-
/* return if using legacy interrupt */
if (hcd->irq > 0)
return;
@@ -208,10 +205,6 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
struct pci_dev *pdev;
int ret;
- /* The xhci platform device has set up IRQs through usb_add_hcd. */
- if (xhci->quirks & XHCI_PLAT)
- return 0;
-
pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
/*
* Some Fresco Logic host controllers advertise MSI, but fail to
@@ -528,6 +521,19 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
xhci->quirks |= XHCI_NO_SOFT_RETRY;
+ if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) {
+ xhci->quirks |= XHCI_ZHAOXIN_HOST;
+ xhci->quirks |= XHCI_LPM_SUPPORT;
+
+ if (pdev->device == 0x9202) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ }
+
+ if (pdev->device == 0x9203)
+ xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ }
+
/* xHC spec requires PCI devices to support D3hot and D3cold */
if (xhci->hci_version >= 0x120)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
@@ -832,7 +838,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
return ret;
}
-static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
@@ -867,7 +873,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
- retval = xhci_resume(xhci, hibernated);
+ retval = xhci_resume(xhci, msg);
return retval;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index b0c8e8efc43b..b26ea7cb4357 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -78,12 +78,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
struct xhci_plat_priv *priv = xhci_to_priv(xhci);
- /*
- * As of now platform drivers don't provide MSI support so we ensure
- * here that the generic code does not try to make a pci_dev from our
- * dev struct in order to setup MSI
- */
- xhci->quirks |= XHCI_PLAT | priv->quirks;
+ xhci->quirks |= priv->quirks;
}
/* called during probe() after chip reset completes */
@@ -294,10 +289,6 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
"usb-phy", 1);
if (IS_ERR(xhci->shared_hcd->usb_phy)) {
- if (PTR_ERR(xhci->shared_hcd->usb_phy) != -ENODEV)
- dev_err(sysdev, "%s get usb3phy fail (ret=%d)\n",
- __func__,
- (int)PTR_ERR(xhci->shared_hcd->usb_phy));
xhci->shared_hcd->usb_phy = NULL;
} else {
ret = usb_phy_init(xhci->shared_hcd->usb_phy);
@@ -399,7 +390,7 @@ static int xhci_generic_plat_probe(struct platform_device *pdev)
return xhci_plat_probe(pdev, sysdev, priv_match);
}
-int xhci_plat_remove(struct platform_device *dev)
+void xhci_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -407,8 +398,8 @@ int xhci_plat_remove(struct platform_device *dev)
struct clk *reg_clk = xhci->reg_clk;
struct usb_hcd *shared_hcd = xhci->shared_hcd;
- pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
+ pm_runtime_get_sync(&dev->dev);
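+ /* Flag the host as removing before resuming it, so xhci_resume() skips reinitialization */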
if (shared_hcd) {
usb_remove_hcd(shared_hcd);
@@ -430,8 +421,6 @@ int xhci_plat_remove(struct platform_device *dev)
pm_runtime_disable(&dev->dev);
pm_runtime_put_noidle(&dev->dev);
pm_runtime_set_suspended(&dev->dev);
-
- return 0;
}
EXPORT_SYMBOL_GPL(xhci_plat_remove);
@@ -478,7 +467,7 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
if (ret)
return ret;
- ret = xhci_resume(xhci, 0);
+ ret = xhci_resume(xhci, PMSG_RESUME);
if (ret)
return ret;
@@ -507,7 +496,7 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- return xhci_resume(xhci, 0);
+ return xhci_resume(xhci, PMSG_AUTO_RESUME);
}
const struct dev_pm_ops xhci_plat_pm_ops = {
@@ -530,7 +519,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_generic_xhci_driver = {
.probe = xhci_generic_plat_probe,
- .remove = xhci_plat_remove,
+ .remove_new = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 83b5b5aa9f8e..2d15386f2c50 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -25,7 +25,7 @@ struct xhci_plat_priv {
int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev,
const struct xhci_plat_priv *priv_match);
-int xhci_plat_remove(struct platform_device *dev);
+void xhci_plat_remove(struct platform_device *dev);
extern const struct dev_pm_ops xhci_plat_pm_ops;
#endif /* _XHCI_PLAT_H */
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index ad966b797b89..bf5261fed32c 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -276,10 +276,10 @@ static int xhci_renesas_probe(struct platform_device *pdev)
}
static struct platform_driver usb_xhci_renesas_driver = {
- .probe = xhci_renesas_probe,
- .remove = xhci_plat_remove,
+ .probe = xhci_renesas_probe,
+ .remove_new = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
- .driver = {
+ .driver = {
.name = "xhci-renesas-hcd",
.pm = &xhci_plat_pm_ops,
.of_match_table = usb_xhci_of_match,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 2bc82b3a2f98..646ff125def5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -174,12 +174,10 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
/* All other rings have link trbs */
if (!trb_is_link(ring->dequeue)) {
- if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+ if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
xhci_warn(xhci, "Missing link TRB at end of segment\n");
- } else {
+ else
ring->dequeue++;
- ring->num_trbs_free++;
- }
}
while (trb_is_link(ring->dequeue)) {
@@ -221,9 +219,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int link_trb_count = 0;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
- /* If this is not event ring, there is one less usable TRB */
- if (!trb_is_link(ring->enqueue))
- ring->num_trbs_free--;
if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
xhci_err(xhci, "Tried to move enqueue past ring segment\n");
@@ -276,45 +271,84 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
trace_xhci_inc_enq(ring);
}
-static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start,
- struct xhci_segment *end_seg, union xhci_trb *end,
- unsigned int num_segs)
+/*
+ * Return number of free normal TRBs from enqueue to dequeue pointer on ring.
+ * Not counting the link TRB assumed at the end of each TRBS_PER_SEGMENT-sized segment.
+ * Only for transfer and command rings where driver is the producer, not for
+ * event rings.
+ */
+static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
+ struct xhci_segment *enq_seg = ring->enq_seg;
+ union xhci_trb *enq = ring->enqueue;
union xhci_trb *last_on_seg;
- int num = 0;
+ unsigned int free = 0;
int i = 0;
+ /* Ring might be empty even if enq != deq if enq is left on a link trb */
+ if (trb_is_link(enq)) {
+ enq_seg = enq_seg->next;
+ enq = enq_seg->trbs;
+ }
+
+ /* Empty ring, common case, don't walk the segments */
+ if (enq == ring->dequeue)
+ return ring->num_segs * (TRBS_PER_SEGMENT - 1);
+
do {
- if (start_seg == end_seg && end >= start)
- return num + (end - start);
- last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1];
- num += last_on_seg - start;
- start_seg = start_seg->next;
- start = start_seg->trbs;
- } while (i++ <= num_segs);
-
- return -EINVAL;
+ if (ring->deq_seg == enq_seg && ring->dequeue >= enq)
+ return free + (ring->dequeue - enq);
+ last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
+ free += last_on_seg - enq;
+ enq_seg = enq_seg->next;
+ enq = enq_seg->trbs;
+ } while (i++ <= ring->num_segs);
+
+ return free;
}
/*
* Check to see if there's room to enqueue num_trbs on the ring and make sure
* enqueue pointer will not advance into dequeue segment. See rules above.
+ * Return the number of new segments needed to ensure this.
*/
-static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
- unsigned int num_trbs)
+
+static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ unsigned int num_trbs)
{
- int num_trbs_in_deq_seg;
+ struct xhci_segment *seg;
+ int trbs_past_seg;
+ int enq_used;
+ int new_segs;
+
+ enq_used = ring->enqueue - ring->enq_seg->trbs;
+
+ /* how many trbs will be queued past the enqueue segment? */
+ trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
- if (ring->num_trbs_free < num_trbs)
+ if (trbs_past_seg <= 0)
return 0;
- if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
- num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
- if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
- return 0;
+ /* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
+ if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
+ return 0;
+
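+ /* One new segment, plus another for each additional full segment's worth of TRBs past the enqueue segment */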
+ new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
+ seg = ring->enq_seg;
+
+ while (new_segs > 0) {
+ seg = seg->next;
+ if (seg == ring->deq_seg) {
+ xhci_dbg(xhci, "Ring expansion by %d segments needed\n",
+ new_segs);
+ xhci_dbg(xhci, "Adding %d trbs moves enq %d trbs into deq seg\n",
+ num_trbs, trbs_past_seg % TRBS_PER_SEGMENT);
+ return new_segs;
+ }
+ new_segs--;
}
- return 1;
+ return 0;
}
/* Ring the host controller doorbell after placing a command on the ring */
@@ -1268,10 +1302,7 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
unsigned int ep_index)
{
union xhci_trb *dequeue_temp;
- int num_trbs_free_temp;
- bool revert = false;
- num_trbs_free_temp = ep_ring->num_trbs_free;
dequeue_temp = ep_ring->dequeue;
/* If we get two back-to-back stalls, and the first stalled transfer
@@ -1287,7 +1318,6 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
- ep_ring->num_trbs_free++;
ep_ring->dequeue++;
if (trb_is_link(ep_ring->dequeue)) {
if (ep_ring->dequeue ==
@@ -1297,15 +1327,10 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
if (ep_ring->dequeue == dequeue_temp) {
- revert = true;
+ xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
break;
}
}
-
- if (revert) {
- xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
- ep_ring->num_trbs_free = num_trbs_free_temp;
- }
}
/*
@@ -2160,7 +2185,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
u32 trb_comp_code)
{
struct xhci_ep_ctx *ep_ctx;
- int trbs_freed;
ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
@@ -2230,13 +2254,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
}
/* Update ring dequeue pointer */
- trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue,
- td->last_trb_seg, td->last_trb,
- ep_ring->num_segs);
- if (trbs_freed < 0)
- xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n");
- else
- ep_ring->num_trbs_free += trbs_freed;
ep_ring->dequeue = td->last_trb;
ep_ring->deq_seg = td->last_trb_seg;
inc_deq(xhci, ep_ring);
@@ -2460,7 +2477,6 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Update ring dequeue pointer */
ep->ring->dequeue = td->last_trb;
ep->ring->deq_seg = td->last_trb_seg;
- ep->ring->num_trbs_free += td->num_trbs - 1;
inc_deq(xhci, ep->ring);
return xhci_td_cleanup(xhci, td, ep->ring, status);
@@ -3165,13 +3181,13 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
/*
* Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
- * FIXME allocate segments if the ring is full.
+ * Expand the ring if it starts to become full.
*/
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
- unsigned int num_trbs_needed;
unsigned int link_trb_count = 0;
+ unsigned int new_segs = 0;
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
@@ -3202,20 +3218,17 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
return -EINVAL;
}
- while (1) {
- if (room_on_ring(xhci, ep_ring, num_trbs))
- break;
-
- if (ep_ring == xhci->cmd_ring) {
- xhci_err(xhci, "Do not support expand command ring\n");
- return -ENOMEM;
- }
+ if (ep_ring != xhci->cmd_ring) {
+ new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
+ } else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) {
+ xhci_err(xhci, "Do not support expand command ring\n");
+ return -ENOMEM;
+ }
+ if (new_segs) {
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ERROR no room on ep ring, try ring expansion");
- num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
- if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
- mem_flags)) {
+ if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) {
xhci_err(xhci, "Ring expansion failed\n");
return -ENOMEM;
}
@@ -4185,7 +4198,6 @@ cleanup:
ep_ring->enqueue = urb_priv->td[0].first_trb;
ep_ring->enq_seg = urb_priv->td[0].start_seg;
ep_ring->cycle_state = start_cycle;
- ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
return ret;
}
@@ -4267,7 +4279,6 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
}
skip_start_over:
- ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index c75d93244143..6ca8a37e53e1 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1828,6 +1828,9 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto remove_usb2;
}
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
err = usb_add_hcd(xhci->shared_hcd, tegra->xhci_irq, IRQF_SHARED);
if (err < 0) {
dev_err(&pdev->dev, "failed to add shared HCD: %d\n", err);
@@ -1909,7 +1912,7 @@ put_padctl:
return err;
}
-static int tegra_xusb_remove(struct platform_device *pdev)
+static void tegra_xusb_remove(struct platform_device *pdev)
{
struct tegra_xusb *tegra = platform_get_drvdata(pdev);
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
@@ -1939,8 +1942,6 @@ static int tegra_xusb_remove(struct platform_device *pdev)
tegra_xusb_clk_disable(tegra);
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
tegra_xusb_padctl_put(tegra->padctl);
-
- return 0;
}
static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
@@ -2272,7 +2273,7 @@ static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime)
if (wakeup)
tegra_xhci_disable_phy_sleepwalk(tegra);
- err = xhci_resume(xhci, 0);
+ err = xhci_resume(xhci, runtime ? PMSG_AUTO_RESUME : PMSG_RESUME);
if (err < 0) {
dev_err(tegra->dev, "failed to resume XHCI: %d\n", err);
goto disable_phy;
@@ -2650,7 +2651,7 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
static struct platform_driver tegra_xusb_driver = {
.probe = tegra_xusb_probe,
- .remove = tegra_xusb_remove,
+ .remove_new = tegra_xusb_remove,
.driver = {
.name = "tegra-xusb",
.pm = &tegra_xusb_pm_ops,
@@ -2662,7 +2663,6 @@ static void tegra_xhci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- xhci->quirks |= XHCI_PLAT;
if (tegra && tegra->soc->lpm_support)
xhci->quirks |= XHCI_LPM_SUPPORT;
}
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 4286dba5b157..d6b32f2ad90e 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -80,20 +80,16 @@ DECLARE_EVENT_CLASS(xhci_log_ctx,
__field(dma_addr_t, ctx_dma)
__field(u8 *, ctx_va)
__field(unsigned, ctx_ep_num)
- __field(int, slot_id)
__dynamic_array(u32, ctx_data,
((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
),
TP_fast_assign(
- struct usb_device *udev;
- udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
__entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
__entry->ctx_type = ctx->type;
__entry->ctx_dma = ctx->dma;
__entry->ctx_va = ctx->bytes;
- __entry->slot_id = udev->slot_id;
__entry->ctx_ep_num = ep_num;
memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
@@ -462,7 +458,6 @@ DECLARE_EVENT_CLASS(xhci_log_ring,
__field(unsigned int, num_segs)
__field(unsigned int, stream_id)
__field(unsigned int, cycle_state)
- __field(unsigned int, num_trbs_free)
__field(unsigned int, bounce_buf_len)
),
TP_fast_assign(
@@ -473,18 +468,16 @@ DECLARE_EVENT_CLASS(xhci_log_ring,
__entry->enq_seg = ring->enq_seg->dma;
__entry->deq_seg = ring->deq_seg->dma;
__entry->cycle_state = ring->cycle_state;
- __entry->num_trbs_free = ring->num_trbs_free;
__entry->bounce_buf_len = ring->bounce_buf_len;
__entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
__entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
),
- TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d free_trbs %d bounce %d cycle %d",
+ TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d bounce %d cycle %d",
xhci_ring_type_string(__entry->type), __entry->ring,
&__entry->enq, &__entry->enq_seg,
&__entry->deq, &__entry->deq_seg,
__entry->num_segs,
__entry->stream_id,
- __entry->num_trbs_free,
__entry->bounce_buf_len,
__entry->cycle_state
)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 78790dc13c5f..fae994f679d4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -833,7 +833,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci)
ports = xhci->usb3_rhub.ports;
while (port_index--) {
portsc = readl(ports[port_index]->addr);
- if (portsc & PORT_CHANGE_MASK ||
+ if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
(portsc & PORT_PLS_MASK) == XDEV_RESUME)
return true;
}
@@ -960,8 +960,9 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
* This is called when the machine transitions from S3/S4 mode.
*
*/
-int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
{
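+ /* Only a restore from hibernation (PM_EVENT_RESTORE) forces full controller reinitialization */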
+ bool hibernated = (msg.event == PM_EVENT_RESTORE);
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
int retval = 0;
@@ -1027,7 +1028,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
temp = readl(&xhci->op_regs->status);
/* re-initialize the HC on Restore Error, or Host Controller Error */
- if (temp & (STS_SRE | STS_HCE)) {
+ if ((temp & (STS_SRE | STS_HCE)) &&
+ !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
reinit_xhc = true;
if (!xhci->broken_suspend)
xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
@@ -1116,7 +1118,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
* the first wake signalling failed, give it that chance.
*/
pending_portevent = xhci_pending_portevent(xhci);
- if (!pending_portevent) {
+ if (!pending_portevent && msg.event == PM_EVENT_AUTO_RESUME) {
msleep(120);
pending_portevent = xhci_pending_portevent(xhci);
}
@@ -4604,7 +4606,7 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
+ if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
else
timeout_ns = udev->u1_params.sel;
@@ -4668,7 +4670,7 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
+ if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
else
timeout_ns = udev->u2_params.sel;
@@ -4740,37 +4742,30 @@ static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
return 0;
}
-static int xhci_check_intel_tier_policy(struct usb_device *udev,
+static int xhci_check_tier_policy(struct xhci_hcd *xhci,
+ struct usb_device *udev,
enum usb3_link_state state)
{
- struct usb_device *parent;
- unsigned int num_hubs;
+ struct usb_device *parent = udev->parent;
+ int tier = 1; /* roothub is tier1 */
- /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
- for (parent = udev->parent, num_hubs = 0; parent->parent;
- parent = parent->parent)
- num_hubs++;
+ while (parent) {
+ parent = parent->parent;
+ tier++;
+ }
- if (num_hubs < 2)
- return 0;
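+ /*
+ * Intel hosts allow U1/U2 for devices up to tier 3 (behind a single
+ * external hub); Zhaoxin hosts only for directly attached devices.
+ */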
+ if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
+ goto fail;
+ if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
+ goto fail;
- dev_dbg(&udev->dev, "Disabling U1/U2 link state for device"
- " below second-tier hub.\n");
- dev_dbg(&udev->dev, "Plug device into first-tier hub "
- "to decrease power consumption.\n");
+ return 0;
+fail:
+ dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n",
+ tier);
return -E2BIG;
}
-static int xhci_check_tier_policy(struct xhci_hcd *xhci,
- struct usb_device *udev,
- enum usb3_link_state state)
-{
- if (xhci->quirks & XHCI_INTEL_HOST)
- return xhci_check_intel_tier_policy(udev, state);
- else
- return 0;
-}
-
/* Returns the U1 or U2 timeout that should be enabled.
* If the tier check or timeout setting functions return with a non-zero exit
* code, that means the timeout value has been finalized and we shouldn't look
@@ -5180,7 +5175,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
xhci->quirks |= quirks;
- get_quirks(dev, xhci);
+ if (get_quirks)
+ get_quirks(dev, xhci);
/* In xhci controllers which follow xhci 1.0 spec gives a spurious
* success event after a short transfer. This quirk will ignore such
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6b690ec91ff3..7e282b4522c0 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1633,8 +1633,7 @@ struct xhci_ring {
u32 cycle_state;
unsigned int stream_id;
unsigned int num_segs;
- unsigned int num_trbs_free;
- unsigned int num_trbs_free_temp;
+ unsigned int num_trbs_free; /* used only by xhci DbC */
unsigned int bounce_buf_len;
enum xhci_ring_type type;
bool last_td_was_short;
@@ -1874,7 +1873,7 @@ struct xhci_hcd {
#define XHCI_SPURIOUS_REBOOT BIT_ULL(13)
#define XHCI_COMP_MODE_QUIRK BIT_ULL(14)
#define XHCI_AVOID_BEI BIT_ULL(15)
-#define XHCI_PLAT BIT_ULL(16)
+#define XHCI_PLAT BIT_ULL(16) /* Deprecated */
#define XHCI_SLOW_SUSPEND BIT_ULL(17)
#define XHCI_SPURIOUS_WAKEUP BIT_ULL(18)
/* For controllers with a broken beyond repair streams implementation */
@@ -1905,6 +1904,8 @@ struct xhci_hcd {
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
+#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
+#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -2140,7 +2141,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
-int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
+int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
irqreturn_t xhci_msi_irq(int irq, void *hcd);
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 65ba5aca2a4f..fe1e3985419a 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -246,11 +246,9 @@ static int isp1760_plat_probe(struct platform_device *pdev)
return 0;
}
-static int isp1760_plat_remove(struct platform_device *pdev)
+static void isp1760_plat_remove(struct platform_device *pdev)
{
isp1760_unregister(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -265,7 +263,7 @@ MODULE_DEVICE_TABLE(of, isp1760_of_match);
static struct platform_driver isp1760_plat_driver = {
.probe = isp1760_plat_probe,
- .remove = isp1760_plat_remove,
+ .remove_new = isp1760_plat_remove,
.driver = {
.name = "isp1760",
.of_match_table = of_match_ptr(isp1760_of_match),
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 12fc6eb67c3b..83f14ca1d38e 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -278,7 +278,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
return 0;
}
-static int onboard_hub_remove(struct platform_device *pdev)
+static void onboard_hub_remove(struct platform_device *pdev)
{
struct onboard_hub *hub = dev_get_drvdata(&pdev->dev);
struct usbdev_node *node;
@@ -306,7 +306,7 @@ static int onboard_hub_remove(struct platform_device *pdev)
mutex_unlock(&hub->lock);
- return onboard_hub_power_off(hub);
+ onboard_hub_power_off(hub);
}
MODULE_DEVICE_TABLE(of, onboard_hub_match);
@@ -317,7 +317,7 @@ static const struct dev_pm_ops __maybe_unused onboard_hub_pm_ops = {
static struct platform_driver onboard_hub_driver = {
.probe = onboard_hub_probe,
- .remove = onboard_hub_remove,
+ .remove_new = onboard_hub_remove,
.driver = {
.name = "onboard-usb-hub",
diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c
index b7f13df00764..7f371ea1248c 100644
--- a/drivers/usb/misc/qcom_eud.c
+++ b/drivers/usb/misc/qcom_eud.c
@@ -22,10 +22,10 @@
#define EUD_REG_VBUS_INT_CLR 0x0080
#define EUD_REG_CSR_EUD_EN 0x1014
#define EUD_REG_SW_ATTACH_DET 0x1018
-#define EUD_REG_EUD_EN2 0x0000
+#define EUD_REG_EUD_EN2 0x0000
#define EUD_ENABLE BIT(0)
-#define EUD_INT_PET_EUD BIT(0)
+#define EUD_INT_PET_EUD BIT(0)
#define EUD_INT_VBUS BIT(2)
#define EUD_INT_SAFE_MODE BIT(4)
#define EUD_INT_ALL (EUD_INT_VBUS | EUD_INT_SAFE_MODE)
@@ -217,7 +217,7 @@ static int eud_probe(struct platform_device *pdev)
return 0;
}
-static int eud_remove(struct platform_device *pdev)
+static void eud_remove(struct platform_device *pdev)
{
struct eud_chip *chip = platform_get_drvdata(pdev);
@@ -226,8 +226,6 @@ static int eud_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, false);
disable_irq_wake(chip->irq);
-
- return 0;
}
static const struct of_device_id eud_dt_match[] = {
@@ -238,7 +236,7 @@ MODULE_DEVICE_TABLE(of, eud_dt_match);
static struct platform_driver eud_driver = {
.probe = eud_probe,
- .remove = eud_remove,
+ .remove_new = eud_remove,
.driver = {
.name = "qcom_eud",
.dev_groups = eud_groups,
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index ce1da80d3365..e4edb486b69e 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -416,14 +416,13 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
return dev_err_probe(dev, PTR_ERR(hub->gpio_reset),
"unable to request GPIO reset pin\n");
- if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
+ if (of_property_read_u16(np, "vendor-id", &hub->vendor_id))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
- if (of_property_read_u16_array(np, "product-id",
- &hub->product_id, 1))
+ if (of_property_read_u16(np, "product-id", &hub->product_id))
hub->product_id = data->product_id;
- if (of_property_read_u16_array(np, "device-id", &hub->device_id, 1))
+ if (of_property_read_u16(np, "device-id", &hub->device_id))
hub->device_id = USB251XB_DEF_DEVICE_ID;
hub->conf_data1 = USB251XB_DEF_CONFIG_DATA_1;
@@ -532,7 +531,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (!of_property_read_u32(np, "power-on-time-ms", &property_u32))
hub->power_on_time = min_t(u8, property_u32 / 2, 255);
- if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
+ if (of_property_read_u16(np, "language-id", &hub->lang_id))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
if (of_property_read_u8(np, "boost-up", &hub->boost_up))
@@ -746,7 +745,7 @@ static struct i2c_driver usb251xb_i2c_driver = {
.of_match_table = usb251xb_of_match,
.pm = &usb251xb_pm_ops,
},
- .probe_new = usb251xb_i2c_probe,
+ .probe = usb251xb_i2c_probe,
.id_table = usb251xb_id,
};
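The usb251xb hunks above swap of_property_read_u16_array(np, prop, &val, 1) for the equivalent single-value helper. A tiny sketch with a made-up property name and default:

#include <linux/of.h>

static u16 example_read_id(struct device_node *np)
{
	u16 id;

	/* of_property_read_u16() fails if the property is absent or not a
	 * single u16; in that case fall back to a made-up default. */
	if (of_property_read_u16(np, "example-id", &id))
		id = 0x1234;

	return id;
}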
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index c6cfd1edaf76..72765077932c 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -335,14 +335,12 @@ static int usb3503_platform_probe(struct platform_device *pdev)
return usb3503_probe(hub);
}
-static int usb3503_platform_remove(struct platform_device *pdev)
+static void usb3503_platform_remove(struct platform_device *pdev)
{
struct usb3503 *hub;
hub = platform_get_drvdata(pdev);
clk_disable_unprepare(hub->clk);
-
- return 0;
}
static int __maybe_unused usb3503_suspend(struct usb3503 *hub)
@@ -413,7 +411,7 @@ static struct i2c_driver usb3503_i2c_driver = {
.pm = pm_ptr(&usb3503_i2c_pm_ops),
.of_match_table = of_match_ptr(usb3503_of_match),
},
- .probe_new = usb3503_i2c_probe,
+ .probe = usb3503_i2c_probe,
.remove = usb3503_i2c_remove,
.id_table = usb3503_id,
};
@@ -425,7 +423,7 @@ static struct platform_driver usb3503_platform_driver = {
.pm = pm_ptr(&usb3503_platform_pm_ops),
},
.probe = usb3503_platform_probe,
- .remove = usb3503_platform_remove,
+ .remove_new = usb3503_platform_remove,
};
static int __init usb3503_init(void)
diff --git a/drivers/usb/misc/usb4604.c b/drivers/usb/misc/usb4604.c
index 6b5e77231efa..065e269ba4e3 100644
--- a/drivers/usb/misc/usb4604.c
+++ b/drivers/usb/misc/usb4604.c
@@ -154,7 +154,7 @@ static struct i2c_driver usb4604_i2c_driver = {
.pm = pm_ptr(&usb4604_i2c_pm_ops),
.of_match_table = of_match_ptr(usb4604_of_match),
},
- .probe_new = usb4604_i2c_probe,
+ .probe = usb4604_i2c_probe,
.id_table = usb4604_id,
};
module_i2c_driver(usb4604_i2c_driver);
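The usb251xb, usb3503 and usb4604 hunks also move the I2C drivers from .probe_new back to .probe, which now uses the single-argument signature. A hypothetical skeleton of the resulting driver shape:

#include <linux/i2c.h>
#include <linux/module.h>

/* New-style probe: only the client, no struct i2c_device_id argument. */
static int example_i2c_probe(struct i2c_client *client)
{
	dev_info(&client->dev, "example device at 0x%02x\n", client->addr);
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_i2c_driver = {
	.driver		= {
		.name	= "example",
	},
	.probe		= example_i2c_probe,	/* formerly .probe_new */
	.id_table	= example_id,
};
module_i2c_driver(example_i2c_driver);

MODULE_LICENSE("GPL");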
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 952c56789258..9ca9305243fe 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -213,7 +213,10 @@ static unsigned char xfer_to_pipe[4] = {
PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};
-static struct class *mon_bin_class;
+static const struct class mon_bin_class = {
+ .name = "usbmon",
+};
+
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;
@@ -1360,7 +1363,7 @@ int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
if (minor >= MON_BIN_MAX_MINOR)
return 0;
- dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
+ dev = device_create(&mon_bin_class, ubus ? ubus->controller : NULL,
MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
"usbmon%d", minor);
if (IS_ERR(dev))
@@ -1372,18 +1375,16 @@ int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
void mon_bin_del(struct mon_bus *mbus)
{
- device_destroy(mon_bin_class, mbus->classdev->devt);
+ device_destroy(&mon_bin_class, mbus->classdev->devt);
}
int __init mon_bin_init(void)
{
int rc;
- mon_bin_class = class_create("usbmon");
- if (IS_ERR(mon_bin_class)) {
- rc = PTR_ERR(mon_bin_class);
+ rc = class_register(&mon_bin_class);
+ if (rc)
goto err_class;
- }
rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
if (rc < 0)
@@ -1401,7 +1402,7 @@ int __init mon_bin_init(void)
err_add:
unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
- class_destroy(mon_bin_class);
+ class_unregister(&mon_bin_class);
err_class:
return rc;
}
@@ -1410,5 +1411,5 @@ void mon_bin_exit(void)
{
cdev_del(&mon_bin_cdev);
unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
- class_destroy(mon_bin_class);
+ class_unregister(&mon_bin_class);
}
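mon_bin.c here (and roles/class.c below) replace the class_create() allocation with a statically defined const struct class that is registered at init time. The bare pattern, under made-up names:

#include <linux/device/class.h>
#include <linux/module.h>

static const struct class example_class = {
	.name = "example_class",
};

static int __init example_class_init(void)
{
	/* class_register() returns 0 or a negative errno, so no more
	 * IS_ERR()/PTR_ERR() dance around a returned pointer. */
	return class_register(&example_class);
}
module_init(example_class_init);

static void __exit example_class_exit(void)
{
	class_unregister(&example_class);
}
module_exit(example_class_exit);

MODULE_LICENSE("GPL");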
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index f4ee14d98585..993d7525a102 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -132,7 +132,7 @@ static struct i2c_driver isp1301_driver = {
.name = DRV_NAME,
.of_match_table = isp1301_of_match,
},
- .probe_new = isp1301_probe,
+ .probe = isp1301_probe,
.remove = isp1301_remove,
.id_table = isp1301_id,
};
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index 47562d49dfc1..5cac31c6029b 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -391,7 +391,7 @@ static int tahvo_usb_probe(struct platform_device *pdev)
tu->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0)
- return ret;
+ goto err_remove_phy;
ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
IRQF_ONESHOT,
"tahvo-vbus", tu);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index fa34efabcccf..111b7ee152c4 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -762,7 +762,7 @@ probe_end_pipe_exit:
return ret;
}
-static int usbhs_remove(struct platform_device *pdev)
+static void usbhs_remove(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
@@ -780,8 +780,6 @@ static int usbhs_remove(struct platform_device *pdev)
usbhs_mod_remove(priv);
usbhs_fifo_remove(priv);
usbhs_pipe_remove(priv);
-
- return 0;
}
static __maybe_unused int usbhsc_suspend(struct device *dev)
@@ -826,7 +824,7 @@ static struct platform_driver renesas_usbhs_driver = {
.of_match_table = usbhs_of_match,
},
.probe = usbhs_probe,
- .remove = usbhs_remove,
+ .remove_new = usbhs_remove,
};
module_platform_driver(renesas_usbhs_driver);
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 0395bd5dbd3e..ae41578bd014 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -14,7 +14,9 @@
#include <linux/mutex.h>
#include <linux/slab.h>
-static struct class *role_class;
+static const struct class role_class = {
+ .name = "usb_role",
+};
struct usb_role_switch {
struct device dev;
@@ -95,7 +97,7 @@ static void *usb_role_switch_match(const struct fwnode_handle *fwnode, const cha
if (id && !fwnode_property_present(fwnode, id))
return NULL;
- dev = class_find_device_by_fwnode(role_class, fwnode);
+ dev = class_find_device_by_fwnode(&role_class, fwnode);
return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
@@ -111,7 +113,7 @@ usb_role_switch_is_parent(struct fwnode_handle *fwnode)
return NULL;
}
- dev = class_find_device_by_fwnode(role_class, parent);
+ dev = class_find_device_by_fwnode(&role_class, parent);
fwnode_handle_put(parent);
return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
@@ -191,7 +193,7 @@ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
if (!fwnode)
return NULL;
- dev = class_find_device_by_fwnode(role_class, fwnode);
+ dev = class_find_device_by_fwnode(&role_class, fwnode);
if (dev)
WARN_ON(!try_module_get(dev->parent->driver->owner));
@@ -338,7 +340,7 @@ usb_role_switch_register(struct device *parent,
sw->dev.parent = parent;
sw->dev.fwnode = desc->fwnode;
- sw->dev.class = role_class;
+ sw->dev.class = &role_class;
sw->dev.type = &usb_role_dev_type;
dev_set_drvdata(&sw->dev, desc->driver_data);
dev_set_name(&sw->dev, "%s-role-switch",
@@ -392,14 +394,13 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get_drvdata);
static int __init usb_roles_init(void)
{
- role_class = class_create("usb_role");
- return PTR_ERR_OR_ZERO(role_class);
+ return class_register(&role_class);
}
subsys_initcall(usb_roles_init);
static void __exit usb_roles_exit(void)
{
- class_destroy(role_class);
+ class_unregister(&role_class);
}
module_exit(usb_roles_exit);
diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c
index 5c96e929acea..e5c6c413a075 100644
--- a/drivers/usb/roles/intel-xhci-usb-role-switch.c
+++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c
@@ -195,7 +195,7 @@ static int intel_xhci_usb_probe(struct platform_device *pdev)
return 0;
}
-static int intel_xhci_usb_remove(struct platform_device *pdev)
+static void intel_xhci_usb_remove(struct platform_device *pdev)
{
struct intel_xhci_usb_data *data = platform_get_drvdata(pdev);
@@ -203,8 +203,6 @@ static int intel_xhci_usb_remove(struct platform_device *pdev)
usb_role_switch_unregister(data->role_sw);
fwnode_handle_put(software_node_fwnode(&intel_xhci_usb_node));
-
- return 0;
}
static const struct platform_device_id intel_xhci_usb_table[] = {
@@ -219,7 +217,7 @@ static struct platform_driver intel_xhci_usb_driver = {
},
.id_table = intel_xhci_usb_table,
.probe = intel_xhci_usb_probe,
- .remove = intel_xhci_usb_remove,
+ .remove_new = intel_xhci_usb_remove,
};
module_platform_driver(intel_xhci_usb_driver);
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 9452291f1703..67a07cc007f0 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -433,10 +433,11 @@ static int ark3116_tiocmset(struct tty_struct *tty,
return 0;
}
-static void ark3116_break_ctl(struct tty_struct *tty, int break_state)
+static int ark3116_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
+ int ret;
/* LCR is also used for other things: protect access */
mutex_lock(&priv->hw_lock);
@@ -446,9 +447,11 @@ static void ark3116_break_ctl(struct tty_struct *tty, int break_state)
else
priv->lcr &= ~UART_LCR_SBC;
- ark3116_write_reg(port->serial, UART_LCR, priv->lcr);
+ ret = ark3116_write_reg(port->serial, UART_LCR, priv->lcr);
mutex_unlock(&priv->hw_lock);
+
+ return ret;
}
static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 9331a562dac0..cf47ee4ae5d3 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -46,7 +46,7 @@ static void belkin_sa_process_read_urb(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
-static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state);
+static int belkin_sa_break_ctl(struct tty_struct *tty, int break_state);
static int belkin_sa_tiocmget(struct tty_struct *tty);
static int belkin_sa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
@@ -399,13 +399,19 @@ static void belkin_sa_set_termios(struct tty_struct *tty,
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state)
+static int belkin_sa_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
+ int ret;
- if (BSA_USB_CMD(BELKIN_SA_SET_BREAK_REQUEST, break_state ? 1 : 0) < 0)
+ ret = BSA_USB_CMD(BELKIN_SA_SET_BREAK_REQUEST, break_state ? 1 : 0);
+ if (ret < 0) {
dev_err(&port->dev, "Set break_ctl %d\n", break_state);
+ return ret;
+ }
+
+ return 0;
}
static int belkin_sa_tiocmget(struct tty_struct *tty)
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 6e1b87e67304..612bea504d7a 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -562,12 +562,12 @@ static void ch341_set_termios(struct tty_struct *tty,
* TCSBRKP. Due to how the simulation is implemented the duration can't be
* controlled. The duration is always about (1s / 46bd * 9bit) = 196ms.
*/
-static void ch341_simulate_break(struct tty_struct *tty, int break_state)
+static int ch341_simulate_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long now, delay;
- int r;
+ int r, r2;
if (break_state != 0) {
dev_dbg(&port->dev, "enter break state requested\n");
@@ -599,7 +599,7 @@ static void ch341_simulate_break(struct tty_struct *tty, int break_state)
*/
priv->break_end = jiffies + (11 * HZ / CH341_MIN_BPS);
- return;
+ return 0;
}
dev_dbg(&port->dev, "leave break state requested\n");
@@ -615,17 +615,22 @@ static void ch341_simulate_break(struct tty_struct *tty, int break_state)
schedule_timeout_interruptible(delay);
}
+ r = 0;
restore:
/* Restore original baud rate */
- r = ch341_set_baudrate_lcr(port->serial->dev, priv, priv->baud_rate,
- priv->lcr);
- if (r < 0)
+ r2 = ch341_set_baudrate_lcr(port->serial->dev, priv, priv->baud_rate,
+ priv->lcr);
+ if (r2 < 0) {
dev_err(&port->dev,
"restoring original baud rate of %u failed: %d\n",
- priv->baud_rate, r);
+ priv->baud_rate, r2);
+ return r2;
+ }
+
+ return r;
}
-static void ch341_break_ctl(struct tty_struct *tty, int break_state)
+static int ch341_break_ctl(struct tty_struct *tty, int break_state)
{
const uint16_t ch341_break_reg =
((uint16_t) CH341_REG_LCR << 8) | CH341_REG_BREAK;
@@ -635,17 +640,17 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
uint16_t reg_contents;
uint8_t break_reg[2];
- if (priv->quirks & CH341_QUIRK_SIMULATE_BREAK) {
- ch341_simulate_break(tty, break_state);
- return;
- }
+ if (priv->quirks & CH341_QUIRK_SIMULATE_BREAK)
+ return ch341_simulate_break(tty, break_state);
r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
ch341_break_reg, 0, break_reg, 2);
if (r) {
dev_err(&port->dev, "%s - USB control read error (%d)\n",
__func__, r);
- return;
+ if (r > 0)
+ r = -EIO;
+ return r;
}
dev_dbg(&port->dev, "%s - initial ch341 break register contents - reg1: %x, reg2: %x\n",
__func__, break_reg[0], break_reg[1]);
@@ -663,9 +668,13 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
reg_contents = get_unaligned_le16(break_reg);
r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG,
ch341_break_reg, reg_contents);
- if (r < 0)
+ if (r < 0) {
dev_err(&port->dev, "%s - USB control write error (%d)\n",
__func__, r);
+ return r;
+ }
+
+ return 0;
}
static int ch341_tiocmset(struct tty_struct *tty,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index cdea1bff3b70..1e61fe043171 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -39,7 +39,7 @@ static int cp210x_tiocmget(struct tty_struct *);
static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
static int cp210x_tiocmset_port(struct usb_serial_port *port,
unsigned int, unsigned int);
-static void cp210x_break_ctl(struct tty_struct *, int);
+static int cp210x_break_ctl(struct tty_struct *, int);
static int cp210x_attach(struct usb_serial *);
static void cp210x_disconnect(struct usb_serial *);
static void cp210x_release(struct usb_serial *);
@@ -1434,18 +1434,26 @@ static int cp210x_tiocmget(struct tty_struct *tty)
return result;
}
-static void cp210x_break_ctl(struct tty_struct *tty, int break_state)
+static int cp210x_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
+ struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
u16 state;
+ if (priv->partnum == CP210X_PARTNUM_CP2105) {
+ if (cp210x_interface_num(port->serial) == 1)
+ return -ENOTTY;
+ }
+
if (break_state == 0)
state = BREAK_OFF;
else
state = BREAK_ON;
+
dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
state == BREAK_OFF ? "off" : "on");
- cp210x_write_u16_reg(port, CP210X_SET_BREAK, state);
+
+ return cp210x_write_u16_reg(port, CP210X_SET_BREAK, state);
}
#ifdef CONFIG_GPIOLIB
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 45d688e9b93f..d1dea3850576 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -217,7 +217,7 @@ static void digi_rx_unthrottle(struct tty_struct *tty);
static void digi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
-static void digi_break_ctl(struct tty_struct *tty, int break_state);
+static int digi_break_ctl(struct tty_struct *tty, int break_state);
static int digi_tiocmget(struct tty_struct *tty);
static int digi_tiocmset(struct tty_struct *tty, unsigned int set,
unsigned int clear);
@@ -839,7 +839,7 @@ static void digi_set_termios(struct tty_struct *tty,
}
-static void digi_break_ctl(struct tty_struct *tty, int break_state)
+static int digi_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
unsigned char buf[4];
@@ -848,7 +848,8 @@ static void digi_break_ctl(struct tty_struct *tty, int break_state)
buf[1] = 2; /* length */
buf[2] = break_state ? 1 : 0;
buf[3] = 0; /* pad */
- digi_write_inb_command(port, buf, 4, 0);
+
+ return digi_write_inb_command(port, buf, 4, 0);
}
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 891fb1fe69df..5f7a46bcace6 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -448,7 +448,7 @@ static void f81534a_process_read_urb(struct urb *urb)
tty_flip_buffer_push(&port->port);
}
-static void f81232_break_ctl(struct tty_struct *tty, int break_state)
+static int f81232_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct f81232_private *priv = usb_get_serial_port_data(port);
@@ -467,6 +467,8 @@ static void f81232_break_ctl(struct tty_struct *tty, int break_state)
dev_err(&port->dev, "set break failed: %d\n", status);
mutex_unlock(&priv->lock);
+
+ return status;
}
static int f81232_find_clk(speed_t baudrate)
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 4083ae961be4..ef126cd3d94f 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -656,7 +656,7 @@ out_unlock:
return status;
}
-static void f81534_break_ctl(struct tty_struct *tty, int break_state)
+static int f81534_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
@@ -675,6 +675,8 @@ static void f81534_break_ctl(struct tty_struct *tty, int break_state)
dev_err(&port->dev, "set break failed: %d\n", status);
mutex_unlock(&port_priv->lcr_mutex);
+
+ return status;
}
static int f81534_update_mctrl(struct usb_serial_port *port, unsigned int set,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 05e28a5ce42b..1bf23611be12 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2550,11 +2550,12 @@ static void ftdi_process_read_urb(struct urb *urb)
tty_flip_buffer_push(&port->port);
}
-static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
+static int ftdi_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
u16 value;
+ int ret;
/* break_state = -1 to turn on break, and 0 to turn off break */
/* see drivers/char/tty_io.c to see it used */
@@ -2565,19 +2566,22 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
else
value = priv->last_set_data_value;
- if (usb_control_msg(port->serial->dev,
+ ret = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
value, priv->channel,
- NULL, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT);
+ if (ret < 0) {
dev_err(&port->dev, "%s FAILED to enable/disable break state (state was %d)\n",
__func__, break_state);
+ return ret;
}
dev_dbg(&port->dev, "%s break state is %d - urb is %d\n", __func__,
break_state, value);
+ return 0;
}
static bool ftdi_tx_empty(struct usb_serial_port *port)
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 3a4c0febf335..abe4bbb0ac65 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1560,12 +1560,12 @@ static int edge_ioctl(struct tty_struct *tty,
* SerialBreak
* this function sends a break to the port
*****************************************************************************/
-static void edge_break(struct tty_struct *tty, int break_state)
+static int edge_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct edgeport_serial *edge_serial = usb_get_serial_data(port->serial);
- int status;
+ int status = 0;
if (!edge_serial->is_epic ||
edge_serial->epic_descriptor.Supports.IOSPChase) {
@@ -1597,6 +1597,8 @@ static void edge_break(struct tty_struct *tty, int break_state)
dev_dbg(&port->dev, "%s - error sending break set/clear command.\n",
__func__);
}
+
+ return status;
}
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index bc3c24ea42c1..7a3a6e539456 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2421,7 +2421,7 @@ static int edge_tiocmget(struct tty_struct *tty)
return result;
}
-static void edge_break(struct tty_struct *tty, int break_state)
+static int edge_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
@@ -2430,10 +2430,15 @@ static void edge_break(struct tty_struct *tty, int break_state)
if (break_state == -1)
bv = 1; /* On */
+
status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv);
- if (status)
+ if (status) {
dev_dbg(&port->dev, "%s - error %d sending break set/clear command.\n",
__func__, status);
+ return status;
+ }
+
+ return 0;
}
static void edge_heartbeat_schedule(struct edgeport_serial *edge_serial)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 2966e0c4941e..93b17e0e05a3 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -599,7 +599,7 @@ struct keyspan_port_private {
#include "keyspan_usa67msg.h"
-static void keyspan_break_ctl(struct tty_struct *tty, int break_state)
+static int keyspan_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct keyspan_port_private *p_priv;
@@ -611,7 +611,10 @@ static void keyspan_break_ctl(struct tty_struct *tty, int break_state)
else
p_priv->break_on = 0;
+ /* FIXME: return errors */
keyspan_send_setup(port, 0);
+
+ return 0;
}
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 6fd15cd9e1eb..0eef358b314a 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -299,7 +299,7 @@ static speed_t keyspan_pda_setbaud(struct usb_serial *serial, speed_t baud)
return baud;
}
-static void keyspan_pda_break_ctl(struct tty_struct *tty, int break_state)
+static int keyspan_pda_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
@@ -315,9 +315,13 @@ static void keyspan_pda_break_ctl(struct tty_struct *tty, int break_state)
4, /* set break */
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
value, 0, NULL, 0, 2000);
- if (result < 0)
+ if (result < 0) {
dev_dbg(&port->dev, "%s - error %d from usb_control_msg\n",
__func__, result);
+ return result;
+ }
+
+ return 0;
}
static void keyspan_pda_set_termios(struct tty_struct *tty,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index d3852feb81a4..6570c8817a80 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -47,7 +47,7 @@ static void mct_u232_read_int_callback(struct urb *urb);
static void mct_u232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
-static void mct_u232_break_ctl(struct tty_struct *tty, int break_state);
+static int mct_u232_break_ctl(struct tty_struct *tty, int break_state);
static int mct_u232_tiocmget(struct tty_struct *tty);
static int mct_u232_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
@@ -677,7 +677,7 @@ static void mct_u232_set_termios(struct tty_struct *tty,
spin_unlock_irqrestore(&priv->lock, flags);
} /* mct_u232_set_termios */
-static void mct_u232_break_ctl(struct tty_struct *tty, int break_state)
+static int mct_u232_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
@@ -691,7 +691,7 @@ static void mct_u232_break_ctl(struct tty_struct *tty, int break_state)
lcr |= MCT_U232_SET_BREAK;
spin_unlock_irqrestore(&priv->lock, flags);
- mct_u232_set_line_ctrl(port, lcr);
+ return mct_u232_set_line_ctrl(port, lcr);
} /* mct_u232_break_ctl */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 1d1f85fabc28..23544074eb1c 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -996,7 +996,7 @@ static void mos7720_close(struct usb_serial_port *port)
mos7720_port->open = 0;
}
-static void mos7720_break(struct tty_struct *tty, int break_state)
+static int mos7720_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
unsigned char data;
@@ -1007,7 +1007,7 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
mos7720_port = usb_get_serial_port_data(port);
if (mos7720_port == NULL)
- return;
+ return -ENODEV;
if (break_state == -1)
data = mos7720_port->shadowLCR | UART_LCR_SBC;
@@ -1015,8 +1015,9 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
mos7720_port->shadowLCR = data;
- write_mos_reg(serial, port->port_number, MOS7720_LCR,
- mos7720_port->shadowLCR);
+
+ return write_mos_reg(serial, port->port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
}
/*
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 73370eaae0ab..8b0308d84270 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -787,7 +787,7 @@ static void mos7840_close(struct usb_serial_port *port)
* mos7840_break
* this function sends a break to the port
*****************************************************************************/
-static void mos7840_break(struct tty_struct *tty, int break_state)
+static int mos7840_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
@@ -801,8 +801,9 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
/* FIXME: no locking on shadowLCR anywhere in driver */
mos7840_port->shadowLCR = data;
dev_dbg(&port->dev, "%s mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR);
- mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
- mos7840_port->shadowLCR);
+
+ return mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
+ mos7840_port->shadowLCR);
}
/*****************************************************************************
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index faa0eedfe245..1f7bb3e4fcf2 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1230,7 +1230,7 @@ static void mxuport_close(struct usb_serial_port *port)
}
/* Send a break to the port. */
-static void mxuport_break_ctl(struct tty_struct *tty, int break_state)
+static int mxuport_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
@@ -1244,8 +1244,8 @@ static void mxuport_break_ctl(struct tty_struct *tty, int break_state)
dev_dbg(&port->dev, "%s - clearing break\n", __func__);
}
- mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_BREAK,
- enable, port->port_number);
+ return mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_BREAK,
+ enable, port->port_number);
}
static int mxuport_resume(struct usb_serial *serial)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index fd42e3a0bd18..288a96a74266 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1151,6 +1151,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
.driver_info = RSVD(3) },
/* u-blox products */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1311) }, /* u-blox LARA-R6 01B */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1312), /* u-blox LARA-R6 01B (RMNET) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(UBLOX_VENDOR_ID, 0x1313, 0xff) }, /* u-blox LARA-R6 01B (ECM) */
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */
.driver_info = RSVD(4) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 8949c1891164..d93f5d584557 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -173,7 +173,7 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define PL2303_HXN_FLOWCTRL_RTS_CTS 0x18
#define PL2303_HXN_FLOWCTRL_XON_XOFF 0x0c
-static void pl2303_set_break(struct usb_serial_port *port, bool enable);
+static int pl2303_set_break(struct usb_serial_port *port, bool enable);
enum pl2303_type {
TYPE_H,
@@ -1060,7 +1060,7 @@ static int pl2303_carrier_raised(struct usb_serial_port *port)
return 0;
}
-static void pl2303_set_break(struct usb_serial_port *port, bool enable)
+static int pl2303_set_break(struct usb_serial_port *port, bool enable)
{
struct usb_serial *serial = port->serial;
u16 state;
@@ -1077,15 +1077,19 @@ static void pl2303_set_break(struct usb_serial_port *port, bool enable)
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
0, NULL, 0, 100);
- if (result)
+ if (result) {
dev_err(&port->dev, "error sending break = %d\n", result);
+ return result;
+ }
+
+ return 0;
}
-static void pl2303_break_ctl(struct tty_struct *tty, int state)
+static int pl2303_break_ctl(struct tty_struct *tty, int state)
{
struct usb_serial_port *port = tty->driver_data;
- pl2303_set_break(port, state);
+ return pl2303_set_break(port, state);
}
static void pl2303_update_line_status(struct usb_serial_port *port,
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index fee581409bf6..821f25e52ec2 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -741,7 +741,7 @@ static int qt2_tiocmset(struct tty_struct *tty,
return update_mctrl(port_priv, set, clear);
}
-static void qt2_break_ctl(struct tty_struct *tty, int break_state)
+static int qt2_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct qt2_port_private *port_priv;
@@ -754,10 +754,14 @@ static void qt2_break_ctl(struct tty_struct *tty, int break_state)
status = qt2_control_msg(port->serial->dev, QT2_BREAK_CONTROL,
val, port_priv->device_port);
- if (status < 0)
+ if (status < 0) {
dev_warn(&port->dev,
"%s - failed to send control message: %i\n", __func__,
status);
+ return status;
+ }
+
+ return 0;
}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index b99f78224846..0fba25abf671 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -319,7 +319,7 @@ static void ti_set_termios(struct tty_struct *tty,
static int ti_tiocmget(struct tty_struct *tty);
static int ti_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static void ti_break(struct tty_struct *tty, int break_state);
+static int ti_break(struct tty_struct *tty, int break_state);
static void ti_interrupt_callback(struct urb *urb);
static void ti_bulk_in_callback(struct urb *urb);
static void ti_bulk_out_callback(struct urb *urb);
@@ -1071,7 +1071,7 @@ static int ti_tiocmset(struct tty_struct *tty,
}
-static void ti_break(struct tty_struct *tty, int break_state)
+static int ti_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
@@ -1083,8 +1083,12 @@ static void ti_break(struct tty_struct *tty, int break_state)
tport->tp_uart_base_addr + TI_UART_OFFSET_LCR,
TI_LCR_BREAK, break_state == -1 ? TI_LCR_BREAK : 0);
- if (status)
+ if (status) {
dev_dbg(&port->dev, "%s - error setting break, %d\n", __func__, status);
+ return status;
+ }
+
+ return 0;
}
static int ti_get_port_from_code(unsigned char code)
diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c
index c47439bd90fa..46952182e04f 100644
--- a/drivers/usb/serial/upd78f0730.c
+++ b/drivers/usb/serial/upd78f0730.c
@@ -238,12 +238,13 @@ static int upd78f0730_tiocmset(struct tty_struct *tty,
return res;
}
-static void upd78f0730_break_ctl(struct tty_struct *tty, int break_state)
+static int upd78f0730_break_ctl(struct tty_struct *tty, int break_state)
{
struct upd78f0730_port_private *private;
struct usb_serial_port *port = tty->driver_data;
struct upd78f0730_set_dtr_rts request;
struct device *dev = &port->dev;
+ int res;
private = usb_get_serial_port_data(port);
@@ -258,8 +259,10 @@ static void upd78f0730_break_ctl(struct tty_struct *tty, int break_state)
request.opcode = UPD78F0730_CMD_SET_DTR_RTS;
request.params = private->line_signals;
- upd78f0730_send_ctl(port, &request, sizeof(request));
+ res = upd78f0730_send_ctl(port, &request, sizeof(request));
mutex_unlock(&private->lock);
+
+ return res;
}
static void upd78f0730_dtr_rts(struct usb_serial_port *port, int on)
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index f8404073558b..7b4805c1004d 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -539,9 +539,9 @@ static int serial_break(struct tty_struct *tty, int break_state)
dev_dbg(&port->dev, "%s\n", __func__);
if (port->serial->type->break_ctl)
- port->serial->type->break_ctl(tty, break_state);
+ return port->serial->type->break_ctl(tty, break_state);
- return 0;
+ return -ENOTTY;
}
static int serial_proc_show(struct seq_file *m, void *v)
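With serial_break() above now returning the driver's break_ctl() result, or -ENOTTY when the driver has no break support, a failed break can reach user space through the break ioctls instead of being silently dropped. A hypothetical user-space check (device path is made up; the exact errno depends on the driver and the tty core):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, TIOCSBRK) < 0)	/* assert break */
		perror("TIOCSBRK");

	usleep(250000);			/* hold break for ~250 ms */

	if (ioctl(fd, TIOCCBRK) < 0)	/* clear break */
		perror("TIOCCBRK");

	close(fd);
	return 0;
}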
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index aaf4813e4971..6934970f180d 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -47,12 +47,19 @@ MODULE_DEVICE_TABLE(usb, id_table_combined);
/* This HW really does not support a serial break, so one will be
* emulated when ever the break state is set to true.
*/
-static void usb_debug_break_ctl(struct tty_struct *tty, int break_state)
+static int usb_debug_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
+ int ret;
+
if (!break_state)
- return;
- usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
+ return 0;
+
+ ret = usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
static void usb_debug_process_read_urb(struct urb *urb)
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 7f82d40753ee..ca48e90a8e81 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -87,7 +87,7 @@ static void whiteheat_set_termios(struct tty_struct *tty,
static int whiteheat_tiocmget(struct tty_struct *tty);
static int whiteheat_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static void whiteheat_break_ctl(struct tty_struct *tty, int break_state);
+static int whiteheat_break_ctl(struct tty_struct *tty, int break_state);
static struct usb_serial_driver whiteheat_fake_device = {
.driver = {
@@ -449,10 +449,11 @@ static void whiteheat_set_termios(struct tty_struct *tty,
firm_setup_port(tty);
}
-static void whiteheat_break_ctl(struct tty_struct *tty, int break_state)
+static int whiteheat_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
- firm_set_break(port, break_state);
+
+ return firm_set_break(port, break_state);
}
diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
index fdb0aae546c3..4ec7c5892b84 100644
--- a/drivers/usb/serial/xr_serial.c
+++ b/drivers/usb/serial/xr_serial.c
@@ -503,7 +503,7 @@ static void xr_dtr_rts(struct usb_serial_port *port, int on)
xr_tiocmset_port(port, 0, TIOCM_DTR | TIOCM_RTS);
}
-static void xr_break_ctl(struct tty_struct *tty, int break_state)
+static int xr_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct xr_data *data = usb_get_serial_port_data(port);
@@ -517,7 +517,7 @@ static void xr_break_ctl(struct tty_struct *tty, int break_state)
dev_dbg(&port->dev, "Turning break %s\n", state == 0 ? "off" : "on");
- xr_set_reg_uart(port, type->tx_break, state);
+ return xr_set_reg_uart(port, type->tx_break, state);
}
/* Tx and Rx clock mask values obtained from section 3.3.4 of datasheet */
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 831e7049977d..2f80c2792dbd 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -100,19 +100,6 @@ config TYPEC_STUSB160X
If you choose to build this driver as a dynamically linked module, the
module will be called stusb160x.ko.
-config TYPEC_QCOM_PMIC
- tristate "Qualcomm PMIC USB Type-C driver"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
- help
- Driver for supporting role switch over the Qualcomm PMIC. This will
- handle the USB Type-C role and orientation detection reported by the
- QCOM PMIC if the PMIC has the capability to handle USB Type-C
- detection.
-
- It will also enable the VBUS output to connected devices when a
- DFP connection is made.
-
config TYPEC_WUSB3801
tristate "Willsemi WUSB3801 Type-C port controller driver"
depends on I2C
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 4a83dad51a6c..7a368fea61bc 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_TYPEC_UCSI) += ucsi/
obj-$(CONFIG_TYPEC_TPS6598X) += tipd/
obj-$(CONFIG_TYPEC_ANX7411) += anx7411.o
obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o
-obj-$(CONFIG_TYPEC_QCOM_PMIC) += qcom-pmic-typec.o
obj-$(CONFIG_TYPEC_STUSB160X) += stusb160x.o
obj-$(CONFIG_TYPEC_RT1719) += rt1719.o
obj-$(CONFIG_TYPEC_WUSB3801) += wusb3801.o
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
index 3d5edce270a4..221604f933a4 100644
--- a/drivers/usb/typec/anx7411.c
+++ b/drivers/usb/typec/anx7411.c
@@ -1584,7 +1584,7 @@ static struct i2c_driver anx7411_driver = {
.of_match_table = anx_match_table,
.pm = &anx7411_pm_ops,
},
- .probe_new = anx7411_i2c_probe,
+ .probe = anx7411_i2c_probe,
.remove = anx7411_i2c_remove,
.id_table = anx7411_id,
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 349cc2030c90..faa184ae3dac 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -2110,7 +2110,7 @@ typec_port_register_altmode(struct typec_port *port,
struct typec_mux *mux;
struct typec_retimer *retimer;
- mux = typec_mux_get(&port->dev, desc);
+ mux = typec_mux_get(&port->dev);
if (IS_ERR(mux))
return ERR_CAST(mux);
@@ -2274,7 +2274,7 @@ struct typec_port *typec_register_port(struct device *parent,
return ERR_PTR(ret);
}
- port->mux = typec_mux_get(&port->dev, NULL);
+ port->mux = typec_mux_get(&port->dev);
if (IS_ERR(port->mux)) {
ret = PTR_ERR(port->mux);
put_device(&port->dev);
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index 8bbeb9b1e439..fb1242e82ffd 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -292,8 +292,8 @@ static struct i2c_driver hd3ss3220_driver = {
.name = "hd3ss3220",
.of_match_table = dev_ids,
},
- .probe_new = hd3ss3220_probe,
- .remove = hd3ss3220_remove,
+ .probe = hd3ss3220_probe,
+ .remove = hd3ss3220_remove,
};
module_i2c_driver(hd3ss3220_driver);
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index d9eaf9a0b0bf..80dd91938d96 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -265,60 +265,19 @@ static int mux_fwnode_match(struct device *dev, const void *fwnode)
static void *typec_mux_match(const struct fwnode_handle *fwnode,
const char *id, void *data)
{
- const struct typec_altmode_desc *desc = data;
struct device *dev;
- bool match;
- int nval;
- u16 *val;
- int ret;
- int i;
/*
- * Check has the identifier already been "consumed". If it
- * has, no need to do any extra connection identification.
+ * Device graph (OF graph) does not give any means to identify the
+ * device type or the device class of the remote port parent that @fwnode
+ * represents, so in order to identify the type or the class of @fwnode
+ * an additional device property is needed. With typec muxes the
+ * property is named "mode-switch" (@id). The value of the device
+ * property is ignored.
*/
- match = !id;
- if (match)
- goto find_mux;
-
- if (!desc) {
- /*
- * Accessory Mode muxes & muxes which explicitly specify
- * the required identifier can avoid SVID matching.
- */
- match = fwnode_property_present(fwnode, "accessory") ||
- fwnode_property_present(fwnode, id);
- if (match)
- goto find_mux;
- return NULL;
- }
-
- /* Alternate Mode muxes */
- nval = fwnode_property_count_u16(fwnode, "svid");
- if (nval <= 0)
+ if (id && !fwnode_property_present(fwnode, id))
return NULL;
- val = kcalloc(nval, sizeof(*val), GFP_KERNEL);
- if (!val)
- return ERR_PTR(-ENOMEM);
-
- ret = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
- if (ret < 0) {
- kfree(val);
- return ERR_PTR(ret);
- }
-
- for (i = 0; i < nval; i++) {
- match = val[i] == desc->svid;
- if (match) {
- kfree(val);
- goto find_mux;
- }
- }
- kfree(val);
- return NULL;
-
-find_mux:
dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
@@ -328,15 +287,13 @@ find_mux:
/**
* fwnode_typec_mux_get - Find USB Type-C Multiplexer
* @fwnode: The caller device node
- * @desc: Alt Mode description
*
* Finds a mux linked to the caller. This function is primarily meant for the
* Type-C drivers. Returns a reference to the mux on success, NULL if no
* matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection
* was found but the mux has not been enumerated yet.
*/
-struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
- const struct typec_altmode_desc *desc)
+struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode)
{
struct typec_mux_dev *mux_devs[TYPEC_MUX_MAX_DEVS];
struct typec_mux *mux;
@@ -349,7 +306,7 @@ struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
return ERR_PTR(-ENOMEM);
count = fwnode_connection_find_matches(fwnode, "mode-switch",
- (void *)desc, typec_mux_match,
+ NULL, typec_mux_match,
(void **)mux_devs,
ARRAY_SIZE(mux_devs));
if (count <= 0) {
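After this rework fwnode_typec_mux_get() takes only the fwnode, and matching relies purely on the "mode-switch" device property, so callers no longer pass an altmode descriptor. A hypothetical caller sketch:

#include <linux/err.h>
#include <linux/property.h>
#include <linux/usb/typec_mux.h>

static int example_port_get_mux(struct device *dev, struct typec_mux **mux)
{
	struct typec_mux *m;

	m = fwnode_typec_mux_get(dev_fwnode(dev));
	if (IS_ERR(m))
		return PTR_ERR(m);	/* typically -EPROBE_DEFER */

	/* NULL just means no "mode-switch" connection is described. */
	*mux = m;
	return 0;
}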
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index c46fa4f9d3df..784b9d8107e9 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -35,4 +35,14 @@ config TYPEC_MUX_INTEL_PMC
control the USB role switch and also the multiplexer/demultiplexer
switches used with USB Type-C Alternate Modes.
+config TYPEC_MUX_NB7VPQ904M
+ tristate "On Semiconductor NB7VPQ904M Type-C redriver driver"
+ depends on I2C
+ depends on DRM || DRM=n
+ select DRM_PANEL_BRIDGE if DRM
+ select REGMAP_I2C
+ help
+ Say Y or M if your system has an On Semiconductor NB7VPQ904M Type-C
+ redriver chip found on some devices with a Type-C port.
+
endmenu
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index dda67e19b58b..76196096ef41 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_TYPEC_MUX_FSA4480) += fsa4480.o
obj-$(CONFIG_TYPEC_MUX_GPIO_SBU) += gpio-sbu-mux.o
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
+obj-$(CONFIG_TYPEC_MUX_NB7VPQ904M) += nb7vpq904m.o
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c
index d6495e533e58..e0ee1f621abb 100644
--- a/drivers/usb/typec/mux/fsa4480.c
+++ b/drivers/usb/typec/mux/fsa4480.c
@@ -25,15 +25,24 @@
#define FSA4480_DELAY_L_MIC 0x0e
#define FSA4480_DELAY_L_SENSE 0x0f
#define FSA4480_DELAY_L_AGND 0x10
+#define FSA4480_FUNCTION_ENABLE 0x12
#define FSA4480_RESET 0x1e
#define FSA4480_MAX_REGISTER 0x1f
#define FSA4480_ENABLE_DEVICE BIT(7)
#define FSA4480_ENABLE_SBU GENMASK(6, 5)
#define FSA4480_ENABLE_USB GENMASK(4, 3)
+#define FSA4480_ENABLE_SENSE BIT(2)
+#define FSA4480_ENABLE_MIC BIT(1)
+#define FSA4480_ENABLE_AGND BIT(0)
#define FSA4480_SEL_SBU_REVERSE GENMASK(6, 5)
#define FSA4480_SEL_USB GENMASK(4, 3)
+#define FSA4480_SEL_SENSE BIT(2)
+#define FSA4480_SEL_MIC BIT(1)
+#define FSA4480_SEL_AGND BIT(0)
+
+#define FSA4480_ENABLE_AUTO_JACK_DETECT BIT(0)
struct fsa4480 {
struct i2c_client *client;
@@ -46,8 +55,11 @@ struct fsa4480 {
struct regmap *regmap;
+ enum typec_orientation orientation;
+ unsigned long mode;
+ unsigned int svid;
+
u8 cur_enable;
- u8 cur_select;
};
static const struct regmap_config fsa4480_regmap_config = {
@@ -58,19 +70,45 @@ static const struct regmap_config fsa4480_regmap_config = {
.disable_locking = true,
};
-static int fsa4480_switch_set(struct typec_switch_dev *sw,
- enum typec_orientation orientation)
+static int fsa4480_set(struct fsa4480 *fsa)
{
- struct fsa4480 *fsa = typec_switch_get_drvdata(sw);
- u8 new_sel;
-
- mutex_lock(&fsa->lock);
- new_sel = FSA4480_SEL_USB;
- if (orientation == TYPEC_ORIENTATION_REVERSE)
- new_sel |= FSA4480_SEL_SBU_REVERSE;
-
- if (new_sel == fsa->cur_select)
- goto out_unlock;
+ bool reverse = (fsa->orientation == TYPEC_ORIENTATION_REVERSE);
+ u8 enable = FSA4480_ENABLE_DEVICE;
+ u8 sel = 0;
+
+ /* USB Mode */
+ if (fsa->mode < TYPEC_STATE_MODAL ||
+ (!fsa->svid && (fsa->mode == TYPEC_MODE_USB2 ||
+ fsa->mode == TYPEC_MODE_USB3))) {
+ enable |= FSA4480_ENABLE_USB;
+ sel = FSA4480_SEL_USB;
+ } else if (fsa->svid) {
+ switch (fsa->mode) {
+ /* DP Only */
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_E:
+ enable |= FSA4480_ENABLE_SBU;
+ if (reverse)
+ sel = FSA4480_SEL_SBU_REVERSE;
+ break;
+
+ /* DP + USB */
+ case TYPEC_DP_STATE_D:
+ case TYPEC_DP_STATE_F:
+ enable |= FSA4480_ENABLE_USB | FSA4480_ENABLE_SBU;
+ sel = FSA4480_SEL_USB;
+ if (reverse)
+ sel |= FSA4480_SEL_SBU_REVERSE;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ } else if (fsa->mode == TYPEC_MODE_AUDIO) {
+ /* Audio Accessory Mode, setup to auto Jack Detection */
+ enable |= FSA4480_ENABLE_USB | FSA4480_ENABLE_AGND;
+ } else
+ return -EOPNOTSUPP;
if (fsa->cur_enable & FSA4480_ENABLE_SBU) {
/* Disable SBU output while re-configuring the switch */
@@ -81,48 +119,64 @@ static int fsa4480_switch_set(struct typec_switch_dev *sw,
usleep_range(35, 1000);
}
- regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, new_sel);
- fsa->cur_select = new_sel;
+ regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, sel);
+ regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, enable);
- if (fsa->cur_enable & FSA4480_ENABLE_SBU) {
- regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable);
+ /* Start AUDIO JACK DETECTION to setup MIC, AGND & Sense muxes */
+ if (enable & FSA4480_ENABLE_AGND)
+ regmap_write(fsa->regmap, FSA4480_FUNCTION_ENABLE,
+ FSA4480_ENABLE_AUTO_JACK_DETECT);
+ if (enable & FSA4480_ENABLE_SBU) {
/* 15us to allow the SBU switch to turn on again */
usleep_range(15, 1000);
}
-out_unlock:
- mutex_unlock(&fsa->lock);
+ fsa->cur_enable = enable;
return 0;
}
+static int fsa4480_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct fsa4480 *fsa = typec_switch_get_drvdata(sw);
+ int ret = 0;
+
+ mutex_lock(&fsa->lock);
+
+ if (fsa->orientation != orientation) {
+ fsa->orientation = orientation;
+
+ ret = fsa4480_set(fsa);
+ }
+
+ mutex_unlock(&fsa->lock);
+
+ return ret;
+}
+
static int fsa4480_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
{
struct fsa4480 *fsa = typec_mux_get_drvdata(mux);
- u8 new_enable;
+ int ret = 0;
mutex_lock(&fsa->lock);
- new_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
- if (state->mode >= TYPEC_DP_STATE_A)
- new_enable |= FSA4480_ENABLE_SBU;
+ if (fsa->mode != state->mode) {
+ fsa->mode = state->mode;
- if (new_enable == fsa->cur_enable)
- goto out_unlock;
+ if (state->alt)
+ fsa->svid = state->alt->svid;
+ else
+ fsa->svid = 0; // No SVID
- regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, new_enable);
- fsa->cur_enable = new_enable;
-
- if (new_enable & FSA4480_ENABLE_SBU) {
- /* 15us to allow the SBU switch to turn off */
- usleep_range(15, 1000);
+ ret = fsa4480_set(fsa);
}
-out_unlock:
mutex_unlock(&fsa->lock);
- return 0;
+ return ret;
}
static int fsa4480_probe(struct i2c_client *client)
@@ -143,8 +197,10 @@ static int fsa4480_probe(struct i2c_client *client)
if (IS_ERR(fsa->regmap))
return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n");
+ /* Safe mode */
fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
- fsa->cur_select = FSA4480_SEL_USB;
+ fsa->mode = TYPEC_STATE_SAFE;
+ fsa->orientation = TYPEC_ORIENTATION_NONE;
/* set default settings */
regmap_write(fsa->regmap, FSA4480_SLOW_L, 0x00);
@@ -156,7 +212,7 @@ static int fsa4480_probe(struct i2c_client *client)
regmap_write(fsa->regmap, FSA4480_DELAY_L_MIC, 0x00);
regmap_write(fsa->regmap, FSA4480_DELAY_L_SENSE, 0x00);
regmap_write(fsa->regmap, FSA4480_DELAY_L_AGND, 0x09);
- regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, fsa->cur_select);
+ regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, FSA4480_SEL_USB);
regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable);
sw_desc.drvdata = fsa;
@@ -206,7 +262,7 @@ static struct i2c_driver fsa4480_driver = {
.name = "fsa4480",
.of_match_table = fsa4480_of_table,
},
- .probe_new = fsa4480_probe,
+ .probe = fsa4480_probe,
.remove = fsa4480_remove,
.id_table = fsa4480_table,
};
diff --git a/drivers/usb/typec/mux/gpio-sbu-mux.c b/drivers/usb/typec/mux/gpio-sbu-mux.c
index f62516dafe8f..ad60fd4e8431 100644
--- a/drivers/usb/typec/mux/gpio-sbu-mux.c
+++ b/drivers/usb/typec/mux/gpio-sbu-mux.c
@@ -3,14 +3,11 @@
* Copyright (C) 2022 Linaro Ltd.
*/
-#include <linux/bits.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
@@ -140,7 +137,7 @@ static int gpio_sbu_mux_probe(struct platform_device *pdev)
return 0;
}
-static int gpio_sbu_mux_remove(struct platform_device *pdev)
+static void gpio_sbu_mux_remove(struct platform_device *pdev)
{
struct gpio_sbu_mux *sbu_mux = platform_get_drvdata(pdev);
@@ -148,8 +145,6 @@ static int gpio_sbu_mux_remove(struct platform_device *pdev)
typec_mux_unregister(sbu_mux->mux);
typec_switch_unregister(sbu_mux->sw);
-
- return 0;
}
static const struct of_device_id gpio_sbu_mux_match[] = {
@@ -160,7 +155,7 @@ MODULE_DEVICE_TABLE(of, gpio_sbu_mux_match);
static struct platform_driver gpio_sbu_mux_driver = {
.probe = gpio_sbu_mux_probe,
- .remove = gpio_sbu_mux_remove,
+ .remove_new = gpio_sbu_mux_remove,
.driver = {
.name = "gpio_sbu_mux",
.of_match_table = gpio_sbu_mux_match,
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 34e4188a40ff..5e8edf3881c0 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -15,6 +15,8 @@
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_tbt.h>
+#include <linux/debugfs.h>
+#include <linux/usb.h>
#include <asm/intel_scu_ipc.h>
@@ -143,8 +145,12 @@ struct pmc_usb {
struct acpi_device *iom_adev;
void __iomem *iom_base;
u32 iom_port_status_offset;
+
+ struct dentry *dentry;
};
+static struct dentry *pmc_mux_debugfs_root;
+
static void update_port_status(struct pmc_usb_port *port)
{
u8 port_num;
@@ -639,6 +645,29 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
return 0;
}
+static int port_iom_status_show(struct seq_file *s, void *unused)
+{
+ struct pmc_usb_port *port = s->private;
+
+ update_port_status(port);
+ seq_printf(s, "0x%08x\n", port->iom_status);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(port_iom_status);
+
+static void pmc_mux_port_debugfs_init(struct pmc_usb_port *port)
+{
+ struct dentry *debugfs_dir;
+ char name[6];
+
+ snprintf(name, sizeof(name), "port%d", port->usb3_port - 1);
+
+ debugfs_dir = debugfs_create_dir(name, port->pmc->dentry);
+ debugfs_create_file("iom_status", 0400, debugfs_dir, port,
+ &port_iom_status_fops);
+}
+
static int pmc_usb_probe(struct platform_device *pdev)
{
struct fwnode_handle *fwnode = NULL;
@@ -674,6 +703,8 @@ static int pmc_usb_probe(struct platform_device *pdev)
if (ret)
return ret;
+ pmc->dentry = debugfs_create_dir(dev_name(pmc->dev), pmc_mux_debugfs_root);
+
/*
* For every physical USB connector (USB2 and USB3 combo) there is a
* child ACPI device node under the PMC mux ACPI device object.
@@ -688,6 +719,8 @@ static int pmc_usb_probe(struct platform_device *pdev)
fwnode_handle_put(fwnode);
goto err_remove_ports;
}
+
+ pmc_mux_port_debugfs_init(&pmc->port[i]);
}
platform_set_drvdata(pdev, pmc);
@@ -703,10 +736,12 @@ err_remove_ports:
acpi_dev_put(pmc->iom_adev);
+ debugfs_remove(pmc->dentry);
+
return ret;
}
-static int pmc_usb_remove(struct platform_device *pdev)
+static void pmc_usb_remove(struct platform_device *pdev)
{
struct pmc_usb *pmc = platform_get_drvdata(pdev);
int i;
@@ -719,7 +754,7 @@ static int pmc_usb_remove(struct platform_device *pdev)
acpi_dev_put(pmc->iom_adev);
- return 0;
+ debugfs_remove(pmc->dentry);
}
static const struct acpi_device_id pmc_usb_acpi_ids[] = {
@@ -734,10 +769,23 @@ static struct platform_driver pmc_usb_driver = {
.acpi_match_table = ACPI_PTR(pmc_usb_acpi_ids),
},
.probe = pmc_usb_probe,
- .remove = pmc_usb_remove,
+ .remove_new = pmc_usb_remove,
};
-module_platform_driver(pmc_usb_driver);
+static int __init pmc_usb_init(void)
+{
+ pmc_mux_debugfs_root = debugfs_create_dir("intel_pmc_mux", usb_debug_root);
+
+ return platform_driver_register(&pmc_usb_driver);
+}
+module_init(pmc_usb_init);
+
+static void __exit pmc_usb_exit(void)
+{
+ platform_driver_unregister(&pmc_usb_driver);
+ debugfs_remove(pmc_mux_debugfs_root);
+}
+module_exit(pmc_usb_exit);
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c
new file mode 100644
index 000000000000..80e580d50129
--- /dev/null
+++ b/drivers/usb/typec/mux/nb7vpq904m.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * OnSemi NB7VPQ904M Type-C driver
+ *
+ * Copyright (C) 2023 Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
+#include <linux/of_graph.h>
+#include <drm/drm_bridge.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#define NB7_CHNA 0
+#define NB7_CHNB 1
+#define NB7_CHNC 2
+#define NB7_CHND 3
+#define NB7_IS_CHAN_AD(channel) (channel == NB7_CHNA || channel == NB7_CHND)
+
+#define GEN_DEV_SET_REG 0x00
+
+#define GEN_DEV_SET_CHIP_EN BIT(0)
+#define GEN_DEV_SET_CHNA_EN BIT(4)
+#define GEN_DEV_SET_CHNB_EN BIT(5)
+#define GEN_DEV_SET_CHNC_EN BIT(6)
+#define GEN_DEV_SET_CHND_EN BIT(7)
+
+#define GEN_DEV_SET_OP_MODE_MASK GENMASK(3, 1)
+
+#define GEN_DEV_SET_OP_MODE_DP_CC2 0
+#define GEN_DEV_SET_OP_MODE_DP_CC1 1
+#define GEN_DEV_SET_OP_MODE_DP_4LANE 2
+#define GEN_DEV_SET_OP_MODE_USB 5
+
+#define EQ_SETTING_REG_BASE 0x01
+#define EQ_SETTING_REG(n) (EQ_SETTING_REG_BASE + (n) * 2)
+#define EQ_SETTING_MASK GENMASK(3, 1)
+
+#define OUTPUT_COMPRESSION_AND_POL_REG_BASE 0x02
+#define OUTPUT_COMPRESSION_AND_POL_REG(n) (OUTPUT_COMPRESSION_AND_POL_REG_BASE + (n) * 2)
+#define OUTPUT_COMPRESSION_MASK GENMASK(2, 1)
+
+#define FLAT_GAIN_REG_BASE 0x18
+#define FLAT_GAIN_REG(n) (FLAT_GAIN_REG_BASE + (n) * 2)
+#define FLAT_GAIN_MASK GENMASK(1, 0)
+
+#define LOSS_MATCH_REG_BASE 0x19
+#define LOSS_MATCH_REG(n) (LOSS_MATCH_REG_BASE + (n) * 2)
+#define LOSS_MATCH_MASK GENMASK(1, 0)
+
+#define AUX_CC_REG 0x09
+
+#define CHIP_VERSION_REG 0x17
+
+struct nb7vpq904m {
+ struct i2c_client *client;
+ struct gpio_desc *enable_gpio;
+ struct regulator *vcc_supply;
+ struct regmap *regmap;
+ struct typec_switch_dev *sw;
+ struct typec_retimer *retimer;
+
+ bool swap_data_lanes;
+ struct typec_switch *typec_switch;
+
+ struct drm_bridge bridge;
+
+ struct mutex lock; /* protect non-concurrent retimer & switch */
+
+ enum typec_orientation orientation;
+ unsigned long mode;
+ unsigned int svid;
+};
+
+static void nb7vpq904m_set_channel(struct nb7vpq904m *nb7, unsigned int channel, bool dp)
+{
+ u8 eq, out_comp, flat_gain, loss_match;
+
+ if (dp) {
+ eq = NB7_IS_CHAN_AD(channel) ? 0x6 : 0x4;
+ out_comp = 0x3;
+ flat_gain = NB7_IS_CHAN_AD(channel) ? 0x2 : 0x1;
+ loss_match = 0x3;
+ } else {
+ eq = 0x4;
+ out_comp = 0x3;
+ flat_gain = NB7_IS_CHAN_AD(channel) ? 0x3 : 0x1;
+ loss_match = NB7_IS_CHAN_AD(channel) ? 0x1 : 0x3;
+ }
+
+ regmap_update_bits(nb7->regmap, EQ_SETTING_REG(channel),
+ EQ_SETTING_MASK, FIELD_PREP(EQ_SETTING_MASK, eq));
+ regmap_update_bits(nb7->regmap, OUTPUT_COMPRESSION_AND_POL_REG(channel),
+ OUTPUT_COMPRESSION_MASK, FIELD_PREP(OUTPUT_COMPRESSION_MASK, out_comp));
+ regmap_update_bits(nb7->regmap, FLAT_GAIN_REG(channel),
+ FLAT_GAIN_MASK, FIELD_PREP(FLAT_GAIN_MASK, flat_gain));
+ regmap_update_bits(nb7->regmap, LOSS_MATCH_REG(channel),
+ LOSS_MATCH_MASK, FIELD_PREP(LOSS_MATCH_MASK, loss_match));
+}
+
+static int nb7vpq904m_set(struct nb7vpq904m *nb7)
+{
+ bool reverse = (nb7->orientation == TYPEC_ORIENTATION_REVERSE);
+
+ switch (nb7->mode) {
+ case TYPEC_STATE_SAFE:
+ regmap_write(nb7->regmap, GEN_DEV_SET_REG,
+ GEN_DEV_SET_CHIP_EN |
+ GEN_DEV_SET_CHNA_EN |
+ GEN_DEV_SET_CHNB_EN |
+ GEN_DEV_SET_CHNC_EN |
+ GEN_DEV_SET_CHND_EN |
+ FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
+ GEN_DEV_SET_OP_MODE_USB));
+ nb7vpq904m_set_channel(nb7, NB7_CHNA, false);
+ nb7vpq904m_set_channel(nb7, NB7_CHNB, false);
+ nb7vpq904m_set_channel(nb7, NB7_CHNC, false);
+ nb7vpq904m_set_channel(nb7, NB7_CHND, false);
+ regmap_write(nb7->regmap, AUX_CC_REG, 0x2);
+
+ return 0;
+
+ case TYPEC_STATE_USB:
+ /*
+ * Normal Orientation (CC1)
+ * A -> USB RX
+ * B -> USB TX
+ * C -> X
+ * D -> X
+ * Flipped Orientation (CC2)
+ * A -> X
+ * B -> X
+ * C -> USB TX
+ * D -> USB RX
+ *
+ * Reversed if data lanes are swapped
+ */
+ if (reverse ^ nb7->swap_data_lanes) {
+ regmap_write(nb7->regmap, GEN_DEV_SET_REG,
+ GEN_DEV_SET_CHIP_EN |
+ GEN_DEV_SET_CHNA_EN |
+ GEN_DEV_SET_CHNB_EN |
+ FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
+ GEN_DEV_SET_OP_MODE_USB));
+ nb7vpq904m_set_channel(nb7, NB7_CHNA, false);
+ nb7vpq904m_set_channel(nb7, NB7_CHNB, false);
+ } else {
+ regmap_write(nb7->regmap, GEN_DEV_SET_REG,
+ GEN_DEV_SET_CHIP_EN |
+ GEN_DEV_SET_CHNC_EN |
+ GEN_DEV_SET_CHND_EN |
+ FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
+ GEN_DEV_SET_OP_MODE_USB));
+ nb7vpq904m_set_channel(nb7, NB7_CHNC, false);
+ nb7vpq904m_set_channel(nb7, NB7_CHND, false);
+ }
+ regmap_write(nb7->regmap, AUX_CC_REG, 0x2);
+
+ return 0;
+
+ default:
+ if (nb7->svid != USB_TYPEC_DP_SID)
+ return -EINVAL;
+
+ break;
+ }
+
+ /* DP Altmode Setup */
+
+ regmap_write(nb7->regmap, AUX_CC_REG, reverse ? 0x1 : 0x0);
+
+ switch (nb7->mode) {
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_E:
+ /*
+ * Normal Orientation (CC1)
+ * A -> DP3
+ * B -> DP2
+ * C -> DP1
+ * D -> DP0
+ * Flipped Orientation (CC2)
+ * A -> DP0
+ * B -> DP1
+ * C -> DP2
+ * D -> DP3
+ */
+ regmap_write(nb7->regmap, GEN_DEV_SET_REG,
+ GEN_DEV_SET_CHIP_EN |
+ GEN_DEV_SET_CHNA_EN |
+ GEN_DEV_SET_CHNB_EN |
+ GEN_DEV_SET_CHNC_EN |
+ GEN_DEV_SET_CHND_EN |
+ FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
+ GEN_DEV_SET_OP_MODE_DP_4LANE));
+ nb7vpq904m_set_channel(nb7, NB7_CHNA, true);
+ nb7vpq904m_set_channel(nb7, NB7_CHNB, true);
+ nb7vpq904m_set_channel(nb7, NB7_CHNC, true);
+ nb7vpq904m_set_channel(nb7, NB7_CHND, true);
+ break;
+
+ case TYPEC_DP_STATE_D:
+ case TYPEC_DP_STATE_F:
+ regmap_write(nb7->regmap, GEN_DEV_SET_REG,
+ GEN_DEV_SET_CHIP_EN |
+ GEN_DEV_SET_CHNA_EN |
+ GEN_DEV_SET_CHNB_EN |
+ GEN_DEV_SET_CHNC_EN |
+ GEN_DEV_SET_CHND_EN |
+ FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
+ reverse ^ nb7->swap_data_lanes ?
+ GEN_DEV_SET_OP_MODE_DP_CC2
+ : GEN_DEV_SET_OP_MODE_DP_CC1));
+
+ /*
+ * Normal Orientation (CC1)
+ * A -> USB RX
+ * B -> USB TX
+ * C -> DP1
+ * D -> DP0
+ * Flipped Orientation (CC2)
+ * A -> DP0
+ * B -> DP1
+ * C -> USB TX
+ * D -> USB RX
+ *
+ * Reversed if data lanes are swapped
+ */
+ if (nb7->swap_data_lanes) {
+ nb7vpq904m_set_channel(nb7, NB7_CHNA, !reverse);
+ nb7vpq904m_set_channel(nb7, NB7_CHNB, !reverse);
+ nb7vpq904m_set_channel(nb7, NB7_CHNC, reverse);
+ nb7vpq904m_set_channel(nb7, NB7_CHND, reverse);
+ } else {
+ nb7vpq904m_set_channel(nb7, NB7_CHNA, reverse);
+ nb7vpq904m_set_channel(nb7, NB7_CHNB, reverse);
+ nb7vpq904m_set_channel(nb7, NB7_CHNC, !reverse);
+ nb7vpq904m_set_channel(nb7, NB7_CHND, !reverse);
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int nb7vpq904m_sw_set(struct typec_switch_dev *sw, enum typec_orientation orientation)
+{
+ struct nb7vpq904m *nb7 = typec_switch_get_drvdata(sw);
+ int ret;
+
+ ret = typec_switch_set(nb7->typec_switch, orientation);
+ if (ret)
+ return ret;
+
+ mutex_lock(&nb7->lock);
+
+ if (nb7->orientation != orientation) {
+ nb7->orientation = orientation;
+
+ ret = nb7vpq904m_set(nb7);
+ }
+
+ mutex_unlock(&nb7->lock);
+
+ return ret;
+}
+
+static int nb7vpq904m_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state)
+{
+ struct nb7vpq904m *nb7 = typec_retimer_get_drvdata(retimer);
+ int ret = 0;
+
+ mutex_lock(&nb7->lock);
+
+ if (nb7->mode != state->mode) {
+ nb7->mode = state->mode;
+
+ if (state->alt)
+ nb7->svid = state->alt->svid;
+ else
+ nb7->svid = 0; /* no SVID */
+
+ ret = nb7vpq904m_set(nb7);
+ }
+
+ mutex_unlock(&nb7->lock);
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
+static int nb7vpq904m_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct nb7vpq904m *nb7 = container_of(bridge, struct nb7vpq904m, bridge);
+ struct drm_bridge *next_bridge;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ next_bridge = devm_drm_of_get_bridge(&nb7->client->dev, nb7->client->dev.of_node, 0, 0);
+ if (IS_ERR(next_bridge)) {
+ dev_err(&nb7->client->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
+ return PTR_ERR(next_bridge);
+ }
+
+ return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+static const struct drm_bridge_funcs nb7vpq904m_bridge_funcs = {
+ .attach = nb7vpq904m_bridge_attach,
+};
+
+static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
+{
+ nb7->bridge.funcs = &nb7vpq904m_bridge_funcs;
+ nb7->bridge.of_node = nb7->client->dev.of_node;
+
+ return devm_drm_bridge_add(&nb7->client->dev, &nb7->bridge);
+}
+#else
+static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
+{
+ return 0;
+}
+#endif
+
+static const struct regmap_config nb7_regmap = {
+ .max_register = 0x1f,
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+enum {
+ NORMAL_LANE_MAPPING,
+ INVERT_LANE_MAPPING,
+};
+
+#define DATA_LANES_COUNT 4
+
+static const int supported_data_lane_mapping[][DATA_LANES_COUNT] = {
+ [NORMAL_LANE_MAPPING] = { 0, 1, 2, 3 },
+ [INVERT_LANE_MAPPING] = { 3, 2, 1, 0 },
+};
+
+static int nb7vpq904m_parse_data_lanes_mapping(struct nb7vpq904m *nb7)
+{
+ struct device_node *ep;
+ u32 data_lanes[4];
+ int ret, i, j;
+
+ ep = of_graph_get_endpoint_by_regs(nb7->client->dev.of_node, 1, 0);
+
+ if (ep) {
+ ret = of_property_count_u32_elems(ep, "data-lanes");
+ if (ret == -EINVAL)
+ /* Property isn't here, consider default mapping */
+ goto out_done;
+ if (ret < 0)
+ goto out_error;
+
+ if (ret != DATA_LANES_COUNT) {
+ dev_err(&nb7->client->dev, "expected 4 data lanes\n");
+ ret = -EINVAL;
+ goto out_error;
+ }
+
+ ret = of_property_read_u32_array(ep, "data-lanes", data_lanes, DATA_LANES_COUNT);
+ if (ret)
+ goto out_error;
+
+ for (i = 0; i < ARRAY_SIZE(supported_data_lane_mapping); i++) {
+ for (j = 0; j < DATA_LANES_COUNT; j++) {
+ if (data_lanes[j] != supported_data_lane_mapping[i][j])
+ break;
+ }
+
+ if (j == DATA_LANES_COUNT)
+ break;
+ }
+
+ switch (i) {
+ case NORMAL_LANE_MAPPING:
+ break;
+ case INVERT_LANE_MAPPING:
+ nb7->swap_data_lanes = true;
+ dev_info(&nb7->client->dev, "using inverted data lanes mapping\n");
+ break;
+ default:
+ dev_err(&nb7->client->dev, "invalid data lanes mapping\n");
+ ret = -EINVAL;
+ goto out_error;
+ }
+ }
+
+out_done:
+ ret = 0;
+
+out_error:
+ of_node_put(ep);
+
+ return ret;
+}
+
+static int nb7vpq904m_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct typec_switch_desc sw_desc = { };
+ struct typec_retimer_desc retimer_desc = { };
+ struct nb7vpq904m *nb7;
+ int ret;
+
+ nb7 = devm_kzalloc(dev, sizeof(*nb7), GFP_KERNEL);
+ if (!nb7)
+ return -ENOMEM;
+
+ nb7->client = client;
+
+ nb7->regmap = devm_regmap_init_i2c(client, &nb7_regmap);
+ if (IS_ERR(nb7->regmap)) {
+ dev_err(&client->dev, "Failed to allocate register map\n");
+ return PTR_ERR(nb7->regmap);
+ }
+
+ nb7->mode = TYPEC_STATE_SAFE;
+ nb7->orientation = TYPEC_ORIENTATION_NONE;
+
+ mutex_init(&nb7->lock);
+
+ nb7->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(nb7->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(nb7->enable_gpio),
+ "unable to acquire enable gpio\n");
+
+ nb7->vcc_supply = devm_regulator_get_optional(dev, "vcc");
+ if (IS_ERR(nb7->vcc_supply))
+ return PTR_ERR(nb7->vcc_supply);
+
+ nb7->typec_switch = fwnode_typec_switch_get(dev->fwnode);
+ if (IS_ERR(nb7->typec_switch))
+ return dev_err_probe(dev, PTR_ERR(nb7->typec_switch),
+ "failed to acquire orientation-switch\n");
+
+ ret = nb7vpq904m_parse_data_lanes_mapping(nb7);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(nb7->vcc_supply);
+ if (ret)
+ dev_warn(dev, "Failed to enable vcc: %d\n", ret);
+
+ gpiod_set_value(nb7->enable_gpio, 1);
+
+ ret = nb7vpq904m_register_bridge(nb7);
+ if (ret)
+ return ret;
+
+ sw_desc.drvdata = nb7;
+ sw_desc.fwnode = dev->fwnode;
+ sw_desc.set = nb7vpq904m_sw_set;
+
+ nb7->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(nb7->sw))
+ return dev_err_probe(dev, PTR_ERR(nb7->sw),
+ "Error registering typec switch\n");
+
+ retimer_desc.drvdata = nb7;
+ retimer_desc.fwnode = dev->fwnode;
+ retimer_desc.set = nb7vpq904m_retimer_set;
+
+ nb7->retimer = typec_retimer_register(dev, &retimer_desc);
+ if (IS_ERR(nb7->retimer)) {
+ typec_switch_unregister(nb7->sw);
+ return dev_err_probe(dev, PTR_ERR(nb7->retimer),
+ "Error registering typec retimer\n");
+ }
+
+ return 0;
+}
+
+static void nb7vpq904m_remove(struct i2c_client *client)
+{
+ struct nb7vpq904m *nb7 = i2c_get_clientdata(client);
+
+ typec_retimer_unregister(nb7->retimer);
+ typec_switch_unregister(nb7->sw);
+
+ gpiod_set_value(nb7->enable_gpio, 0);
+
+ regulator_disable(nb7->vcc_supply);
+}
+
+static const struct i2c_device_id nb7vpq904m_table[] = {
+ { "nb7vpq904m" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nb7vpq904m_table);
+
+static const struct of_device_id nb7vpq904m_of_table[] = {
+ { .compatible = "onnn,nb7vpq904m" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nb7vpq904m_of_table);
+
+static struct i2c_driver nb7vpq904m_driver = {
+ .driver = {
+ .name = "nb7vpq904m",
+ .of_match_table = nb7vpq904m_of_table,
+ },
+ .probe_new = nb7vpq904m_probe,
+ .remove = nb7vpq904m_remove,
+ .id_table = nb7vpq904m_table,
+};
+
+module_i2c_driver(nb7vpq904m_driver);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_DESCRIPTION("OnSemi NB7VPQ904M Type-C driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 1cd388b55c30..8eeec135dcdb 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -178,7 +178,7 @@ static struct i2c_driver pi3usb30532_driver = {
.driver = {
.name = "pi3usb30532",
},
- .probe_new = pi3usb30532_probe,
+ .probe = pi3usb30532_probe,
.remove = pi3usb30532_remove,
.id_table = pi3usb30532_table,
};
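
The .probe_new to .probe renames in this and the following hunks track the i2c core's callback unification: the plain .probe member now takes only the client, so converted drivers keep the same function body. A hedged sketch with hypothetical names:

#include <linux/i2c.h>

static int example_probe(struct i2c_client *client)	/* no struct i2c_device_id argument */
{
	return 0;
}

static struct i2c_driver example_driver = {
	.driver = { .name = "example" },
	.probe  = example_probe,	/* formerly assigned to .probe_new */
};
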
diff --git a/drivers/usb/typec/qcom-pmic-typec.c b/drivers/usb/typec/qcom-pmic-typec.c
deleted file mode 100644
index 432ea62f1bab..000000000000
--- a/drivers/usb/typec/qcom-pmic-typec.c
+++ /dev/null
@@ -1,261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <linux/usb/role.h>
-#include <linux/usb/typec_mux.h>
-
-#define TYPEC_MISC_STATUS 0xb
-#define CC_ATTACHED BIT(0)
-#define CC_ORIENTATION BIT(1)
-#define SNK_SRC_MODE BIT(6)
-#define TYPEC_MODE_CFG 0x44
-#define TYPEC_DISABLE_CMD BIT(0)
-#define EN_SNK_ONLY BIT(1)
-#define EN_SRC_ONLY BIT(2)
-#define TYPEC_VCONN_CONTROL 0x46
-#define VCONN_EN_SRC BIT(0)
-#define VCONN_EN_VAL BIT(1)
-#define TYPEC_EXIT_STATE_CFG 0x50
-#define SEL_SRC_UPPER_REF BIT(2)
-#define TYPEC_INTR_EN_CFG_1 0x5e
-#define TYPEC_INTR_EN_CFG_1_MASK GENMASK(7, 0)
-
-struct qcom_pmic_typec {
- struct device *dev;
- struct regmap *regmap;
- u32 base;
-
- struct typec_port *port;
- struct usb_role_switch *role_sw;
-
- struct regulator *vbus_reg;
- bool vbus_enabled;
-};
-
-static void qcom_pmic_typec_enable_vbus_regulator(struct qcom_pmic_typec
- *qcom_usb, bool enable)
-{
- int ret;
-
- if (enable == qcom_usb->vbus_enabled)
- return;
-
- if (enable) {
- ret = regulator_enable(qcom_usb->vbus_reg);
- if (ret)
- return;
- } else {
- ret = regulator_disable(qcom_usb->vbus_reg);
- if (ret)
- return;
- }
- qcom_usb->vbus_enabled = enable;
-}
-
-static void qcom_pmic_typec_check_connection(struct qcom_pmic_typec *qcom_usb)
-{
- enum typec_orientation orientation;
- enum usb_role role;
- unsigned int stat;
- bool enable_vbus;
-
- regmap_read(qcom_usb->regmap, qcom_usb->base + TYPEC_MISC_STATUS,
- &stat);
-
- if (stat & CC_ATTACHED) {
- orientation = (stat & CC_ORIENTATION) ?
- TYPEC_ORIENTATION_REVERSE :
- TYPEC_ORIENTATION_NORMAL;
- typec_set_orientation(qcom_usb->port, orientation);
-
- role = (stat & SNK_SRC_MODE) ? USB_ROLE_HOST : USB_ROLE_DEVICE;
- if (role == USB_ROLE_HOST)
- enable_vbus = true;
- else
- enable_vbus = false;
- } else {
- role = USB_ROLE_NONE;
- enable_vbus = false;
- }
-
- qcom_pmic_typec_enable_vbus_regulator(qcom_usb, enable_vbus);
- usb_role_switch_set_role(qcom_usb->role_sw, role);
-}
-
-static irqreturn_t qcom_pmic_typec_interrupt(int irq, void *_qcom_usb)
-{
- struct qcom_pmic_typec *qcom_usb = _qcom_usb;
-
- qcom_pmic_typec_check_connection(qcom_usb);
- return IRQ_HANDLED;
-}
-
-static void qcom_pmic_typec_typec_hw_init(struct qcom_pmic_typec *qcom_usb,
- enum typec_port_type type)
-{
- u8 mode = 0;
-
- regmap_update_bits(qcom_usb->regmap,
- qcom_usb->base + TYPEC_INTR_EN_CFG_1,
- TYPEC_INTR_EN_CFG_1_MASK, 0);
-
- if (type == TYPEC_PORT_SRC)
- mode = EN_SRC_ONLY;
- else if (type == TYPEC_PORT_SNK)
- mode = EN_SNK_ONLY;
-
- regmap_update_bits(qcom_usb->regmap, qcom_usb->base + TYPEC_MODE_CFG,
- EN_SNK_ONLY | EN_SRC_ONLY, mode);
-
- regmap_update_bits(qcom_usb->regmap,
- qcom_usb->base + TYPEC_VCONN_CONTROL,
- VCONN_EN_SRC | VCONN_EN_VAL, VCONN_EN_SRC);
- regmap_update_bits(qcom_usb->regmap,
- qcom_usb->base + TYPEC_EXIT_STATE_CFG,
- SEL_SRC_UPPER_REF, SEL_SRC_UPPER_REF);
-}
-
-static int qcom_pmic_typec_probe(struct platform_device *pdev)
-{
- struct qcom_pmic_typec *qcom_usb;
- struct device *dev = &pdev->dev;
- struct fwnode_handle *fwnode;
- struct typec_capability cap;
- const char *buf;
- int ret, irq, role;
- u32 reg;
-
- ret = device_property_read_u32(dev, "reg", &reg);
- if (ret < 0) {
- dev_err(dev, "missing base address\n");
- return ret;
- }
-
- qcom_usb = devm_kzalloc(dev, sizeof(*qcom_usb), GFP_KERNEL);
- if (!qcom_usb)
- return -ENOMEM;
-
- qcom_usb->dev = dev;
- qcom_usb->base = reg;
-
- qcom_usb->regmap = dev_get_regmap(dev->parent, NULL);
- if (!qcom_usb->regmap) {
- dev_err(dev, "Failed to get regmap\n");
- return -EINVAL;
- }
-
- qcom_usb->vbus_reg = devm_regulator_get(qcom_usb->dev, "usb_vbus");
- if (IS_ERR(qcom_usb->vbus_reg))
- return PTR_ERR(qcom_usb->vbus_reg);
-
- fwnode = device_get_named_child_node(dev, "connector");
- if (!fwnode)
- return -EINVAL;
-
- ret = fwnode_property_read_string(fwnode, "power-role", &buf);
- if (!ret) {
- role = typec_find_port_power_role(buf);
- if (role < 0)
- role = TYPEC_PORT_SNK;
- } else {
- role = TYPEC_PORT_SNK;
- }
- cap.type = role;
-
- ret = fwnode_property_read_string(fwnode, "data-role", &buf);
- if (!ret) {
- role = typec_find_port_data_role(buf);
- if (role < 0)
- role = TYPEC_PORT_UFP;
- } else {
- role = TYPEC_PORT_UFP;
- }
- cap.data = role;
-
- cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
- cap.fwnode = fwnode;
- qcom_usb->port = typec_register_port(dev, &cap);
- if (IS_ERR(qcom_usb->port)) {
- ret = PTR_ERR(qcom_usb->port);
- dev_err(dev, "Failed to register type c port %d\n", ret);
- goto err_put_node;
- }
- fwnode_handle_put(fwnode);
-
- qcom_usb->role_sw = fwnode_usb_role_switch_get(dev_fwnode(qcom_usb->dev));
- if (IS_ERR(qcom_usb->role_sw)) {
- ret = dev_err_probe(dev, PTR_ERR(qcom_usb->role_sw),
- "failed to get role switch\n");
- goto err_typec_port;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- goto err_usb_role_sw;
-
- ret = devm_request_threaded_irq(qcom_usb->dev, irq, NULL,
- qcom_pmic_typec_interrupt, IRQF_ONESHOT,
- "qcom-pmic-typec", qcom_usb);
- if (ret) {
- dev_err(&pdev->dev, "Could not request IRQ\n");
- goto err_usb_role_sw;
- }
-
- platform_set_drvdata(pdev, qcom_usb);
- qcom_pmic_typec_typec_hw_init(qcom_usb, cap.type);
- qcom_pmic_typec_check_connection(qcom_usb);
-
- return 0;
-
-err_usb_role_sw:
- usb_role_switch_put(qcom_usb->role_sw);
-err_typec_port:
- typec_unregister_port(qcom_usb->port);
-err_put_node:
- fwnode_handle_put(fwnode);
-
- return ret;
-}
-
-static int qcom_pmic_typec_remove(struct platform_device *pdev)
-{
- struct qcom_pmic_typec *qcom_usb = platform_get_drvdata(pdev);
-
- usb_role_switch_set_role(qcom_usb->role_sw, USB_ROLE_NONE);
- qcom_pmic_typec_enable_vbus_regulator(qcom_usb, 0);
-
- typec_unregister_port(qcom_usb->port);
- usb_role_switch_put(qcom_usb->role_sw);
-
- return 0;
-}
-
-static const struct of_device_id qcom_pmic_typec_table[] = {
- { .compatible = "qcom,pm8150b-usb-typec" },
- { }
-};
-MODULE_DEVICE_TABLE(of, qcom_pmic_typec_table);
-
-static struct platform_driver qcom_pmic_typec = {
- .driver = {
- .name = "qcom,pmic-typec",
- .of_match_table = qcom_pmic_typec_table,
- },
- .probe = qcom_pmic_typec_probe,
- .remove = qcom_pmic_typec_remove,
-};
-module_platform_driver(qcom_pmic_typec);
-
-MODULE_DESCRIPTION("QCOM PMIC USB type C driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/rt1719.c b/drivers/usb/typec/rt1719.c
index ea8b700b0ceb..be02d420920e 100644
--- a/drivers/usb/typec/rt1719.c
+++ b/drivers/usb/typec/rt1719.c
@@ -949,7 +949,7 @@ static struct i2c_driver rt1719_driver = {
.name = "rt1719",
.of_match_table = rt1719_device_table,
},
- .probe_new = rt1719_probe,
+ .probe = rt1719_probe,
.remove = rt1719_remove,
};
module_i2c_driver(rt1719_driver);
diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
index 494b371151e0..3ab118df1bd4 100644
--- a/drivers/usb/typec/stusb160x.c
+++ b/drivers/usb/typec/stusb160x.c
@@ -870,7 +870,7 @@ static struct i2c_driver stusb160x_driver = {
.pm = &stusb160x_pm_ops,
.of_match_table = stusb160x_of_match,
},
- .probe_new = stusb160x_probe,
+ .probe = stusb160x_probe,
.remove = stusb160x_remove,
};
module_i2c_driver(stusb160x_driver);
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index e6b88ca4a4b9..5d393f520fc2 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -76,4 +76,15 @@ config TYPEC_WCOVE
To compile this driver as module, choose M here: the module will be
called typec_wcove.ko
+config TYPEC_QCOM_PMIC
+ tristate "Qualcomm PMIC USB Type-C Port Controller Manager driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ A Type-C port and Power Delivery driver which aggregates two
+ discrete pieces of silicon in the PM8150b PMIC block: the
+ Type-C port controller and the Power Delivery PHY.
+
+ This driver enables Type-C role switching, orientation, Alternate
+ Mode, and Power Delivery support for both VBUS and VCONN.
+
endif # TYPEC_TCPM
diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile
index 08e57bb499cb..7a8cad0c0bdb 100644
--- a/drivers/usb/typec/tcpm/Makefile
+++ b/drivers/usb/typec/tcpm/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_TYPEC_MT6360) += tcpci_mt6360.o
obj-$(CONFIG_TYPEC_TCPCI_MT6370) += tcpci_mt6370.o
obj-$(CONFIG_TYPEC_TCPCI_MAXIM) += tcpci_maxim.o
tcpci_maxim-y += tcpci_maxim_core.o maxim_contaminant.o
+obj-$(CONFIG_TYPEC_QCOM_PMIC) += qcom/
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 62ba53357612..7fc1ffa14f76 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1836,7 +1836,7 @@ static struct i2c_driver fusb302_driver = {
.pm = &fusb302_pm_ops,
.of_match_table = of_match_ptr(fusb302_dt_match),
},
- .probe_new = fusb302_probe,
+ .probe = fusb302_probe,
.remove = fusb302_remove,
.id_table = fusb302_i2c_device_id,
};
diff --git a/drivers/usb/typec/tcpm/qcom/Makefile b/drivers/usb/typec/tcpm/qcom/Makefile
new file mode 100644
index 000000000000..dc1e8832e197
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_TYPEC_QCOM_PMIC) += qcom_pmic_tcpm.o
+qcom_pmic_tcpm-y += qcom_pmic_typec.o \
+ qcom_pmic_typec_port.o \
+ qcom_pmic_typec_pdphy.o
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
new file mode 100644
index 000000000000..a905160dd860
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Linaro Ltd. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/role.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/typec_mux.h>
+#include "qcom_pmic_typec_pdphy.h"
+#include "qcom_pmic_typec_port.h"
+
+struct pmic_typec_resources {
+ struct pmic_typec_pdphy_resources *pdphy_res;
+ struct pmic_typec_port_resources *port_res;
+};
+
+struct pmic_typec {
+ struct device *dev;
+ struct tcpm_port *tcpm_port;
+ struct tcpc_dev tcpc;
+ struct pmic_typec_pdphy *pmic_typec_pdphy;
+ struct pmic_typec_port *pmic_typec_port;
+ bool vbus_enabled;
+ struct mutex lock; /* VBUS state serialization */
+};
+
+#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
+
+static int qcom_pmic_typec_get_vbus(struct tcpc_dev *tcpc)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ int ret;
+
+ mutex_lock(&tcpm->lock);
+ ret = tcpm->vbus_enabled || qcom_pmic_typec_port_get_vbus(tcpm->pmic_typec_port);
+ mutex_unlock(&tcpm->lock);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ int ret = 0;
+
+ mutex_lock(&tcpm->lock);
+ if (tcpm->vbus_enabled == on)
+ goto done;
+
+ ret = qcom_pmic_typec_port_set_vbus(tcpm->pmic_typec_port, on);
+ if (ret)
+ goto done;
+
+ tcpm->vbus_enabled = on;
+ tcpm_vbus_change(tcpm->tcpm_port);
+
+done:
+ dev_dbg(tcpm->dev, "set_vbus set: %d result %d\n", on, ret);
+ mutex_unlock(&tcpm->lock);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_set_vconn(struct tcpc_dev *tcpc, bool on)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+
+ return qcom_pmic_typec_port_set_vconn(tcpm->pmic_typec_port, on);
+}
+
+static int qcom_pmic_typec_get_cc(struct tcpc_dev *tcpc,
+ enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+
+ return qcom_pmic_typec_port_get_cc(tcpm->pmic_typec_port, cc1, cc2);
+}
+
+static int qcom_pmic_typec_set_cc(struct tcpc_dev *tcpc,
+ enum typec_cc_status cc)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+
+ return qcom_pmic_typec_port_set_cc(tcpm->pmic_typec_port, cc);
+}
+
+static int qcom_pmic_typec_set_polarity(struct tcpc_dev *tcpc,
+ enum typec_cc_polarity pol)
+{
+ /* Polarity is set separately by phy-qcom-qmp.c */
+ return 0;
+}
+
+static int qcom_pmic_typec_start_toggling(struct tcpc_dev *tcpc,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+
+ return qcom_pmic_typec_port_start_toggling(tcpm->pmic_typec_port,
+ port_type, cc);
+}
+
+static int qcom_pmic_typec_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role power_role,
+ enum typec_data_role data_role)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+
+ return qcom_pmic_typec_pdphy_set_roles(tcpm->pmic_typec_pdphy,
+ data_role, power_role);
+}
+
+static int qcom_pmic_typec_set_pd_rx(struct tcpc_dev *tcpc, bool on)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+
+ return qcom_pmic_typec_pdphy_set_pd_rx(tcpm->pmic_typec_pdphy, on);
+}
+
+static int qcom_pmic_typec_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+
+ return qcom_pmic_typec_pdphy_pd_transmit(tcpm->pmic_typec_pdphy, type,
+ msg, negotiated_rev);
+}
+
+static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
+{
+ return 0;
+}
+
+static int qcom_pmic_typec_probe(struct platform_device *pdev)
+{
+ struct pmic_typec *tcpm;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct pmic_typec_resources *res;
+ struct regmap *regmap;
+ u32 base[2];
+ int ret;
+
+ res = of_device_get_match_data(dev);
+ if (!res)
+ return -ENODEV;
+
+ tcpm = devm_kzalloc(dev, sizeof(*tcpm), GFP_KERNEL);
+ if (!tcpm)
+ return -ENOMEM;
+
+ tcpm->dev = dev;
+ tcpm->tcpc.init = qcom_pmic_typec_init;
+ tcpm->tcpc.get_vbus = qcom_pmic_typec_get_vbus;
+ tcpm->tcpc.set_vbus = qcom_pmic_typec_set_vbus;
+ tcpm->tcpc.set_cc = qcom_pmic_typec_set_cc;
+ tcpm->tcpc.get_cc = qcom_pmic_typec_get_cc;
+ tcpm->tcpc.set_polarity = qcom_pmic_typec_set_polarity;
+ tcpm->tcpc.set_vconn = qcom_pmic_typec_set_vconn;
+ tcpm->tcpc.start_toggling = qcom_pmic_typec_start_toggling;
+ tcpm->tcpc.set_pd_rx = qcom_pmic_typec_set_pd_rx;
+ tcpm->tcpc.set_roles = qcom_pmic_typec_set_roles;
+ tcpm->tcpc.pd_transmit = qcom_pmic_typec_pd_transmit;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap) {
+ dev_err(dev, "Failed to get regmap\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32_array(np, "reg", base, 2);
+ if (ret)
+ return ret;
+
+ tcpm->pmic_typec_port = qcom_pmic_typec_port_alloc(dev);
+ if (IS_ERR(tcpm->pmic_typec_port))
+ return PTR_ERR(tcpm->pmic_typec_port);
+
+ tcpm->pmic_typec_pdphy = qcom_pmic_typec_pdphy_alloc(dev);
+ if (IS_ERR(tcpm->pmic_typec_pdphy))
+ return PTR_ERR(tcpm->pmic_typec_pdphy);
+
+ ret = qcom_pmic_typec_port_probe(pdev, tcpm->pmic_typec_port,
+ res->port_res, regmap, base[0]);
+ if (ret)
+ return ret;
+
+ ret = qcom_pmic_typec_pdphy_probe(pdev, tcpm->pmic_typec_pdphy,
+ res->pdphy_res, regmap, base[1]);
+ if (ret)
+ return ret;
+
+ mutex_init(&tcpm->lock);
+ platform_set_drvdata(pdev, tcpm);
+
+ tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
+ if (!tcpm->tcpc.fwnode)
+ return -EINVAL;
+
+ tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc);
+ if (IS_ERR(tcpm->tcpm_port)) {
+ ret = PTR_ERR(tcpm->tcpm_port);
+ goto fwnode_remove;
+ }
+
+ ret = qcom_pmic_typec_port_start(tcpm->pmic_typec_port,
+ tcpm->tcpm_port);
+ if (ret)
+ goto fwnode_remove;
+
+ ret = qcom_pmic_typec_pdphy_start(tcpm->pmic_typec_pdphy,
+ tcpm->tcpm_port);
+ if (ret)
+ goto fwnode_remove;
+
+ return 0;
+
+fwnode_remove:
+ fwnode_remove_software_node(tcpm->tcpc.fwnode);
+
+ return ret;
+}
+
+static void qcom_pmic_typec_remove(struct platform_device *pdev)
+{
+ struct pmic_typec *tcpm = platform_get_drvdata(pdev);
+
+ qcom_pmic_typec_pdphy_stop(tcpm->pmic_typec_pdphy);
+ qcom_pmic_typec_port_stop(tcpm->pmic_typec_port);
+ tcpm_unregister_port(tcpm->tcpm_port);
+ fwnode_remove_software_node(tcpm->tcpc.fwnode);
+}
+
+static struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
+ .irq_params = {
+ {
+ .virq = PMIC_PDPHY_SIG_TX_IRQ,
+ .irq_name = "sig-tx",
+ },
+ {
+ .virq = PMIC_PDPHY_SIG_RX_IRQ,
+ .irq_name = "sig-rx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_IRQ,
+ .irq_name = "msg-tx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_RX_IRQ,
+ .irq_name = "msg-rx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
+ .irq_name = "msg-tx-failed",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
+ .irq_name = "msg-tx-discarded",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
+ .irq_name = "msg-rx-discarded",
+ },
+ },
+ .nr_irqs = 7,
+};
+
+static struct pmic_typec_port_resources pm8150b_port_res = {
+ .irq_params = {
+ {
+ .irq_name = "vpd-detect",
+ .virq = PMIC_TYPEC_VPD_IRQ,
+ },
+
+ {
+ .irq_name = "cc-state-change",
+ .virq = PMIC_TYPEC_CC_STATE_IRQ,
+ },
+ {
+ .irq_name = "vconn-oc",
+ .virq = PMIC_TYPEC_VCONN_OC_IRQ,
+ },
+
+ {
+ .irq_name = "vbus-change",
+ .virq = PMIC_TYPEC_VBUS_IRQ,
+ },
+
+ {
+ .irq_name = "attach-detach",
+ .virq = PMIC_TYPEC_ATTACH_DETACH_IRQ,
+ },
+ {
+ .irq_name = "legacy-cable-detect",
+ .virq = PMIC_TYPEC_LEGACY_CABLE_IRQ,
+ },
+
+ {
+ .irq_name = "try-snk-src-detect",
+ .virq = PMIC_TYPEC_TRY_SNK_SRC_IRQ,
+ },
+ },
+ .nr_irqs = 7,
+};
+
+static struct pmic_typec_resources pm8150b_typec_res = {
+ .pdphy_res = &pm8150b_pdphy_res,
+ .port_res = &pm8150b_port_res,
+};
+
+static const struct of_device_id qcom_pmic_typec_table[] = {
+ { .compatible = "qcom,pm8150b-typec", .data = &pm8150b_typec_res },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_pmic_typec_table);
+
+static struct platform_driver qcom_pmic_typec_driver = {
+ .driver = {
+ .name = "qcom,pmic-typec",
+ .of_match_table = qcom_pmic_typec_table,
+ },
+ .probe = qcom_pmic_typec_probe,
+ .remove_new = qcom_pmic_typec_remove,
+};
+
+module_platform_driver(qcom_pmic_typec_driver);
+
+MODULE_DESCRIPTION("QCOM PMIC USB Type-C Port Manager Driver");
+MODULE_LICENSE("GPL");
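
The glue driver above splits a two-cell "reg" property between the Type-C port block (base[0]) and the PD PHY block (base[1]). A condensed sketch of that split; the helper name and the commented offsets are illustrative assumptions, not taken from the bindings.

#include <linux/of.h>

/* e.g. reg = <0x1500>, <0x1700>;  (hypothetical SPMI offsets) */
static int example_get_bases(struct device_node *np, u32 *port_base, u32 *pdphy_base)
{
	u32 base[2];
	int ret;

	ret = of_property_read_u32_array(np, "reg", base, 2);
	if (ret)
		return ret;

	*port_base = base[0];	/* Type-C port controller registers */
	*pdphy_base = base[1];	/* PD PHY registers */
	return 0;
}
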
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
new file mode 100644
index 000000000000..4e1b846627d2
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Linaro Ltd. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/tcpm.h>
+#include "qcom_pmic_typec_pdphy.h"
+
+struct pmic_typec_pdphy_irq_data {
+ int virq;
+ int irq;
+ struct pmic_typec_pdphy *pmic_typec_pdphy;
+};
+
+struct pmic_typec_pdphy {
+ struct device *dev;
+ struct tcpm_port *tcpm_port;
+ struct regmap *regmap;
+ u32 base;
+
+ unsigned int nr_irqs;
+ struct pmic_typec_pdphy_irq_data *irq_data;
+
+ struct work_struct reset_work;
+ struct work_struct receive_work;
+ struct regulator *vdd_pdphy;
+ spinlock_t lock; /* Register atomicity */
+};
+
+static void qcom_pmic_typec_pdphy_reset_on(struct pmic_typec_pdphy *pmic_typec_pdphy)
+{
+ struct device *dev = pmic_typec_pdphy->dev;
+ int ret;
+
+ /* Terminate TX */
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
+ if (ret)
+ goto err;
+
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG, 0);
+ if (ret)
+ goto err;
+
+ return;
+err:
+ dev_err(dev, "pd_reset_on error\n");
+}
+
+static void qcom_pmic_typec_pdphy_reset_off(struct pmic_typec_pdphy *pmic_typec_pdphy)
+{
+ struct device *dev = pmic_typec_pdphy->dev;
+ int ret;
+
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG,
+ FRAME_FILTER_EN_SOP | FRAME_FILTER_EN_HARD_RESET);
+ if (ret)
+ dev_err(dev, "pd_reset_off error\n");
+}
+
+static void qcom_pmic_typec_pdphy_sig_reset_work(struct work_struct *work)
+{
+ struct pmic_typec_pdphy *pmic_typec_pdphy = container_of(work, struct pmic_typec_pdphy,
+ reset_work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
+
+ qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
+ qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
+
+ spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
+
+ tcpm_pd_hard_reset(pmic_typec_pdphy->tcpm_port);
+}
+
+static int
+qcom_pmic_typec_pdphy_clear_tx_control_reg(struct pmic_typec_pdphy *pmic_typec_pdphy)
+{
+ struct device *dev = pmic_typec_pdphy->dev;
+ unsigned int val;
+ int ret;
+
+ /* Clear TX control register */
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
+ if (ret)
+ goto done;
+
+ /* Perform readback to ensure sufficient delay for command to latch */
+ ret = regmap_read(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, &val);
+
+done:
+ if (ret)
+ dev_err(dev, "pd_clear_tx_control_reg: clear tx flag\n");
+
+ return ret;
+}
+
+static int
+qcom_pmic_typec_pdphy_pd_transmit_signal(struct pmic_typec_pdphy *pmic_typec_pdphy,
+ enum tcpm_transmit_type type,
+ unsigned int negotiated_rev)
+{
+ struct device *dev = pmic_typec_pdphy->dev;
+ unsigned int val;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
+
+ /* Clear TX control register */
+ ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
+ if (ret)
+ goto done;
+
+ val = TX_CONTROL_SEND_SIGNAL;
+ if (negotiated_rev == PD_REV30)
+ val |= TX_CONTROL_RETRY_COUNT(2);
+ else
+ val |= TX_CONTROL_RETRY_COUNT(3);
+
+ if (type == TCPC_TX_CABLE_RESET || type == TCPC_TX_HARD_RESET)
+ val |= TX_CONTROL_FRAME_TYPE(1);
+
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);
+
+done:
+ spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
+
+ dev_vdbg(dev, "pd_transmit_signal: type %d negotiate_rev %d send %d\n",
+ type, negotiated_rev, ret);
+
+ return ret;
+}
+
+static int
+qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pdphy,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev)
+{
+ struct device *dev = pmic_typec_pdphy->dev;
+ unsigned int val, hdr_len, txbuf_len, txsize_len;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
+
+ ret = regmap_read(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
+ &val);
+ if (ret)
+ goto done;
+
+ if (val) {
+ dev_err(dev, "pd_transmit_payload: RX message pending\n");
+ ret = -EBUSY;
+ goto done;
+ }
+
+ /* Clear TX control register */
+ ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
+ if (ret)
+ goto done;
+
+ hdr_len = sizeof(msg->header);
+ txbuf_len = pd_header_cnt_le(msg->header) * 4;
+ txsize_len = hdr_len + txbuf_len - 1;
+
+ /* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
+ ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,
+ &msg->header, hdr_len);
+ if (ret)
+ goto done;
+
+ /* Write payload to USB_PDPHY_TX_BUFFER_DATA_REG for txbuf_len */
+ if (txbuf_len) {
+ ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_DATA_REG,
+ &msg->payload, txbuf_len);
+ if (ret)
+ goto done;
+ }
+
+ /* Write total length ((header + data) - 1) to USB_PDPHY_TX_SIZE_REG */
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_TX_SIZE_REG,
+ txsize_len);
+ if (ret)
+ goto done;
+
+ /* Clear TX control register */
+ ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
+ if (ret)
+ goto done;
+
+ /* Initiate transmit with retry count as indicated by PD revision */
+ val = TX_CONTROL_FRAME_TYPE(type) | TX_CONTROL_SEND_MSG;
+ if (pd_header_rev(msg->header) == PD_REV30)
+ val |= TX_CONTROL_RETRY_COUNT(2);
+ else
+ val |= TX_CONTROL_RETRY_COUNT(3);
+
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);
+
+done:
+ spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
+
+ if (ret) {
+ dev_err(dev, "pd_transmit_payload: hdr %*ph data %*ph ret %d\n",
+ hdr_len, &msg->header, txbuf_len, &msg->payload, ret);
+ }
+
+ return ret;
+}
+
+int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev)
+{
+ struct device *dev = pmic_typec_pdphy->dev;
+ int ret;
+
+ if (msg) {
+ ret = qcom_pmic_typec_pdphy_pd_transmit_payload(pmic_typec_pdphy,
+ type, msg,
+ negotiated_rev);
+ } else {
+ ret = qcom_pmic_typec_pdphy_pd_transmit_signal(pmic_typec_pdphy,
+ type,
+ negotiated_rev);
+ }
+
+ if (ret)
+ dev_dbg(dev, "pd_transmit: type %x result %d\n", type, ret);
+
+ return ret;
+}
+
+static void qcom_pmic_typec_pdphy_pd_receive(struct pmic_typec_pdphy *pmic_typec_pdphy)
+{
+ struct device *dev = pmic_typec_pdphy->dev;
+ struct pd_message msg;
+ unsigned int size, rx_status;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
+
+ ret = regmap_read(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_RX_SIZE_REG, &size);
+ if (ret)
+ goto done;
+
+ /* Hardware requires +1 of the real read value to be passed */
+ if (size < 1 || size > sizeof(msg.payload) + 1) {
+ dev_dbg(dev, "pd_receive: invalid size %d\n", size);
+ goto done;
+ }
+
+ size += 1;
+ ret = regmap_read(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_RX_STATUS_REG,
+ &rx_status);
+
+ if (ret)
+ goto done;
+
+ ret = regmap_bulk_read(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_RX_BUFFER_REG,
+ (u8 *)&msg, size);
+ if (ret)
+ goto done;
+
+ /* Return ownership of RX buffer to hardware */
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, 0);
+
+done:
+ spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
+
+ if (!ret) {
+ dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
+ tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg);
+ }
+}
+
+static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
+{
+ struct pmic_typec_pdphy_irq_data *irq_data = dev_id;
+ struct pmic_typec_pdphy *pmic_typec_pdphy = irq_data->pmic_typec_pdphy;
+ struct device *dev = pmic_typec_pdphy->dev;
+
+ switch (irq_data->virq) {
+ case PMIC_PDPHY_SIG_TX_IRQ:
+ dev_err(dev, "isr: tx_sig\n");
+ break;
+ case PMIC_PDPHY_SIG_RX_IRQ:
+ schedule_work(&pmic_typec_pdphy->reset_work);
+ break;
+ case PMIC_PDPHY_MSG_TX_IRQ:
+ tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
+ TCPC_TX_SUCCESS);
+ break;
+ case PMIC_PDPHY_MSG_RX_IRQ:
+ qcom_pmic_typec_pdphy_pd_receive(pmic_typec_pdphy);
+ break;
+ case PMIC_PDPHY_MSG_TX_FAIL_IRQ:
+ tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
+ TCPC_TX_FAILED);
+ break;
+ case PMIC_PDPHY_MSG_TX_DISCARD_IRQ:
+ tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
+ TCPC_TX_DISCARDED);
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
+
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, !on);
+
+ spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
+
+ dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");
+
+ return ret;
+}
+
+int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
+ bool data_role_host, bool power_role_src)
+{
+ struct device *dev = pmic_typec_pdphy->dev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
+
+ ret = regmap_update_bits(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
+ MSG_CONFIG_PORT_DATA_ROLE |
+ MSG_CONFIG_PORT_POWER_ROLE,
+ data_role_host << 3 | power_role_src << 2);
+
+ spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
+
+ dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
+ data_role_host, power_role_src);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdphy)
+{
+ struct device *dev = pmic_typec_pdphy->dev;
+ int ret;
+
+ ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
+ if (ret)
+ return ret;
+
+ /* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
+ ret = regmap_update_bits(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
+ MSG_CONFIG_SPEC_REV_MASK, PD_REV20);
+ if (ret)
+ goto done;
+
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
+ if (ret)
+ goto done;
+
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG,
+ CONTROL_ENABLE);
+ if (ret)
+ goto done;
+
+ qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
+done:
+ if (ret) {
+ regulator_disable(pmic_typec_pdphy->vdd_pdphy);
+ dev_err(dev, "pdphy_enable fail %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdphy)
+{
+ int ret;
+
+ qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
+
+ ret = regmap_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
+
+ regulator_disable(pmic_typec_pdphy->vdd_pdphy);
+
+ return ret;
+}
+
+static int pmic_typec_pdphy_reset(struct pmic_typec_pdphy *pmic_typec_pdphy)
+{
+ int ret;
+
+ ret = qcom_pmic_typec_pdphy_disable(pmic_typec_pdphy);
+ if (ret)
+ goto done;
+
+ usleep_range(400, 500);
+ ret = qcom_pmic_typec_pdphy_enable(pmic_typec_pdphy);
+done:
+ return ret;
+}
+
+int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
+ struct tcpm_port *tcpm_port)
+{
+ int i;
+ int ret;
+
+ pmic_typec_pdphy->tcpm_port = tcpm_port;
+
+ ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
+ enable_irq(pmic_typec_pdphy->irq_data[i].irq);
+
+ return 0;
+}
+
+void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
+{
+ int i;
+
+ for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
+ disable_irq(pmic_typec_pdphy->irq_data[i].irq);
+
+ qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
+}
+
+struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
+{
+ return devm_kzalloc(dev, sizeof(struct pmic_typec_pdphy), GFP_KERNEL);
+}
+
+int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
+ struct pmic_typec_pdphy *pmic_typec_pdphy,
+ struct pmic_typec_pdphy_resources *res,
+ struct regmap *regmap,
+ u32 base)
+{
+ struct device *dev = &pdev->dev;
+ struct pmic_typec_pdphy_irq_data *irq_data;
+ int i, ret, irq;
+
+ if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
+ return -EINVAL;
+
+ irq_data = devm_kzalloc(dev, sizeof(*irq_data) * res->nr_irqs,
+ GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ pmic_typec_pdphy->vdd_pdphy = devm_regulator_get(dev, "vdd-pdphy");
+ if (IS_ERR(pmic_typec_pdphy->vdd_pdphy))
+ return PTR_ERR(pmic_typec_pdphy->vdd_pdphy);
+
+ pmic_typec_pdphy->dev = dev;
+ pmic_typec_pdphy->base = base;
+ pmic_typec_pdphy->regmap = regmap;
+ pmic_typec_pdphy->nr_irqs = res->nr_irqs;
+ pmic_typec_pdphy->irq_data = irq_data;
+ spin_lock_init(&pmic_typec_pdphy->lock);
+ INIT_WORK(&pmic_typec_pdphy->reset_work, qcom_pmic_typec_pdphy_sig_reset_work);
+
+ for (i = 0; i < res->nr_irqs; i++, irq_data++) {
+ irq = platform_get_irq_byname(pdev, res->irq_params[i].irq_name);
+ if (irq < 0)
+ return irq;
+
+ irq_data->pmic_typec_pdphy = pmic_typec_pdphy;
+ irq_data->irq = irq;
+ irq_data->virq = res->irq_params[i].virq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ qcom_pmic_typec_pdphy_isr,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ res->irq_params[i].irq_name,
+ irq_data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
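
The PD PHY requests its interrupts with IRQF_NO_AUTOEN and only unmasks them from qcom_pmic_typec_pdphy_start() once the tcpm port exists. A minimal sketch of that pattern with a hypothetical "foo" device:

#include <linux/interrupt.h>

static irqreturn_t foo_isr(int irq, void *data)
{
	/* handle the event */
	return IRQ_HANDLED;
}

static int foo_request_irq(struct device *dev, int irq, void *data)
{
	/* registered but left masked until the consumer is ready */
	return devm_request_threaded_irq(dev, irq, NULL, foo_isr,
					 IRQF_ONESHOT | IRQF_NO_AUTOEN,
					 "foo", data);
}

/* later, once the tcpm port (or other consumer) is registered: */
static void foo_start(int irq)
{
	enable_irq(irq);
}
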
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
new file mode 100644
index 000000000000..e67954e31b14
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd. All rights reserved.
+ */
+#ifndef __QCOM_PMIC_PDPHY_H__
+#define __QCOM_PMIC_PDPHY_H__
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/usb/tcpm.h>
+
+#define USB_PDPHY_MAX_DATA_OBJ_LEN 28
+#define USB_PDPHY_MSG_HDR_LEN 2
+
+/* PD PHY register offsets and bit fields */
+#define USB_PDPHY_MSG_CONFIG_REG 0x40
+#define MSG_CONFIG_PORT_DATA_ROLE BIT(3)
+#define MSG_CONFIG_PORT_POWER_ROLE BIT(2)
+#define MSG_CONFIG_SPEC_REV_MASK (BIT(1) | BIT(0))
+
+#define USB_PDPHY_EN_CONTROL_REG 0x46
+#define CONTROL_ENABLE BIT(0)
+
+#define USB_PDPHY_RX_STATUS_REG 0x4A
+#define RX_FRAME_TYPE (BIT(0) | BIT(1) | BIT(2))
+
+#define USB_PDPHY_FRAME_FILTER_REG 0x4C
+#define FRAME_FILTER_EN_HARD_RESET BIT(5)
+#define FRAME_FILTER_EN_SOP BIT(0)
+
+#define USB_PDPHY_TX_SIZE_REG 0x42
+#define TX_SIZE_MASK 0xF
+
+#define USB_PDPHY_TX_CONTROL_REG 0x44
+#define TX_CONTROL_RETRY_COUNT(n) (((n) & 0x3) << 5)
+#define TX_CONTROL_FRAME_TYPE(n) (((n) & 0x7) << 2)
+#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
+#define TX_CONTROL_SEND_SIGNAL BIT(1)
+#define TX_CONTROL_SEND_MSG BIT(0)
+
+#define USB_PDPHY_RX_SIZE_REG 0x48
+
+#define USB_PDPHY_RX_ACKNOWLEDGE_REG 0x4B
+#define RX_BUFFER_TOKEN BIT(0)
+
+#define USB_PDPHY_BIST_MODE_REG 0x4E
+#define BIST_MODE_MASK 0xF
+#define BIST_ENABLE BIT(7)
+#define PD_MSG_BIST 0x3
+#define PD_BIST_TEST_DATA_MODE 0x8
+
+#define USB_PDPHY_TX_BUFFER_HDR_REG 0x60
+#define USB_PDPHY_TX_BUFFER_DATA_REG 0x62
+
+#define USB_PDPHY_RX_BUFFER_REG 0x80
+
+/* VDD regulator */
+#define VDD_PDPHY_VOL_MIN 2800000 /* uV */
+#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
+#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
+
+/* Message Spec Rev field */
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+
+/* timers */
+#define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */
+#define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */
+
+/* Interrupt numbers */
+#define PMIC_PDPHY_SIG_TX_IRQ 0x0
+#define PMIC_PDPHY_SIG_RX_IRQ 0x1
+#define PMIC_PDPHY_MSG_TX_IRQ 0x2
+#define PMIC_PDPHY_MSG_RX_IRQ 0x3
+#define PMIC_PDPHY_MSG_TX_FAIL_IRQ 0x4
+#define PMIC_PDPHY_MSG_TX_DISCARD_IRQ 0x5
+#define PMIC_PDPHY_MSG_RX_DISCARD_IRQ 0x6
+#define PMIC_PDPHY_FR_SWAP_IRQ 0x7
+
+/* Resources */
+#define PMIC_PDPHY_MAX_IRQS 0x08
+
+struct pmic_typec_pdphy_irq_params {
+ int virq;
+ char *irq_name;
+};
+
+struct pmic_typec_pdphy_resources {
+ unsigned int nr_irqs;
+ struct pmic_typec_pdphy_irq_params irq_params[PMIC_PDPHY_MAX_IRQS];
+};
+
+/* API */
+struct pmic_typec_pdphy;
+
+struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev);
+
+int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
+ struct pmic_typec_pdphy *pmic_typec_pdphy,
+ struct pmic_typec_pdphy_resources *res,
+ struct regmap *regmap,
+ u32 base);
+
+int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
+ struct tcpm_port *tcpm_port);
+
+void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy);
+
+int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
+ bool data_role_host, bool power_role_src);
+
+int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on);
+
+int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev);
+
+#endif /* __QCOM_PMIC_TYPEC_PDPHY_H__ */
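
A worked example of the TX control encoding defined above, matching the way qcom_pmic_typec_pdphy_pd_transmit_payload() programs a PD 3.0 SOP message (frame type 0, two retries, send-message trigger); the numeric result is shown only for illustration.

u8 val = TX_CONTROL_FRAME_TYPE(0) |	/* SOP */
	 TX_CONTROL_RETRY_COUNT(2) |	/* PD_REV30 -> 2 retries */
	 TX_CONTROL_SEND_MSG;
/* ((0 & 0x7) << 2) | ((2 & 0x3) << 5) | BIT(0) == 0x41 */
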
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
new file mode 100644
index 000000000000..94285f64b67d
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Linaro Ltd. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/workqueue.h>
+#include "qcom_pmic_typec_port.h"
+
+struct pmic_typec_port_irq_data {
+ int virq;
+ int irq;
+ struct pmic_typec_port *pmic_typec_port;
+};
+
+struct pmic_typec_port {
+ struct device *dev;
+ struct tcpm_port *tcpm_port;
+ struct regmap *regmap;
+ u32 base;
+ unsigned int nr_irqs;
+ struct pmic_typec_port_irq_data *irq_data;
+
+ struct regulator *vdd_vbus;
+
+ int cc;
+ bool debouncing_cc;
+ struct delayed_work cc_debounce_dwork;
+
+ spinlock_t lock; /* Register atomicity */
+};
+
+static const char * const typec_cc_status_name[] = {
+ [TYPEC_CC_OPEN] = "Open",
+ [TYPEC_CC_RA] = "Ra",
+ [TYPEC_CC_RD] = "Rd",
+ [TYPEC_CC_RP_DEF] = "Rp-def",
+ [TYPEC_CC_RP_1_5] = "Rp-1.5",
+ [TYPEC_CC_RP_3_0] = "Rp-3.0",
+};
+
+static const char *rp_unknown = "unknown";
+
+static const char *cc_to_name(enum typec_cc_status cc)
+{
+ if (cc > TYPEC_CC_RP_3_0)
+ return rp_unknown;
+
+ return typec_cc_status_name[cc];
+}
+
+static const char * const rp_sel_name[] = {
+ [TYPEC_SRC_RP_SEL_80UA] = "Rp-def-80uA",
+ [TYPEC_SRC_RP_SEL_180UA] = "Rp-1.5-180uA",
+ [TYPEC_SRC_RP_SEL_330UA] = "Rp-3.0-330uA",
+};
+
+static const char *rp_sel_to_name(int rp_sel)
+{
+ if (rp_sel > TYPEC_SRC_RP_SEL_330UA)
+ return rp_unknown;
+
+ return rp_sel_name[rp_sel];
+}
+
+#define misc_to_cc(misc) (!!((misc) & CC_ORIENTATION) ? "cc1" : "cc2")
+#define misc_to_vconn(misc) (!!((misc) & CC_ORIENTATION) ? "cc2" : "cc1")
+
+static void qcom_pmic_typec_port_cc_debounce(struct work_struct *work)
+{
+ struct pmic_typec_port *pmic_typec_port =
+ container_of(work, struct pmic_typec_port, cc_debounce_dwork.work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmic_typec_port->lock, flags);
+ pmic_typec_port->debouncing_cc = false;
+ spin_unlock_irqrestore(&pmic_typec_port->lock, flags);
+
+ dev_dbg(pmic_typec_port->dev, "Debounce cc complete\n");
+}
+
+static irqreturn_t pmic_typec_port_isr(int irq, void *dev_id)
+{
+ struct pmic_typec_port_irq_data *irq_data = dev_id;
+ struct pmic_typec_port *pmic_typec_port = irq_data->pmic_typec_port;
+ u32 misc_stat;
+ bool vbus_change = false;
+ bool cc_change = false;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pmic_typec_port->lock, flags);
+
+ ret = regmap_read(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_MISC_STATUS_REG,
+ &misc_stat);
+ if (ret)
+ goto done;
+
+ switch (irq_data->virq) {
+ case PMIC_TYPEC_VBUS_IRQ:
+ vbus_change = true;
+ break;
+ case PMIC_TYPEC_CC_STATE_IRQ:
+ case PMIC_TYPEC_ATTACH_DETACH_IRQ:
+ if (!pmic_typec_port->debouncing_cc)
+ cc_change = true;
+ break;
+ }
+
+done:
+ spin_unlock_irqrestore(&pmic_typec_port->lock, flags);
+
+ if (vbus_change)
+ tcpm_vbus_change(pmic_typec_port->tcpm_port);
+
+ if (cc_change)
+ tcpm_cc_change(pmic_typec_port->tcpm_port);
+
+ return IRQ_HANDLED;
+}
+
+int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port)
+{
+ struct device *dev = pmic_typec_port->dev;
+ unsigned int misc;
+ int ret;
+
+ ret = regmap_read(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_MISC_STATUS_REG,
+ &misc);
+ if (ret)
+ misc = 0;
+
+ dev_dbg(dev, "get_vbus: 0x%08x detect %d\n", misc, !!(misc & TYPEC_VBUS_DETECT));
+
+ return !!(misc & TYPEC_VBUS_DETECT);
+}
+
+int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool on)
+{
+ u32 sm_stat;
+ u32 val;
+ int ret;
+
+ if (on) {
+ ret = regulator_enable(pmic_typec_port->vdd_vbus);
+ if (ret)
+ return ret;
+
+ val = TYPEC_SM_VBUS_VSAFE5V;
+ } else {
+ ret = regulator_disable(pmic_typec_port->vdd_vbus);
+ if (ret)
+ return ret;
+
+ val = TYPEC_SM_VBUS_VSAFE0V;
+ }
+
+ /* Poll waiting for transition to required vSafe5V or vSafe0V */
+ ret = regmap_read_poll_timeout(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_SM_STATUS_REG,
+ sm_stat, sm_stat & val,
+ 100, 250000);
+ if (ret)
+ dev_warn(pmic_typec_port->dev, "vbus vsafe%dv fail\n", on ? 5 : 0);
+
+ return 0;
+}
+
+int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
+ enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2)
+{
+ struct device *dev = pmic_typec_port->dev;
+ unsigned int misc, val;
+ bool attached;
+ int ret = 0;
+
+ ret = regmap_read(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_MISC_STATUS_REG, &misc);
+ if (ret)
+ goto done;
+
+ attached = !!(misc & CC_ATTACHED);
+
+ if (pmic_typec_port->debouncing_cc) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ *cc1 = TYPEC_CC_OPEN;
+ *cc2 = TYPEC_CC_OPEN;
+
+ if (!attached)
+ goto done;
+
+ if (misc & SNK_SRC_MODE) {
+ ret = regmap_read(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_SRC_STATUS_REG,
+ &val);
+ if (ret)
+ goto done;
+ switch (val & DETECTED_SRC_TYPE_MASK) {
+ case SRC_RD_OPEN:
+ val = TYPEC_CC_RD;
+ break;
+ case SRC_RD_RA_VCONN:
+ val = TYPEC_CC_RD;
+ *cc1 = TYPEC_CC_RA;
+ *cc2 = TYPEC_CC_RA;
+ break;
+ default:
+ dev_warn(dev, "unexpected src status %.2x\n", val);
+ val = TYPEC_CC_RD;
+ break;
+ }
+ } else {
+ ret = regmap_read(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_SNK_STATUS_REG,
+ &val);
+ if (ret)
+ goto done;
+ switch (val & DETECTED_SNK_TYPE_MASK) {
+ case SNK_RP_STD:
+ val = TYPEC_CC_RP_DEF;
+ break;
+ case SNK_RP_1P5:
+ val = TYPEC_CC_RP_1_5;
+ break;
+ case SNK_RP_3P0:
+ val = TYPEC_CC_RP_3_0;
+ break;
+ default:
+ dev_warn(dev, "unexpected snk status %.2x\n", val);
+ val = TYPEC_CC_RP_DEF;
+ break;
+ }
+ val = TYPEC_CC_RP_DEF;
+ }
+
+ if (misc & CC_ORIENTATION)
+ *cc2 = val;
+ else
+ *cc1 = val;
+
+done:
+ dev_dbg(dev, "get_cc: misc 0x%08x cc1 0x%08x %s cc2 0x%08x %s attached %d cc=%s\n",
+ misc, *cc1, cc_to_name(*cc1), *cc2, cc_to_name(*cc2), attached,
+ misc_to_cc(misc));
+
+ return ret;
+}
+
+static void qcom_pmic_set_cc_debounce(struct pmic_typec_port *pmic_typec_port)
+{
+ pmic_typec_port->debouncing_cc = true;
+ schedule_delayed_work(&pmic_typec_port->cc_debounce_dwork,
+ msecs_to_jiffies(2));
+}
+
+int qcom_pmic_typec_port_set_cc(struct pmic_typec_port *pmic_typec_port,
+ enum typec_cc_status cc)
+{
+ struct device *dev = pmic_typec_port->dev;
+ unsigned int mode, currsrc;
+ unsigned int misc;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pmic_typec_port->lock, flags);
+
+ ret = regmap_read(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_MISC_STATUS_REG,
+ &misc);
+ if (ret)
+ goto done;
+
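+ /* Default to source mode; TYPEC_CC_RD below selects sink-only */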
+ mode = EN_SRC_ONLY;
+
+ switch (cc) {
+ case TYPEC_CC_OPEN:
+ currsrc = TYPEC_SRC_RP_SEL_80UA;
+ break;
+ case TYPEC_CC_RP_DEF:
+ currsrc = TYPEC_SRC_RP_SEL_80UA;
+ break;
+ case TYPEC_CC_RP_1_5:
+ currsrc = TYPEC_SRC_RP_SEL_180UA;
+ break;
+ case TYPEC_CC_RP_3_0:
+ currsrc = TYPEC_SRC_RP_SEL_330UA;
+ break;
+ case TYPEC_CC_RD:
+ currsrc = TYPEC_SRC_RP_SEL_80UA;
+ mode = EN_SNK_ONLY;
+ break;
+ default:
+ dev_warn(dev, "unexpected set_cc %d\n", cc);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (mode == EN_SRC_ONLY) {
+ ret = regmap_write(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_CURRSRC_CFG_REG,
+ currsrc);
+ if (ret)
+ goto done;
+ }
+
+ pmic_typec_port->cc = cc;
+ qcom_pmic_set_cc_debounce(pmic_typec_port);
+ ret = 0;
+
+done:
+ spin_unlock_irqrestore(&pmic_typec_port->lock, flags);
+
+ dev_dbg(dev, "set_cc: currsrc=%x %s mode %s debounce %d attached %d cc=%s\n",
+ currsrc, rp_sel_to_name(currsrc),
+ mode == EN_SRC_ONLY ? "EN_SRC_ONLY" : "EN_SNK_ONLY",
+ pmic_typec_port->debouncing_cc, !!(misc & CC_ATTACHED),
+ misc_to_cc(misc));
+
+ return ret;
+}
+
+int qcom_pmic_typec_port_set_vconn(struct pmic_typec_port *pmic_typec_port, bool on)
+{
+ struct device *dev = pmic_typec_port->dev;
+ unsigned int orientation, misc, mask, value;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pmic_typec_port->lock, flags);
+
+ ret = regmap_read(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_MISC_STATUS_REG, &misc);
+ if (ret)
+ goto done;
+
+ /* Set VCONN on the inversion of the active CC channel */
+ orientation = (misc & CC_ORIENTATION) ? 0 : VCONN_EN_ORIENTATION;
+ if (on) {
+ mask = VCONN_EN_ORIENTATION | VCONN_EN_VALUE;
+ value = orientation | VCONN_EN_VALUE | VCONN_EN_SRC;
+ } else {
+ mask = VCONN_EN_VALUE;
+ value = 0;
+ }
+
+ ret = regmap_update_bits(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_VCONN_CONTROL_REG,
+ mask, value);
+done:
+ spin_unlock_irqrestore(&pmic_typec_port->lock, flags);
+
+ dev_dbg(dev, "set_vconn: orientation %d control 0x%08x state %s cc %s vconn %s\n",
+ orientation, value, on ? "on" : "off", misc_to_vconn(misc), misc_to_cc(misc));
+
+ return ret;
+}
+
+int qcom_pmic_typec_port_start_toggling(struct pmic_typec_port *pmic_typec_port,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc)
+{
+ struct device *dev = pmic_typec_port->dev;
+ unsigned int misc;
+ u8 mode = 0;
+ unsigned long flags;
+ int ret;
+
+ switch (port_type) {
+ case TYPEC_PORT_SRC:
+ mode = EN_SRC_ONLY;
+ break;
+ case TYPEC_PORT_SNK:
+ mode = EN_SNK_ONLY;
+ break;
+ case TYPEC_PORT_DRP:
+ mode = EN_TRY_SNK;
+ break;
+ }
+
+ spin_lock_irqsave(&pmic_typec_port->lock, flags);
+
+ ret = regmap_read(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_MISC_STATUS_REG, &misc);
+ if (ret)
+ goto done;
+
+ dev_dbg(dev, "start_toggling: misc 0x%08x attached %d port_type %d current cc %d new %d\n",
+ misc, !!(misc & CC_ATTACHED), port_type, pmic_typec_port->cc, cc);
+
+ qcom_pmic_set_cc_debounce(pmic_typec_port);
+
+ /* force it to toggle at least once */
+ ret = regmap_write(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_MODE_CFG_REG,
+ TYPEC_DISABLE_CMD);
+ if (ret)
+ goto done;
+
+ ret = regmap_write(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_MODE_CFG_REG,
+ mode);
+done:
+ spin_unlock_irqrestore(&pmic_typec_port->lock, flags);
+
+ return ret;
+}
+
+#define TYPEC_INTR_EN_CFG_1_MASK \
+ (TYPEC_LEGACY_CABLE_INT_EN | \
+ TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN | \
+ TYPEC_TRYSOURCE_DETECT_INT_EN | \
+ TYPEC_TRYSINK_DETECT_INT_EN | \
+ TYPEC_CCOUT_DETACH_INT_EN | \
+ TYPEC_CCOUT_ATTACH_INT_EN | \
+ TYPEC_VBUS_DEASSERT_INT_EN | \
+ TYPEC_VBUS_ASSERT_INT_EN)
+
+#define TYPEC_INTR_EN_CFG_2_MASK \
+ (TYPEC_STATE_MACHINE_CHANGE_INT_EN | TYPEC_VBUS_ERROR_INT_EN | \
+ TYPEC_DEBOUNCE_DONE_INT_EN)
+
+int qcom_pmic_typec_port_start(struct pmic_typec_port *pmic_typec_port,
+ struct tcpm_port *tcpm_port)
+{
+ int i;
+ int mask;
+ int ret;
+
+ /* Configure interrupt sources */
+ ret = regmap_write(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_INTERRUPT_EN_CFG_1_REG,
+ TYPEC_INTR_EN_CFG_1_MASK);
+ if (ret)
+ goto done;
+
+ ret = regmap_write(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_INTERRUPT_EN_CFG_2_REG,
+ TYPEC_INTR_EN_CFG_2_MASK);
+ if (ret)
+ goto done;
+
+ /* start in TRY_SNK mode */
+ ret = regmap_write(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_MODE_CFG_REG, EN_TRY_SNK);
+ if (ret)
+ goto done;
+
+ /* Configure VCONN for software control */
+ ret = regmap_update_bits(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_VCONN_CONTROL_REG,
+ VCONN_EN_SRC | VCONN_EN_VALUE, VCONN_EN_SRC);
+ if (ret)
+ goto done;
+
+ /* Set CC threshold to 1.6 Volts | tPDdebounce = 10-20ms */
+ mask = SEL_SRC_UPPER_REF | USE_TPD_FOR_EXITING_ATTACHSRC;
+ ret = regmap_update_bits(pmic_typec_port->regmap,
+ pmic_typec_port->base + TYPEC_EXIT_STATE_CFG_REG,
+ mask, mask);
+ if (ret)
+ goto done;
+
+ pmic_typec_port->tcpm_port = tcpm_port;
+
+ for (i = 0; i < pmic_typec_port->nr_irqs; i++)
+ enable_irq(pmic_typec_port->irq_data[i].irq);
+
+done:
+ return ret;
+}
+
+void qcom_pmic_typec_port_stop(struct pmic_typec_port *pmic_typec_port)
+{
+ int i;
+
+ for (i = 0; i < pmic_typec_port->nr_irqs; i++)
+ disable_irq(pmic_typec_port->irq_data[i].irq);
+}
+
+struct pmic_typec_port *qcom_pmic_typec_port_alloc(struct device *dev)
+{
+ return devm_kzalloc(dev, sizeof(struct pmic_typec_port), GFP_KERNEL);
+}
+
+int qcom_pmic_typec_port_probe(struct platform_device *pdev,
+ struct pmic_typec_port *pmic_typec_port,
+ struct pmic_typec_port_resources *res,
+ struct regmap *regmap,
+ u32 base)
+{
+ struct device *dev = &pdev->dev;
+ struct pmic_typec_port_irq_data *irq_data;
+ int i, ret, irq;
+
+ if (!res->nr_irqs || res->nr_irqs > PMIC_TYPEC_MAX_IRQS)
+ return -EINVAL;
+
+ irq_data = devm_kzalloc(dev, sizeof(*irq_data) * res->nr_irqs,
+ GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ pmic_typec_port->vdd_vbus = devm_regulator_get(dev, "vdd-vbus");
+ if (IS_ERR(pmic_typec_port->vdd_vbus))
+ return PTR_ERR(pmic_typec_port->vdd_vbus);
+
+ pmic_typec_port->dev = dev;
+ pmic_typec_port->base = base;
+ pmic_typec_port->regmap = regmap;
+ pmic_typec_port->nr_irqs = res->nr_irqs;
+ pmic_typec_port->irq_data = irq_data;
+ spin_lock_init(&pmic_typec_port->lock);
+ INIT_DELAYED_WORK(&pmic_typec_port->cc_debounce_dwork,
+ qcom_pmic_typec_port_cc_debounce);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ for (i = 0; i < res->nr_irqs; i++, irq_data++) {
+ irq = platform_get_irq_byname(pdev,
+ res->irq_params[i].irq_name);
+ if (irq < 0)
+ return irq;
+
+ irq_data->pmic_typec_port = pmic_typec_port;
+ irq_data->irq = irq;
+ irq_data->virq = res->irq_params[i].virq;
+ ret = devm_request_threaded_irq(dev, irq, NULL, pmic_typec_port_isr,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ res->irq_params[i].irq_name,
+ irq_data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
new file mode 100644
index 000000000000..d4d358c680b6
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd. All rights reserved.
+ */
+#ifndef __QCOM_PMIC_TYPEC_H__
+#define __QCOM_PMIC_TYPEC_H__
+
+#include <linux/platform_device.h>
+#include <linux/usb/tcpm.h>
+
+#define TYPEC_SNK_STATUS_REG 0x06
+#define DETECTED_SNK_TYPE_MASK GENMASK(6, 0)
+#define SNK_DAM_MASK GENMASK(6, 4)
+#define SNK_DAM_500MA BIT(6)
+#define SNK_DAM_1500MA BIT(5)
+#define SNK_DAM_3000MA BIT(4)
+#define SNK_RP_STD BIT(3)
+#define SNK_RP_1P5 BIT(2)
+#define SNK_RP_3P0 BIT(1)
+#define SNK_RP_SHORT BIT(0)
+
+#define TYPEC_SRC_STATUS_REG 0x08
+#define DETECTED_SRC_TYPE_MASK GENMASK(4, 0)
+#define SRC_HIGH_BATT BIT(5)
+#define SRC_DEBUG_ACCESS BIT(4)
+#define SRC_RD_OPEN BIT(3)
+#define SRC_RD_RA_VCONN BIT(2)
+#define SRC_RA_OPEN BIT(1)
+#define AUDIO_ACCESS_RA_RA BIT(0)
+
+#define TYPEC_STATE_MACHINE_STATUS_REG 0x09
+#define TYPEC_ATTACH_DETACH_STATE BIT(5)
+
+#define TYPEC_SM_STATUS_REG 0x0A
+#define TYPEC_SM_VBUS_VSAFE5V BIT(5)
+#define TYPEC_SM_VBUS_VSAFE0V BIT(6)
+#define TYPEC_SM_USBIN_LT_LV BIT(7)
+
+#define TYPEC_MISC_STATUS_REG 0x0B
+#define TYPEC_WATER_DETECTION_STATUS BIT(7)
+#define SNK_SRC_MODE BIT(6)
+#define TYPEC_VBUS_DETECT BIT(5)
+#define TYPEC_VBUS_ERROR_STATUS BIT(4)
+#define TYPEC_DEBOUNCE_DONE BIT(3)
+#define CC_ORIENTATION BIT(1)
+#define CC_ATTACHED BIT(0)
+
+#define LEGACY_CABLE_STATUS_REG 0x0D
+#define TYPEC_LEGACY_CABLE_STATUS BIT(1)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS BIT(0)
+
+#define TYPEC_U_USB_STATUS_REG 0x0F
+#define U_USB_GROUND_NOVBUS BIT(6)
+#define U_USB_GROUND BIT(4)
+#define U_USB_FMB1 BIT(3)
+#define U_USB_FLOAT1 BIT(2)
+#define U_USB_FMB2 BIT(1)
+#define U_USB_FLOAT2 BIT(0)
+
+#define TYPEC_MODE_CFG_REG 0x44
+#define TYPEC_TRY_MODE_MASK GENMASK(4, 3)
+#define EN_TRY_SNK BIT(4)
+#define EN_TRY_SRC BIT(3)
+#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0)
+#define EN_SRC_ONLY BIT(2)
+#define EN_SNK_ONLY BIT(1)
+#define TYPEC_DISABLE_CMD BIT(0)
+
+#define TYPEC_VCONN_CONTROL_REG 0x46
+#define VCONN_EN_ORIENTATION BIT(2)
+#define VCONN_EN_VALUE BIT(1)
+#define VCONN_EN_SRC BIT(0)
+
+#define TYPEC_CCOUT_CONTROL_REG 0x48
+#define TYPEC_CCOUT_BUFFER_EN BIT(2)
+#define TYPEC_CCOUT_VALUE BIT(1)
+#define TYPEC_CCOUT_SRC BIT(0)
+
+#define DEBUG_ACCESS_SRC_CFG_REG 0x4C
+#define EN_UNORIENTED_DEBUG_ACCESS_SRC BIT(0)
+
+#define TYPE_C_CRUDE_SENSOR_CFG_REG 0x4e
+#define EN_SRC_CRUDE_SENSOR BIT(1)
+#define EN_SNK_CRUDE_SENSOR BIT(0)
+
+#define TYPEC_EXIT_STATE_CFG_REG 0x50
+#define BYPASS_VSAFE0V_DURING_ROLE_SWAP BIT(3)
+#define SEL_SRC_UPPER_REF BIT(2)
+#define USE_TPD_FOR_EXITING_ATTACHSRC BIT(1)
+#define EXIT_SNK_BASED_ON_CC BIT(0)
+
+#define TYPEC_CURRSRC_CFG_REG 0x52
+#define TYPEC_SRC_RP_SEL_330UA BIT(1)
+#define TYPEC_SRC_RP_SEL_180UA BIT(0)
+#define TYPEC_SRC_RP_SEL_80UA 0
+#define TYPEC_SRC_RP_SEL_MASK GENMASK(1, 0)
+
+#define TYPEC_INTERRUPT_EN_CFG_1_REG 0x5E
+#define TYPEC_LEGACY_CABLE_INT_EN BIT(7)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN BIT(6)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN BIT(5)
+#define TYPEC_TRYSINK_DETECT_INT_EN BIT(4)
+#define TYPEC_CCOUT_DETACH_INT_EN BIT(3)
+#define TYPEC_CCOUT_ATTACH_INT_EN BIT(2)
+#define TYPEC_VBUS_DEASSERT_INT_EN BIT(1)
+#define TYPEC_VBUS_ASSERT_INT_EN BIT(0)
+
+#define TYPEC_INTERRUPT_EN_CFG_2_REG 0x60
+#define TYPEC_SRC_BATT_HPWR_INT_EN BIT(6)
+#define MICRO_USB_STATE_CHANGE_INT_EN BIT(5)
+#define TYPEC_STATE_MACHINE_CHANGE_INT_EN BIT(4)
+#define TYPEC_DEBUG_ACCESS_DETECT_INT_EN BIT(3)
+#define TYPEC_WATER_DETECTION_INT_EN BIT(2)
+#define TYPEC_VBUS_ERROR_INT_EN BIT(1)
+#define TYPEC_DEBOUNCE_DONE_INT_EN BIT(0)
+
+#define TYPEC_DEBOUNCE_OPTION_REG 0x62
+#define REDUCE_TCCDEBOUNCE_TO_2MS BIT(2)
+
+#define TYPE_C_SBU_CFG_REG 0x6A
+#define SEL_SBU1_ISRC_VAL 0x04
+#define SEL_SBU2_ISRC_VAL 0x01
+
+#define TYPEC_U_USB_CFG_REG 0x70
+#define EN_MICRO_USB_FACTORY_MODE BIT(1)
+#define EN_MICRO_USB_MODE BIT(0)
+
+#define TYPEC_PMI632_U_USB_WATER_PROTECTION_CFG_REG 0x72
+
+#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG 0x73
+#define EN_MICRO_USB_WATER_PROTECTION BIT(4)
+#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK GENMASK(3, 2)
+#define MICRO_USB_DETECTION_PERIOD_CFG_MASK GENMASK(1, 0)
+
+#define TYPEC_PMI632_MICRO_USB_MODE_REG 0x73
+#define MICRO_USB_MODE_ONLY BIT(0)
+
+/* Interrupt numbers */
+#define PMIC_TYPEC_OR_RID_IRQ 0x0
+#define PMIC_TYPEC_VPD_IRQ 0x1
+#define PMIC_TYPEC_CC_STATE_IRQ 0x2
+#define PMIC_TYPEC_VCONN_OC_IRQ 0x3
+#define PMIC_TYPEC_VBUS_IRQ 0x4
+#define PMIC_TYPEC_ATTACH_DETACH_IRQ 0x5
+#define PMIC_TYPEC_LEGACY_CABLE_IRQ 0x6
+#define PMIC_TYPEC_TRY_SNK_SRC_IRQ 0x7
+
+/* Resources */
+#define PMIC_TYPEC_MAX_IRQS 0x08
+
+struct pmic_typec_port_irq_params {
+ int virq;
+ char *irq_name;
+};
+
+struct pmic_typec_port_resources {
+ unsigned int nr_irqs;
+ struct pmic_typec_port_irq_params irq_params[PMIC_TYPEC_MAX_IRQS];
+};
+
+/* API */
+struct pmic_typec;
+
+struct pmic_typec_port *qcom_pmic_typec_port_alloc(struct device *dev);
+
+int qcom_pmic_typec_port_probe(struct platform_device *pdev,
+ struct pmic_typec_port *pmic_typec_port,
+ struct pmic_typec_port_resources *res,
+ struct regmap *regmap,
+ u32 base);
+
+int qcom_pmic_typec_port_start(struct pmic_typec_port *pmic_typec_port,
+ struct tcpm_port *tcpm_port);
+
+void qcom_pmic_typec_port_stop(struct pmic_typec_port *pmic_typec_port);
+
+int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
+ enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2);
+
+int qcom_pmic_typec_port_set_cc(struct pmic_typec_port *pmic_typec_port,
+ enum typec_cc_status cc);
+
+int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port);
+
+int qcom_pmic_typec_port_set_vconn(struct pmic_typec_port *pmic_typec_port, bool on);
+
+int qcom_pmic_typec_port_start_toggling(struct pmic_typec_port *pmic_typec_port,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc);
+
+int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool on);
+
+#endif /* __QCOM_PMIC_TYPEC_H__ */
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 8da23240afbe..fc708c289a73 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -895,7 +895,7 @@ static struct i2c_driver tcpci_i2c_driver = {
.name = "tcpci",
.of_match_table = of_match_ptr(tcpci_of_match),
},
- .probe_new = tcpci_probe,
+ .probe = tcpci_probe,
.remove = tcpci_remove,
.id_table = tcpci_id,
};
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index f32cda2a5e3a..9454b12a073c 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -508,7 +508,7 @@ static struct i2c_driver max_tcpci_i2c_driver = {
.name = "maxtcpc",
.of_match_table = of_match_ptr(max_tcpci_of_match),
},
- .probe_new = max_tcpci_probe,
+ .probe = max_tcpci_probe,
.remove = max_tcpci_remove,
.id_table = max_tcpci_id,
};
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6360.c b/drivers/usb/typec/tcpm/tcpci_mt6360.c
index 6fa8fd5c8041..02b7fd302265 100644
--- a/drivers/usb/typec/tcpm/tcpci_mt6360.c
+++ b/drivers/usb/typec/tcpm/tcpci_mt6360.c
@@ -178,13 +178,12 @@ static int mt6360_tcpc_probe(struct platform_device *pdev)
return 0;
}
-static int mt6360_tcpc_remove(struct platform_device *pdev)
+static void mt6360_tcpc_remove(struct platform_device *pdev)
{
struct mt6360_tcpc_info *mti = platform_get_drvdata(pdev);
disable_irq(mti->irq);
tcpci_unregister_port(mti->tcpci);
- return 0;
}
static int __maybe_unused mt6360_tcpc_suspend(struct device *dev)
@@ -222,7 +221,7 @@ static struct platform_driver mt6360_tcpc_driver = {
.of_match_table = mt6360_tcpc_of_id,
},
.probe = mt6360_tcpc_probe,
- .remove = mt6360_tcpc_remove,
+ .remove_new = mt6360_tcpc_remove,
};
module_platform_driver(mt6360_tcpc_driver);
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6370.c b/drivers/usb/typec/tcpm/tcpci_mt6370.c
index c5bb201a5163..2a079464b398 100644
--- a/drivers/usb/typec/tcpm/tcpci_mt6370.c
+++ b/drivers/usb/typec/tcpm/tcpci_mt6370.c
@@ -178,12 +178,10 @@ static int mt6370_tcpc_probe(struct platform_device *pdev)
return 0;
}
-static int mt6370_tcpc_remove(struct platform_device *pdev)
+static void mt6370_tcpc_remove(struct platform_device *pdev)
{
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
-
- return 0;
}
static const struct of_device_id mt6370_tcpc_devid_table[] = {
@@ -198,7 +196,7 @@ static struct platform_driver mt6370_tcpc_driver = {
.of_match_table = mt6370_tcpc_devid_table,
},
.probe = mt6370_tcpc_probe,
- .remove = mt6370_tcpc_remove,
+ .remove_new = mt6370_tcpc_remove,
};
module_platform_driver(mt6370_tcpc_driver);
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index a0e9e3fe8564..17ebc5fb684f 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -412,7 +412,7 @@ static struct i2c_driver rt1711h_i2c_driver = {
.name = "rt1711h",
.of_match_table = of_match_ptr(rt1711h_of_match),
},
- .probe_new = rt1711h_probe,
+ .probe = rt1711h_probe,
.remove = rt1711h_remove,
.id_table = rt1711h_id,
};
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 3c6b0c8e2d3a..829d75ebab42 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -4885,7 +4885,8 @@ static void run_state_machine(struct tcpm_port *port)
break;
case PORT_RESET:
tcpm_reset_port(port);
- tcpm_set_cc(port, TYPEC_CC_OPEN);
+ tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
+ TYPEC_CC_RD : tcpm_rp_cc(port));
tcpm_set_state(port, PORT_RESET_WAIT_OFF,
PD_T_ERROR_RECOVERY);
break;
@@ -6339,6 +6340,27 @@ static int tcpm_psy_get_current_now(struct tcpm_port *port,
return 0;
}
+static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
+ union power_supply_propval *val)
+{
+ unsigned int src_mv, src_ma, max_src_uw = 0;
+ unsigned int i, tmp;
+
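+ /* Report the highest power (mV * mA = uW) among the source's fixed PDOs */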
+ for (i = 0; i < port->nr_source_caps; i++) {
+ u32 pdo = port->source_caps[i];
+
+ if (pdo_type(pdo) == PDO_TYPE_FIXED) {
+ src_mv = pdo_fixed_voltage(pdo);
+ src_ma = pdo_max_current(pdo);
+ tmp = src_mv * src_ma;
+ max_src_uw = tmp > max_src_uw ? tmp : max_src_uw;
+ }
+ }
+
+ val->intval = max_src_uw;
+ return 0;
+}
+
static int tcpm_psy_get_prop(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -6368,6 +6390,9 @@ static int tcpm_psy_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CURRENT_NOW:
ret = tcpm_psy_get_current_now(port, val);
break;
+ case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
+ tcpm_psy_get_input_power_limit(port, val);
+ break;
default:
ret = -EINVAL;
break;
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 20917d85d6f4..87d4abde0ea2 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -671,7 +671,7 @@ static int wcove_typec_probe(struct platform_device *pdev)
return 0;
}
-static int wcove_typec_remove(struct platform_device *pdev)
+static void wcove_typec_remove(struct platform_device *pdev)
{
struct wcove_typec *wcove = platform_get_drvdata(pdev);
unsigned int val;
@@ -684,8 +684,6 @@ static int wcove_typec_remove(struct platform_device *pdev)
tcpm_unregister_port(wcove->tcpm);
fwnode_remove_software_node(wcove->tcpc.fwnode);
-
- return 0;
}
static struct platform_driver wcove_typec_driver = {
@@ -693,7 +691,7 @@ static struct platform_driver wcove_typec_driver = {
.name = "bxt_wcove_usbc",
},
.probe = wcove_typec_probe,
- .remove = wcove_typec_remove,
+ .remove_new = wcove_typec_remove,
};
module_platform_driver(wcove_typec_driver);
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 603dbd44deba..37b56ce75f39 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -950,7 +950,7 @@ static struct i2c_driver tps6598x_i2c_driver = {
.pm = &tps6598x_pm_ops,
.of_match_table = tps6598x_of_match,
},
- .probe_new = tps6598x_probe,
+ .probe = tps6598x_probe,
.remove = tps6598x_remove,
.id_table = tps6598x_id,
};
diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
index 56bf56517f75..384b42267f1f 100644
--- a/drivers/usb/typec/ucsi/psy.c
+++ b/drivers/usb/typec/ucsi/psy.c
@@ -27,8 +27,20 @@ static enum power_supply_property ucsi_psy_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_SCOPE,
};
+static int ucsi_psy_get_scope(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u8 scope = POWER_SUPPLY_SCOPE_UNKNOWN;
+ struct device *dev = con->ucsi->dev;
+
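+ /* Optional firmware "scope" property; defaults to UNKNOWN if absent */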
+ device_property_read_u8(dev, "scope", &scope);
+ val->intval = scope;
+ return 0;
+}
+
static int ucsi_psy_get_online(struct ucsi_connector *con,
union power_supply_propval *val)
{
@@ -194,6 +206,8 @@ static int ucsi_psy_get_prop(struct power_supply *psy,
return ucsi_psy_get_current_max(con, val);
case POWER_SUPPLY_PROP_CURRENT_NOW:
return ucsi_psy_get_current_now(con, val);
+ case POWER_SUPPLY_PROP_SCOPE:
+ return ucsi_psy_get_scope(con, val);
default:
return -EINVAL;
}
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index b664ecbb798b..9b6d6b14431f 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -812,6 +812,23 @@ static void ucsi_partner_change(struct ucsi_connector *con)
break;
}
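+ /* Reflect the partner type in the port's mode (debug, audio, USB or safe) */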
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
+ case UCSI_CONSTAT_PARTNER_TYPE_DEBUG:
+ typec_set_mode(con->port, TYPEC_MODE_DEBUG);
+ break;
+ case UCSI_CONSTAT_PARTNER_TYPE_AUDIO:
+ typec_set_mode(con->port, TYPEC_MODE_AUDIO);
+ break;
+ default:
+ if (UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) ==
+ UCSI_CONSTAT_PARTNER_FLAG_USB)
+ typec_set_mode(con->port, TYPEC_STATE_USB);
+ }
+ } else {
+ typec_set_mode(con->port, TYPEC_STATE_SAFE);
+ }
+
/* Only notify USB controller if partner supports USB data */
if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
u_role = USB_ROLE_NONE;
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 217355f1f9b9..6bbf490ac401 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -212,7 +212,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
return 0;
}
-static int ucsi_acpi_remove(struct platform_device *pdev)
+static void ucsi_acpi_remove(struct platform_device *pdev)
{
struct ucsi_acpi *ua = platform_get_drvdata(pdev);
@@ -221,8 +221,6 @@ static int ucsi_acpi_remove(struct platform_device *pdev)
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify);
-
- return 0;
}
static int ucsi_acpi_resume(struct device *dev)
@@ -247,7 +245,7 @@ static struct platform_driver ucsi_acpi_platform_driver = {
.acpi_match_table = ACPI_PTR(ucsi_acpi_match),
},
.probe = ucsi_acpi_probe,
- .remove = ucsi_acpi_remove,
+ .remove_new = ucsi_acpi_remove,
};
module_platform_driver(ucsi_acpi_platform_driver);
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index e0ed465bd518..607061a37eca 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -1495,7 +1495,7 @@ static struct i2c_driver ucsi_ccg_driver = {
.acpi_match_table = amd_i2c_ucsi_match,
.of_match_table = ucsi_ccg_of_match_table,
},
- .probe_new = ucsi_ccg_probe,
+ .probe = ucsi_ccg_probe,
.remove = ucsi_ccg_remove,
.id_table = ucsi_ccg_device_id,
};
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index b454a5159896..1fe9cb5b6bd9 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -245,7 +245,7 @@ static void pmic_glink_ucsi_callback(const void *data, size_t len, void *priv)
struct pmic_glink_ucsi *ucsi = priv;
const struct pmic_glink_hdr *hdr = data;
- switch (hdr->opcode) {
+ switch (le32_to_cpu(hdr->opcode)) {
case UC_UCSI_READ_BUF_REQ:
pmic_glink_ucsi_read_ack(ucsi, data, len);
break;
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index 93fead0096b7..93d7806681cf 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -763,7 +763,7 @@ static struct i2c_driver ucsi_stm32g0_i2c_driver = {
.of_match_table = of_match_ptr(ucsi_stm32g0_typec_of_match),
.pm = pm_sleep_ptr(&ucsi_stm32g0_pm_ops),
},
- .probe_new = ucsi_stm32g0_probe,
+ .probe = ucsi_stm32g0_probe,
.remove = ucsi_stm32g0_remove,
.id_table = ucsi_stm32g0_typec_i2c_devid
};
diff --git a/drivers/usb/typec/wusb3801.c b/drivers/usb/typec/wusb3801.c
index a43a18d4b02e..6062875fb04a 100644
--- a/drivers/usb/typec/wusb3801.c
+++ b/drivers/usb/typec/wusb3801.c
@@ -420,7 +420,7 @@ static const struct of_device_id wusb3801_of_match[] = {
MODULE_DEVICE_TABLE(of, wusb3801_of_match);
static struct i2c_driver wusb3801_driver = {
- .probe_new = wusb3801_probe,
+ .probe = wusb3801_probe,
.remove = wusb3801_remove,
.driver = {
.name = "wusb3801",
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index e8c3131a8543..0a6624d37929 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -167,15 +167,13 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf)
static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
size_t count)
{
- int len;
char busid[BUSID_SIZE];
if (count < 5)
return -EINVAL;
/* busid needs to include \0 termination */
- len = strlcpy(busid, buf + 4, BUSID_SIZE);
- if (sizeof(busid) <= len)
+ if (strscpy(busid, buf + 4, BUSID_SIZE) < 0)
return -EINVAL;
if (!strncmp(buf, "add ", 4)) {
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 233265550fc6..37d1fc34e8a5 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -1393,7 +1393,7 @@ put_usb2_hcd:
return ret;
}
-static int vhci_hcd_remove(struct platform_device *pdev)
+static void vhci_hcd_remove(struct platform_device *pdev)
{
struct vhci *vhci = *((void **)dev_get_platdata(&pdev->dev));
@@ -1410,8 +1410,6 @@ static int vhci_hcd_remove(struct platform_device *pdev)
vhci->vhci_hcd_hs = NULL;
vhci->vhci_hcd_ss = NULL;
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1485,7 +1483,7 @@ static int vhci_hcd_resume(struct platform_device *pdev)
static struct platform_driver vhci_driver = {
.probe = vhci_hcd_probe,
- .remove = vhci_hcd_remove,
+ .remove_new = vhci_hcd_remove,
.suspend = vhci_hcd_suspend,
.resume = vhci_hcd_resume,
.driver = {
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index cd6ad92f3f05..656c1cb541de 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -116,4 +116,14 @@ config ALIBABA_ENI_VDPA
This driver includes a HW monitor device that
reads health values from the DPU.
+config PDS_VDPA
+ tristate "vDPA driver for AMD/Pensando DSC devices"
+ select VIRTIO_PCI_LIB
+ depends on PCI_MSI
+ depends on PDS_CORE
+ help
+ vDPA network driver for AMD/Pensando's PDS Core devices.
+ With this driver, the VirtIO dataplane can be
+ offloaded to an AMD/Pensando DSC device.
+
endif # VDPA
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index 59396ff2a318..8f53c6f3cca7 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MLX5_VDPA) += mlx5/
obj-$(CONFIG_VP_VDPA) += virtio_pci/
obj-$(CONFIG_ALIBABA_ENI_VDPA) += alibaba/
obj-$(CONFIG_SNET_VDPA) += solidrun/
+obj-$(CONFIG_PDS_VDPA) += pds/
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 5563b3a773c7..060f837a4f9f 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -69,6 +69,37 @@ static int ifcvf_read_config_range(struct pci_dev *dev,
return 0;
}
+static u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
+{
+ u16 queue_size;
+
+ vp_iowrite16(qid, &hw->common_cfg->queue_select);
+ queue_size = vp_ioread16(&hw->common_cfg->queue_size);
+
+ return queue_size;
+}
+
+/* This function returns the max allowed safe size for
+ * all virtqueues. It is the minimum size that can be
+ * supported by all virtqueues.
+ */
+u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
+{
+ u16 queue_size, max_size, qid;
+
+ max_size = ifcvf_get_vq_size(hw, 0);
+ for (qid = 1; qid < hw->nr_vring; qid++) {
+ queue_size = ifcvf_get_vq_size(hw, qid);
+ /* 0 means the queue is unavailable */
+ if (!queue_size)
+ continue;
+
+ max_size = min(queue_size, max_size);
+ }
+
+ return max_size;
+}
+
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
struct virtio_pci_cap cap;
@@ -134,6 +165,9 @@ next:
}
hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
+ hw->vring = kzalloc(sizeof(struct vring_info) * hw->nr_vring, GFP_KERNEL);
+ if (!hw->vring)
+ return -ENOMEM;
for (i = 0; i < hw->nr_vring; i++) {
vp_iowrite16(i, &hw->common_cfg->queue_select);
@@ -170,21 +204,9 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
void ifcvf_reset(struct ifcvf_hw *hw)
{
- hw->config_cb.callback = NULL;
- hw->config_cb.private = NULL;
-
ifcvf_set_status(hw, 0);
- /* flush set_status, make sure VF is stopped, reset */
- ifcvf_get_status(hw);
-}
-
-static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
-{
- if (status != 0)
- status |= ifcvf_get_status(hw);
-
- ifcvf_set_status(hw, status);
- ifcvf_get_status(hw);
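+ /* Wait for the device to acknowledge the reset by reporting status 0 */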
+ while (ifcvf_get_status(hw))
+ msleep(1);
}
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
@@ -204,11 +226,29 @@ u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
return features;
}
-u64 ifcvf_get_features(struct ifcvf_hw *hw)
+/* return provisioned vDPA dev features */
+u64 ifcvf_get_dev_features(struct ifcvf_hw *hw)
{
return hw->dev_features;
}
+u64 ifcvf_get_driver_features(struct ifcvf_hw *hw)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+ u32 features_lo, features_hi;
+ u64 features;
+
+ vp_iowrite32(0, &cfg->device_feature_select);
+ features_lo = vp_ioread32(&cfg->guest_feature);
+
+ vp_iowrite32(1, &cfg->device_feature_select);
+ features_hi = vp_ioread32(&cfg->guest_feature);
+
+ features = ((u64)features_hi << 32) | features_lo;
+
+ return features;
+}
+
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
@@ -275,7 +315,7 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
vp_iowrite8(*p++, hw->dev_cfg + offset + i);
}
-static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
+void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
@@ -286,105 +326,104 @@ static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
vp_iowrite32(features >> 32, &cfg->guest_feature);
}
-static int ifcvf_config_features(struct ifcvf_hw *hw)
-{
- ifcvf_set_features(hw, hw->req_features);
- ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);
-
- if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
- IFCVF_ERR(hw->pdev, "Failed to set FEATURES_OK status\n");
- return -EIO;
- }
-
- return 0;
-}
-
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
- struct ifcvf_lm_cfg __iomem *ifcvf_lm;
- void __iomem *avail_idx_addr;
+ struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;
u16 last_avail_idx;
- u32 q_pair_id;
- ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / 2;
- avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
- last_avail_idx = vp_ioread16(avail_idx_addr);
+ last_avail_idx = vp_ioread16(&lm_cfg->vq_state_region + qid * 2);
return last_avail_idx;
}
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
- struct ifcvf_lm_cfg __iomem *ifcvf_lm;
- void __iomem *avail_idx_addr;
- u32 q_pair_id;
+ struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;
- ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / 2;
- avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
- hw->vring[qid].last_avail_idx = num;
- vp_iowrite16(num, avail_idx_addr);
+ vp_iowrite16(num, &lm_cfg->vq_state_region + qid * 2);
return 0;
}
-static int ifcvf_hw_enable(struct ifcvf_hw *hw)
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
{
- struct virtio_pci_common_cfg __iomem *cfg;
- u32 i;
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
- cfg = hw->common_cfg;
- for (i = 0; i < hw->nr_vring; i++) {
- if (!hw->vring[i].ready)
- break;
+ vp_iowrite16(qid, &cfg->queue_select);
+ vp_iowrite16(num, &cfg->queue_size);
+}
- vp_iowrite16(i, &cfg->queue_select);
- vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
- &cfg->queue_desc_hi);
- vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
- &cfg->queue_avail_hi);
- vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
- &cfg->queue_used_hi);
- vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
- ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
- vp_iowrite16(1, &cfg->queue_enable);
- }
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+ u64 driver_area, u64 device_area)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+ vp_iowrite16(qid, &cfg->queue_select);
+ vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
return 0;
}
-static void ifcvf_hw_disable(struct ifcvf_hw *hw)
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
{
- u32 i;
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+ u16 queue_enable;
- ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
- for (i = 0; i < hw->nr_vring; i++) {
- ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
- }
+ vp_iowrite16(qid, &cfg->queue_select);
+ queue_enable = vp_ioread16(&cfg->queue_enable);
+
+ return (bool)queue_enable;
}
-int ifcvf_start_hw(struct ifcvf_hw *hw)
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
{
- ifcvf_reset(hw);
- ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
- ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
- if (ifcvf_config_features(hw) < 0)
- return -EINVAL;
+ vp_iowrite16(qid, &cfg->queue_select);
+ vp_iowrite16(ready, &cfg->queue_enable);
+}
- if (ifcvf_hw_enable(hw) < 0)
- return -EINVAL;
+static void ifcvf_reset_vring(struct ifcvf_hw *hw)
+{
+ u16 qid;
- ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
+ for (qid = 0; qid < hw->nr_vring; qid++) {
+ hw->vring[qid].cb.callback = NULL;
+ hw->vring[qid].cb.private = NULL;
+ ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
+ }
+}
- return 0;
+static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
+{
+ hw->config_cb.callback = NULL;
+ hw->config_cb.private = NULL;
+ ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
+}
+
+static void ifcvf_synchronize_irq(struct ifcvf_hw *hw)
+{
+ u32 nvectors = hw->num_msix_vectors;
+ struct pci_dev *pdev = hw->pdev;
+ int i, irq;
+
+ for (i = 0; i < nvectors; i++) {
+ irq = pci_irq_vector(pdev, i);
+ if (irq >= 0)
+ synchronize_irq(irq);
+ }
}
-void ifcvf_stop_hw(struct ifcvf_hw *hw)
+void ifcvf_stop(struct ifcvf_hw *hw)
{
- ifcvf_hw_disable(hw);
- ifcvf_reset(hw);
+ ifcvf_synchronize_irq(hw);
+ ifcvf_reset_vring(hw);
+ ifcvf_reset_config_handler(hw);
}
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index c20d1c40214e..b57849c643f6 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -24,15 +24,9 @@
#define N3000_DEVICE_ID 0x1041
#define N3000_SUBSYS_DEVICE_ID 0x001A
-/* Max 8 data queue pairs(16 queues) and one control vq for now. */
-#define IFCVF_MAX_QUEUES 17
-
#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
-#define IFCVF_QUEUE_MAX 32768
#define IFCVF_PCI_MAX_RESOURCE 6
-#define IFCVF_LM_CFG_SIZE 0x40
-#define IFCVF_LM_RING_STATE_OFFSET 0x20
#define IFCVF_LM_BAR 4
#define IFCVF_ERR(pdev, fmt, ...) dev_err(&pdev->dev, fmt, ##__VA_ARGS__)
@@ -47,12 +41,7 @@
#define MSIX_VECTOR_DEV_SHARED 3
struct vring_info {
- u64 desc;
- u64 avail;
- u64 used;
- u16 size;
u16 last_avail_idx;
- bool ready;
void __iomem *notify_addr;
phys_addr_t notify_pa;
u32 irq;
@@ -60,10 +49,18 @@ struct vring_info {
char msix_name[256];
};
+struct ifcvf_lm_cfg {
+ __le64 control;
+ __le64 status;
+ __le64 lm_mem_log_start_addr;
+ __le64 lm_mem_log_end_addr;
+ __le16 vq_state_region;
+};
+
struct ifcvf_hw {
u8 __iomem *isr;
/* Live migration */
- u8 __iomem *lm_cfg;
+ struct ifcvf_lm_cfg __iomem *lm_cfg;
/* Notification bar number */
u8 notify_bar;
u8 msix_vector_status;
@@ -74,13 +71,12 @@ struct ifcvf_hw {
phys_addr_t notify_base_pa;
u32 notify_off_multiplier;
u32 dev_type;
- u64 req_features;
u64 hw_features;
/* provisioned device features */
u64 dev_features;
struct virtio_pci_common_cfg __iomem *common_cfg;
void __iomem *dev_cfg;
- struct vring_info vring[IFCVF_MAX_QUEUES];
+ struct vring_info *vring;
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
@@ -88,6 +84,7 @@ struct ifcvf_hw {
int vqs_reused_irq;
u16 nr_vring;
/* VIRTIO_PCI_CAP_DEVICE_CFG size */
+ u32 num_msix_vectors;
u32 cap_dev_config_size;
struct pci_dev *pdev;
};
@@ -98,16 +95,6 @@ struct ifcvf_adapter {
struct ifcvf_hw *vf;
};
-struct ifcvf_vring_lm_cfg {
- u32 idx_addr[2];
- u8 reserved[IFCVF_LM_CFG_SIZE - 8];
-};
-
-struct ifcvf_lm_cfg {
- u8 reserved[IFCVF_LM_RING_STATE_OFFSET];
- struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUES];
-};
-
struct ifcvf_vdpa_mgmt_dev {
struct vdpa_mgmt_dev mdev;
struct ifcvf_hw vf;
@@ -116,8 +103,7 @@ struct ifcvf_vdpa_mgmt_dev {
};
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
-int ifcvf_start_hw(struct ifcvf_hw *hw);
-void ifcvf_stop_hw(struct ifcvf_hw *hw);
+void ifcvf_stop(struct ifcvf_hw *hw);
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
void *dst, int length);
@@ -127,7 +113,7 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw);
void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
void ifcvf_reset(struct ifcvf_hw *hw);
-u64 ifcvf_get_features(struct ifcvf_hw *hw);
+u64 ifcvf_get_dev_features(struct ifcvf_hw *hw);
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
@@ -137,4 +123,12 @@ int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+ u64 driver_area, u64 device_area);
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
+void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features);
+u64 ifcvf_get_driver_features(struct ifcvf_hw *hw);
+u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 7f78c47e40d6..e98fa8100f3c 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -125,6 +125,7 @@ static void ifcvf_free_irq(struct ifcvf_hw *vf)
ifcvf_free_vq_irq(vf);
ifcvf_free_config_irq(vf);
ifcvf_free_irq_vectors(pdev);
+ vf->num_msix_vectors = 0;
}
/* ifcvf MSIX vectors allocator, this helper tries to allocate
@@ -343,56 +344,11 @@ static int ifcvf_request_irq(struct ifcvf_hw *vf)
if (ret)
return ret;
- return 0;
-}
-
-static int ifcvf_start_datapath(struct ifcvf_adapter *adapter)
-{
- struct ifcvf_hw *vf = adapter->vf;
- u8 status;
- int ret;
-
- ret = ifcvf_start_hw(vf);
- if (ret < 0) {
- status = ifcvf_get_status(vf);
- status |= VIRTIO_CONFIG_S_FAILED;
- ifcvf_set_status(vf, status);
- }
-
- return ret;
-}
-
-static int ifcvf_stop_datapath(struct ifcvf_adapter *adapter)
-{
- struct ifcvf_hw *vf = adapter->vf;
- int i;
-
- for (i = 0; i < vf->nr_vring; i++)
- vf->vring[i].cb.callback = NULL;
-
- ifcvf_stop_hw(vf);
+ vf->num_msix_vectors = nvectors;
return 0;
}
-static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
-{
- struct ifcvf_hw *vf = adapter->vf;
- int i;
-
- for (i = 0; i < vf->nr_vring; i++) {
- vf->vring[i].last_avail_idx = 0;
- vf->vring[i].desc = 0;
- vf->vring[i].avail = 0;
- vf->vring[i].used = 0;
- vf->vring[i].ready = 0;
- vf->vring[i].cb.callback = NULL;
- vf->vring[i].cb.private = NULL;
- }
-
- ifcvf_reset(vf);
-}
-
static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
@@ -414,7 +370,7 @@ static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
u64 features;
if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
- features = ifcvf_get_features(vf);
+ features = ifcvf_get_dev_features(vf);
else {
features = 0;
IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
@@ -432,7 +388,7 @@ static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 feat
if (ret)
return ret;
- vf->req_features = features;
+ ifcvf_set_driver_features(vf, features);
return 0;
}
@@ -440,8 +396,11 @@ static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 feat
static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ u64 features;
+
+ features = ifcvf_get_driver_features(vf);
- return vf->req_features;
+ return features;
}
static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
@@ -453,13 +412,11 @@ static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
- struct ifcvf_adapter *adapter;
struct ifcvf_hw *vf;
u8 status_old;
int ret;
vf = vdpa_to_vf(vdpa_dev);
- adapter = vdpa_to_adapter(vdpa_dev);
status_old = ifcvf_get_status(vf);
if (status_old == status)
@@ -469,16 +426,9 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
!(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
ret = ifcvf_request_irq(vf);
if (ret) {
- status = ifcvf_get_status(vf);
- status |= VIRTIO_CONFIG_S_FAILED;
- ifcvf_set_status(vf, status);
+ IFCVF_ERR(vf->pdev, "failed to request irq with error %d\n", ret);
return;
}
-
- if (ifcvf_start_datapath(adapter) < 0)
- IFCVF_ERR(adapter->pdev,
- "Failed to set ifcvf vdpa status %u\n",
- status);
}
ifcvf_set_status(vf, status);
@@ -486,30 +436,24 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
- struct ifcvf_adapter *adapter;
- struct ifcvf_hw *vf;
- u8 status_old;
-
- vf = vdpa_to_vf(vdpa_dev);
- adapter = vdpa_to_adapter(vdpa_dev);
- status_old = ifcvf_get_status(vf);
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ u8 status = ifcvf_get_status(vf);
- if (status_old == 0)
- return 0;
+ ifcvf_stop(vf);
- if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
- ifcvf_stop_datapath(adapter);
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK)
ifcvf_free_irq(vf);
- }
- ifcvf_reset_vring(adapter);
+ ifcvf_reset(vf);
return 0;
}
static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
- return IFCVF_QUEUE_MAX;
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_max_vq_size(vf);
}
static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
@@ -542,14 +486,14 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- vf->vring[qid].ready = ready;
+ ifcvf_set_vq_ready(vf, qid, ready);
}
static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- return vf->vring[qid].ready;
+ return ifcvf_get_vq_ready(vf, qid);
}
static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
@@ -557,7 +501,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- vf->vring[qid].size = num;
+ ifcvf_set_vq_num(vf, qid, num);
}
static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
@@ -566,11 +510,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- vf->vring[qid].desc = desc_area;
- vf->vring[qid].avail = driver_area;
- vf->vring[qid].used = device_area;
-
- return 0;
+ return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
}
static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
@@ -892,6 +832,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err:
+ kfree(ifcvf_mgmt_dev->vf.vring);
kfree(ifcvf_mgmt_dev);
return ret;
}
@@ -902,6 +843,7 @@ static void ifcvf_remove(struct pci_dev *pdev)
ifcvf_mgmt_dev = pci_get_drvdata(pdev);
vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
+ kfree(ifcvf_mgmt_dev->vf.vring);
kfree(ifcvf_mgmt_dev);
}
@@ -911,7 +853,9 @@ static struct pci_device_id ifcvf_pci_ids[] = {
N3000_DEVICE_ID,
PCI_VENDOR_ID_INTEL,
N3000_SUBSYS_DEVICE_ID) },
- /* C5000X-PL network device */
+ /* C5000X-PL network device
+ * F2000X-PL network device
+ */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
VIRTIO_TRANS_ID_NET,
PCI_VENDOR_ID_INTEL,
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 279ac6a558d2..9138ef2fb2c8 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -83,6 +83,7 @@ struct mlx5_vq_restore_info {
u64 driver_addr;
u16 avail_index;
u16 used_index;
+ struct msi_map map;
bool ready;
bool restore;
};
@@ -118,6 +119,7 @@ struct mlx5_vdpa_virtqueue {
u16 avail_idx;
u16 used_idx;
int fw_state;
+ struct msi_map map;
/* keep last in the struct */
struct mlx5_vq_restore_info ri;
@@ -808,6 +810,13 @@ static bool counters_supported(const struct mlx5_vdpa_dev *mvdev)
BIT_ULL(MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
}
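+/* MSI-X event mode requires device support and dynamic MSI-X vector allocation */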
+static bool msix_mode_supported(struct mlx5_vdpa_dev *mvdev)
+{
+ return MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, event_mode) &
+ (1 << MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE) &&
+ pci_msix_can_alloc_dyn(mvdev->mdev->pdev);
+}
+
static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
@@ -849,9 +858,15 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
if (vq_is_tx(mvq->index))
MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn);
- MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
+ if (mvq->map.virq) {
+ MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE);
+ MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index);
+ } else {
+ MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
+ MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
+ }
+
MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
- MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
!!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
@@ -1194,6 +1209,56 @@ static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_vir
mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id);
}
+static irqreturn_t mlx5_vdpa_int_handler(int irq, void *priv)
+{
+ struct vdpa_callback *cb = priv;
+
+ if (cb->callback)
+ return cb->callback(cb->private);
+
+ return IRQ_HANDLED;
+}
+
+static void alloc_vector(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq)
+{
+ struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp;
+ struct mlx5_vdpa_irq_pool_entry *ent;
+ int err;
+ int i;
+
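+ /* Take the first unused vector from the pre-allocated pool for this VQ */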
+ for (i = 0; i < irqp->num_ent; i++) {
+ ent = &irqp->entries[i];
+ if (!ent->used) {
+ snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d",
+ dev_name(&ndev->mvdev.vdev.dev), mvq->index);
+ ent->dev_id = &ndev->event_cbs[mvq->index];
+ err = request_irq(ent->map.virq, mlx5_vdpa_int_handler, 0,
+ ent->name, ent->dev_id);
+ if (err)
+ return;
+
+ ent->used = true;
+ mvq->map = ent->map;
+ return;
+ }
+ }
+}
+
+static void dealloc_vector(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq)
+{
+ struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp;
+ int i;
+
+ for (i = 0; i < irqp->num_ent; i++)
+ if (mvq->map.virq == irqp->entries[i].map.virq) {
+ free_irq(mvq->map.virq, irqp->entries[i].dev_id);
+ irqp->entries[i].used = false;
+ return;
+ }
+}
+
static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
u16 idx = mvq->index;
@@ -1223,27 +1288,31 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
err = counter_set_alloc(ndev, mvq);
if (err)
- goto err_counter;
+ goto err_connect;
+ alloc_vector(ndev, mvq);
err = create_virtqueue(ndev, mvq);
if (err)
- goto err_connect;
+ goto err_vq;
if (mvq->ready) {
err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
if (err) {
mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
idx, err);
- goto err_connect;
+ goto err_modify;
}
}
mvq->initialized = true;
return 0;
-err_connect:
+err_modify:
+ destroy_virtqueue(ndev, mvq);
+err_vq:
+ dealloc_vector(ndev, mvq);
counter_set_dealloc(ndev, mvq);
-err_counter:
+err_connect:
qp_destroy(ndev, &mvq->vqqp);
err_vqqp:
qp_destroy(ndev, &mvq->fwqp);
@@ -1288,6 +1357,7 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
suspend_vq(ndev, mvq);
destroy_virtqueue(ndev, mvq);
+ dealloc_vector(ndev, mvq);
counter_set_dealloc(ndev, mvq);
qp_destroy(ndev, &mvq->vqqp);
qp_destroy(ndev, &mvq->fwqp);
@@ -2505,6 +2575,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
ri->desc_addr = mvq->desc_addr;
ri->device_addr = mvq->device_addr;
ri->driver_addr = mvq->driver_addr;
+ ri->map = mvq->map;
ri->restore = true;
return 0;
}
@@ -2549,6 +2620,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
mvq->desc_addr = ri->desc_addr;
mvq->device_addr = ri->device_addr;
mvq->driver_addr = ri->driver_addr;
+ mvq->map = ri->map;
}
}
@@ -2833,6 +2905,25 @@ static struct device *mlx5_get_vq_dma_dev(struct vdpa_device *vdev, u16 idx)
return mvdev->vdev.dma_dev;
}
+static void free_irqs(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_irq_pool_entry *ent;
+ int i;
+
+ if (!msix_mode_supported(&ndev->mvdev))
+ return;
+
+ if (!ndev->irqp.entries)
+ return;
+
+ for (i = ndev->irqp.num_ent - 1; i >= 0; i--) {
+ ent = ndev->irqp.entries + i;
+ if (ent->map.virq)
+ pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map);
+ }
+ kfree(ndev->irqp.entries);
+}
+
static void mlx5_vdpa_free(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2848,6 +2939,7 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
}
mlx5_vdpa_free_resources(&ndev->mvdev);
+ free_irqs(ndev);
kfree(ndev->event_cbs);
kfree(ndev->vqs);
}
@@ -2876,9 +2968,23 @@ static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device
return ret;
}
-static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
+static int mlx5_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
- return -EOPNOTSUPP;
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+
+ if (is_ctrl_vq_idx(mvdev, idx))
+ return -EOPNOTSUPP;
+
+ mvq = &ndev->vqs[idx];
+ if (!mvq->map.virq)
+ return -EOPNOTSUPP;
+
+ return mvq->map.virq;
}
static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
@@ -3155,6 +3261,34 @@ static int config_func_mtu(struct mlx5_core_dev *mdev, u16 mtu)
return err;
}
+static void allocate_irqs(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_irq_pool_entry *ent;
+ int i;
+
+ if (!msix_mode_supported(&ndev->mvdev))
+ return;
+
+ if (!ndev->mvdev.mdev->pdev)
+ return;
+
+ ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL);
+ if (!ndev->irqp.entries)
+ return;
+
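+ /* Allocate one dynamic MSI-X vector per VQ; stop at the first failure */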
+ for (i = 0; i < ndev->mvdev.max_vqs; i++) {
+ ent = ndev->irqp.entries + i;
+ snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d",
+ dev_name(&ndev->mvdev.vdev.dev), i);
+ ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL);
+ if (!ent->map.virq)
+ return;
+
+ ndev->irqp.num_ent++;
+ }
+}
+
static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
const struct vdpa_dev_set_config *add_config)
{
@@ -3233,6 +3367,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
init_mvqs(ndev);
+ allocate_irqs(ndev);
init_rwsem(&ndev->reslock);
config = &ndev->config;
@@ -3413,6 +3548,17 @@ static void mlx5v_remove(struct auxiliary_device *adev)
kfree(mgtdev);
}
+static void mlx5v_shutdown(struct auxiliary_device *auxdev)
+{
+ struct mlx5_vdpa_mgmtdev *mgtdev;
+ struct mlx5_vdpa_net *ndev;
+
+ mgtdev = auxiliary_get_drvdata(auxdev);
+ ndev = mgtdev->ndev;
+
+ free_irqs(ndev);
+}
+
static const struct auxiliary_device_id mlx5v_id_table[] = {
{ .name = MLX5_ADEV_NAME ".vnet", },
{},
@@ -3424,6 +3570,7 @@ static struct auxiliary_driver mlx5v_driver = {
.name = "vnet",
.probe = mlx5v_probe,
.remove = mlx5v_remove,
+ .shutdown = mlx5v_shutdown,
.id_table = mlx5v_id_table,
};
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
index c90a89e1de4d..36c44d9fdd16 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.h
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
@@ -26,6 +26,20 @@ static inline u16 key2vid(u64 key)
return (u16)(key >> 48) & 0xfff;
}
+#define MLX5_VDPA_IRQ_NAME_LEN 32
+
+struct mlx5_vdpa_irq_pool_entry {
+ struct msi_map map;
+ bool used;
+ char name[MLX5_VDPA_IRQ_NAME_LEN];
+ void *dev_id;
+};
+
+struct mlx5_vdpa_irq_pool {
+ int num_ent;
+ struct mlx5_vdpa_irq_pool_entry *entries;
+};
+
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;
struct mlx5_vdpa_net_resources res;
@@ -49,6 +63,7 @@ struct mlx5_vdpa_net {
struct vdpa_callback config_cb;
struct mlx5_vdpa_wq_ent cvq_ent;
struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
+ struct mlx5_vdpa_irq_pool irqp;
struct dentry *debugfs;
};
diff --git a/drivers/vdpa/pds/Makefile b/drivers/vdpa/pds/Makefile
new file mode 100644
index 000000000000..2e22418e3ab3
--- /dev/null
+++ b/drivers/vdpa/pds/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright(c) 2023 Advanced Micro Devices, Inc
+
+obj-$(CONFIG_PDS_VDPA) := pds_vdpa.o
+
+pds_vdpa-y := aux_drv.o \
+ cmds.o \
+ vdpa_dev.o
+
+pds_vdpa-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/vdpa/pds/aux_drv.c b/drivers/vdpa/pds/aux_drv.c
new file mode 100644
index 000000000000..186e9ee22eb1
--- /dev/null
+++ b/drivers/vdpa/pds/aux_drv.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/pci.h>
+#include <linux/vdpa.h>
+#include <linux/virtio_pci_modern.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_auxbus.h>
+
+#include "aux_drv.h"
+#include "debugfs.h"
+#include "vdpa_dev.h"
+
+static const struct auxiliary_device_id pds_vdpa_id_table[] = {
+ { .name = PDS_VDPA_DEV_NAME, },
+ {},
+};
+
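+/* Used by vp_modern_probe() via vd_mdev.device_id_check to validate the VF */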
+static int pds_vdpa_device_id_check(struct pci_dev *pdev)
+{
+ if (pdev->device != PCI_DEVICE_ID_PENSANDO_VDPA_VF ||
+ pdev->vendor != PCI_VENDOR_ID_PENSANDO)
+ return -ENODEV;
+
+ return PCI_DEVICE_ID_PENSANDO_VDPA_VF;
+}
+
+static int pds_vdpa_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+
+{
+ struct pds_auxiliary_dev *padev =
+ container_of(aux_dev, struct pds_auxiliary_dev, aux_dev);
+ struct device *dev = &aux_dev->dev;
+ struct pds_vdpa_aux *vdpa_aux;
+ int err;
+
+ vdpa_aux = kzalloc(sizeof(*vdpa_aux), GFP_KERNEL);
+ if (!vdpa_aux)
+ return -ENOMEM;
+
+ vdpa_aux->padev = padev;
+ vdpa_aux->vf_id = pci_iov_vf_id(padev->vf_pdev);
+ auxiliary_set_drvdata(aux_dev, vdpa_aux);
+
+ /* Get device ident info and set up the vdpa_mgmt_dev */
+ err = pds_vdpa_get_mgmt_info(vdpa_aux);
+ if (err)
+ goto err_free_mem;
+
+ /* Find the virtio configuration */
+ vdpa_aux->vd_mdev.pci_dev = padev->vf_pdev;
+ vdpa_aux->vd_mdev.device_id_check = pds_vdpa_device_id_check;
+ vdpa_aux->vd_mdev.dma_mask = DMA_BIT_MASK(PDS_CORE_ADDR_LEN);
+ err = vp_modern_probe(&vdpa_aux->vd_mdev);
+ if (err) {
+ dev_err(dev, "Unable to probe for virtio configuration: %pe\n",
+ ERR_PTR(err));
+ goto err_free_mgmt_info;
+ }
+
+ /* Let vdpa know that we can provide devices */
+ err = vdpa_mgmtdev_register(&vdpa_aux->vdpa_mdev);
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize vdpa_mgmt interface: %pe\n",
+ __func__, ERR_PTR(err));
+ goto err_free_virtio;
+ }
+
+ pds_vdpa_debugfs_add_pcidev(vdpa_aux);
+ pds_vdpa_debugfs_add_ident(vdpa_aux);
+
+ return 0;
+
+err_free_virtio:
+ vp_modern_remove(&vdpa_aux->vd_mdev);
+err_free_mgmt_info:
+ pci_free_irq_vectors(padev->vf_pdev);
+err_free_mem:
+ kfree(vdpa_aux);
+ auxiliary_set_drvdata(aux_dev, NULL);
+
+ return err;
+}
+
+static void pds_vdpa_remove(struct auxiliary_device *aux_dev)
+{
+ struct pds_vdpa_aux *vdpa_aux = auxiliary_get_drvdata(aux_dev);
+ struct device *dev = &aux_dev->dev;
+
+ vdpa_mgmtdev_unregister(&vdpa_aux->vdpa_mdev);
+ vp_modern_remove(&vdpa_aux->vd_mdev);
+ pci_free_irq_vectors(vdpa_aux->padev->vf_pdev);
+
+ pds_vdpa_debugfs_del_vdpadev(vdpa_aux);
+ kfree(vdpa_aux);
+ auxiliary_set_drvdata(aux_dev, NULL);
+
+ dev_info(dev, "Removed\n");
+}
+
+static struct auxiliary_driver pds_vdpa_driver = {
+ .name = PDS_DEV_TYPE_VDPA_STR,
+ .probe = pds_vdpa_probe,
+ .remove = pds_vdpa_remove,
+ .id_table = pds_vdpa_id_table,
+};
+
+static void __exit pds_vdpa_cleanup(void)
+{
+ auxiliary_driver_unregister(&pds_vdpa_driver);
+
+ pds_vdpa_debugfs_destroy();
+}
+module_exit(pds_vdpa_cleanup);
+
+static int __init pds_vdpa_init(void)
+{
+ int err;
+
+ pds_vdpa_debugfs_create();
+
+ err = auxiliary_driver_register(&pds_vdpa_driver);
+ if (err) {
+ pr_err("%s: aux driver register failed: %pe\n",
+ PDS_VDPA_DRV_NAME, ERR_PTR(err));
+ pds_vdpa_debugfs_destroy();
+ }
+
+ return err;
+}
+module_init(pds_vdpa_init);
+
+MODULE_DESCRIPTION(PDS_VDPA_DRV_DESCRIPTION);
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_LICENSE("GPL");
diff --git a/drivers/vdpa/pds/aux_drv.h b/drivers/vdpa/pds/aux_drv.h
new file mode 100644
index 000000000000..26b75344156e
--- /dev/null
+++ b/drivers/vdpa/pds/aux_drv.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _AUX_DRV_H_
+#define _AUX_DRV_H_
+
+#include <linux/virtio_pci_modern.h>
+
+#define PDS_VDPA_DRV_DESCRIPTION "AMD/Pensando vDPA VF Device Driver"
+#define PDS_VDPA_DRV_NAME KBUILD_MODNAME
+
+struct pds_vdpa_aux {
+ struct pds_auxiliary_dev *padev;
+
+ struct vdpa_mgmt_dev vdpa_mdev;
+ struct pds_vdpa_device *pdsv;
+
+ struct pds_vdpa_ident ident;
+
+ int vf_id;
+ struct dentry *dentry;
+ struct virtio_pci_modern_device vd_mdev;
+
+ int nintrs;
+};
+#endif /* _AUX_DRV_H_ */
diff --git a/drivers/vdpa/pds/cmds.c b/drivers/vdpa/pds/cmds.c
new file mode 100644
index 000000000000..80863a41c3cd
--- /dev/null
+++ b/drivers/vdpa/pds/cmds.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/vdpa.h>
+#include <linux/virtio_pci_modern.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_auxbus.h>
+
+#include "vdpa_dev.h"
+#include "aux_drv.h"
+#include "cmds.h"
+
+int pds_vdpa_init_hw(struct pds_vdpa_device *pdsv)
+{
+ struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
+ struct device *dev = &padev->aux_dev.dev;
+ union pds_core_adminq_cmd cmd = {
+ .vdpa_init.opcode = PDS_VDPA_CMD_INIT,
+ .vdpa_init.vdpa_index = pdsv->vdpa_index,
+ .vdpa_init.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
+ };
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ /* Initialize the vdpa/virtio device */
+ err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_init),
+ &comp, 0);
+ if (err)
+ dev_dbg(dev, "Failed to init hw, status %d: %pe\n",
+ comp.status, ERR_PTR(err));
+
+ return err;
+}
+
+int pds_vdpa_cmd_reset(struct pds_vdpa_device *pdsv)
+{
+ struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
+ struct device *dev = &padev->aux_dev.dev;
+ union pds_core_adminq_cmd cmd = {
+ .vdpa.opcode = PDS_VDPA_CMD_RESET,
+ .vdpa.vdpa_index = pdsv->vdpa_index,
+ .vdpa.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
+ };
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa), &comp, 0);
+ if (err)
+ dev_dbg(dev, "Failed to reset hw, status %d: %pe\n",
+ comp.status, ERR_PTR(err));
+
+ return err;
+}
+
+int pds_vdpa_cmd_set_status(struct pds_vdpa_device *pdsv, u8 status)
+{
+ struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
+ struct device *dev = &padev->aux_dev.dev;
+ union pds_core_adminq_cmd cmd = {
+ .vdpa_status.opcode = PDS_VDPA_CMD_STATUS_UPDATE,
+ .vdpa_status.vdpa_index = pdsv->vdpa_index,
+ .vdpa_status.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
+ .vdpa_status.status = status,
+ };
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_status), &comp, 0);
+ if (err)
+ dev_dbg(dev, "Failed to set status to %#x, error status %d: %pe\n",
+ status, comp.status, ERR_PTR(err));
+
+ return err;
+}
+
+int pds_vdpa_cmd_set_mac(struct pds_vdpa_device *pdsv, u8 *mac)
+{
+ struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
+ struct device *dev = &padev->aux_dev.dev;
+ union pds_core_adminq_cmd cmd = {
+ .vdpa_setattr.opcode = PDS_VDPA_CMD_SET_ATTR,
+ .vdpa_setattr.vdpa_index = pdsv->vdpa_index,
+ .vdpa_setattr.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
+ .vdpa_setattr.attr = PDS_VDPA_ATTR_MAC,
+ };
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ ether_addr_copy(cmd.vdpa_setattr.mac, mac);
+ err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_setattr),
+ &comp, 0);
+ if (err)
+ dev_dbg(dev, "Failed to set mac address %pM, status %d: %pe\n",
+ mac, comp.status, ERR_PTR(err));
+
+ return err;
+}
+
+int pds_vdpa_cmd_set_max_vq_pairs(struct pds_vdpa_device *pdsv, u16 max_vqp)
+{
+ struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
+ struct device *dev = &padev->aux_dev.dev;
+ union pds_core_adminq_cmd cmd = {
+ .vdpa_setattr.opcode = PDS_VDPA_CMD_SET_ATTR,
+ .vdpa_setattr.vdpa_index = pdsv->vdpa_index,
+ .vdpa_setattr.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
+ .vdpa_setattr.attr = PDS_VDPA_ATTR_MAX_VQ_PAIRS,
+ .vdpa_setattr.max_vq_pairs = cpu_to_le16(max_vqp),
+ };
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_setattr),
+ &comp, 0);
+ if (err)
+ dev_dbg(dev, "Failed to set max vq pairs %u, status %d: %pe\n",
+ max_vqp, comp.status, ERR_PTR(err));
+
+ return err;
+}
+
+int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
+ struct pds_vdpa_vq_info *vq_info)
+{
+ struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
+ struct device *dev = &padev->aux_dev.dev;
+ union pds_core_adminq_cmd cmd = {
+ .vdpa_vq_init.opcode = PDS_VDPA_CMD_VQ_INIT,
+ .vdpa_vq_init.vdpa_index = pdsv->vdpa_index,
+ .vdpa_vq_init.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
+ .vdpa_vq_init.qid = cpu_to_le16(qid),
+ .vdpa_vq_init.len = cpu_to_le16(ilog2(vq_info->q_len)),
+ .vdpa_vq_init.desc_addr = cpu_to_le64(vq_info->desc_addr),
+ .vdpa_vq_init.avail_addr = cpu_to_le64(vq_info->avail_addr),
+ .vdpa_vq_init.used_addr = cpu_to_le64(vq_info->used_addr),
+ .vdpa_vq_init.intr_index = cpu_to_le16(qid),
+ .vdpa_vq_init.avail_index = cpu_to_le16(vq_info->avail_idx ^ invert_idx),
+ .vdpa_vq_init.used_index = cpu_to_le16(vq_info->used_idx ^ invert_idx),
+ };
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ dev_dbg(dev, "%s: qid %d len %d desc_addr %#llx avail_addr %#llx used_addr %#llx\n",
+ __func__, qid, ilog2(vq_info->q_len),
+ vq_info->desc_addr, vq_info->avail_addr, vq_info->used_addr);
+
+ err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_vq_init),
+ &comp, 0);
+ if (err)
+ dev_dbg(dev, "Failed to init vq %d, status %d: %pe\n",
+ qid, comp.status, ERR_PTR(err));
+
+ return err;
+}
+
+int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
+ struct pds_vdpa_vq_info *vq_info)
+{
+ struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
+ struct device *dev = &padev->aux_dev.dev;
+ union pds_core_adminq_cmd cmd = {
+ .vdpa_vq_reset.opcode = PDS_VDPA_CMD_VQ_RESET,
+ .vdpa_vq_reset.vdpa_index = pdsv->vdpa_index,
+ .vdpa_vq_reset.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),
+ .vdpa_vq_reset.qid = cpu_to_le16(qid),
+ };
+ union pds_core_adminq_comp comp = {};
+ int err;
+
+ err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_vq_reset),
+ &comp, 0);
+ if (err) {
+ dev_dbg(dev, "Failed to reset vq %d, status %d: %pe\n",
+ qid, comp.status, ERR_PTR(err));
+ return err;
+ }
+
+ vq_info->avail_idx = le16_to_cpu(comp.vdpa_vq_reset.avail_index) ^ invert_idx;
+ vq_info->used_idx = le16_to_cpu(comp.vdpa_vq_reset.used_index) ^ invert_idx;
+
+ return 0;
+}
diff --git a/drivers/vdpa/pds/cmds.h b/drivers/vdpa/pds/cmds.h
new file mode 100644
index 000000000000..e24d85cb8f1c
--- /dev/null
+++ b/drivers/vdpa/pds/cmds.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _VDPA_CMDS_H_
+#define _VDPA_CMDS_H_
+
+int pds_vdpa_init_hw(struct pds_vdpa_device *pdsv);
+
+int pds_vdpa_cmd_reset(struct pds_vdpa_device *pdsv);
+int pds_vdpa_cmd_set_status(struct pds_vdpa_device *pdsv, u8 status);
+int pds_vdpa_cmd_set_mac(struct pds_vdpa_device *pdsv, u8 *mac);
+int pds_vdpa_cmd_set_max_vq_pairs(struct pds_vdpa_device *pdsv, u16 max_vqp);
+int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
+ struct pds_vdpa_vq_info *vq_info);
+int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
+ struct pds_vdpa_vq_info *vq_info);
+int pds_vdpa_cmd_set_features(struct pds_vdpa_device *pdsv, u64 features);
+#endif /* _VDPA_CMDS_H_ */
diff --git a/drivers/vdpa/pds/debugfs.c b/drivers/vdpa/pds/debugfs.c
new file mode 100644
index 000000000000..21a0dc0cb607
--- /dev/null
+++ b/drivers/vdpa/pds/debugfs.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+#include <linux/vdpa.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_auxbus.h>
+
+#include "aux_drv.h"
+#include "vdpa_dev.h"
+#include "debugfs.h"
+
+static struct dentry *dbfs_dir;
+
+void pds_vdpa_debugfs_create(void)
+{
+ dbfs_dir = debugfs_create_dir(PDS_VDPA_DRV_NAME, NULL);
+}
+
+void pds_vdpa_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(dbfs_dir);
+ dbfs_dir = NULL;
+}
+
+#define PRINT_SBIT_NAME(__seq, __f, __name) \
+ do { \
+ if ((__f) & (__name)) \
+ seq_printf(__seq, " %s", &#__name[16]); \
+ } while (0)
+
+static void print_status_bits(struct seq_file *seq, u8 status)
+{
+ seq_puts(seq, "status:");
+ PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_DRIVER);
+ PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_DRIVER_OK);
+ PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_FEATURES_OK);
+ PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_NEEDS_RESET);
+ PRINT_SBIT_NAME(seq, status, VIRTIO_CONFIG_S_FAILED);
+ seq_puts(seq, "\n");
+}
+
+static void print_feature_bits_all(struct seq_file *seq, u64 features)
+{
+ int i;
+
+ seq_puts(seq, "features:");
+
+ for (i = 0; i < (sizeof(u64) * 8); i++) {
+ u64 mask = BIT_ULL(i);
+
+ switch (features & mask) {
+ case BIT_ULL(VIRTIO_NET_F_CSUM):
+ seq_puts(seq, " VIRTIO_NET_F_CSUM");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_GUEST_CSUM):
+ seq_puts(seq, " VIRTIO_NET_F_GUEST_CSUM");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS):
+ seq_puts(seq, " VIRTIO_NET_F_CTRL_GUEST_OFFLOADS");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_MTU):
+ seq_puts(seq, " VIRTIO_NET_F_MTU");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_MAC):
+ seq_puts(seq, " VIRTIO_NET_F_MAC");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_GUEST_TSO4):
+ seq_puts(seq, " VIRTIO_NET_F_GUEST_TSO4");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_GUEST_TSO6):
+ seq_puts(seq, " VIRTIO_NET_F_GUEST_TSO6");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_GUEST_ECN):
+ seq_puts(seq, " VIRTIO_NET_F_GUEST_ECN");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_GUEST_UFO):
+ seq_puts(seq, " VIRTIO_NET_F_GUEST_UFO");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_HOST_TSO4):
+ seq_puts(seq, " VIRTIO_NET_F_HOST_TSO4");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_HOST_TSO6):
+ seq_puts(seq, " VIRTIO_NET_F_HOST_TSO6");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_HOST_ECN):
+ seq_puts(seq, " VIRTIO_NET_F_HOST_ECN");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_HOST_UFO):
+ seq_puts(seq, " VIRTIO_NET_F_HOST_UFO");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_MRG_RXBUF):
+ seq_puts(seq, " VIRTIO_NET_F_MRG_RXBUF");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_STATUS):
+ seq_puts(seq, " VIRTIO_NET_F_STATUS");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_CTRL_VQ):
+ seq_puts(seq, " VIRTIO_NET_F_CTRL_VQ");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_CTRL_RX):
+ seq_puts(seq, " VIRTIO_NET_F_CTRL_RX");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_CTRL_VLAN):
+ seq_puts(seq, " VIRTIO_NET_F_CTRL_VLAN");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA):
+ seq_puts(seq, " VIRTIO_NET_F_CTRL_RX_EXTRA");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_GUEST_ANNOUNCE):
+ seq_puts(seq, " VIRTIO_NET_F_GUEST_ANNOUNCE");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_MQ):
+ seq_puts(seq, " VIRTIO_NET_F_MQ");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR):
+ seq_puts(seq, " VIRTIO_NET_F_CTRL_MAC_ADDR");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_HASH_REPORT):
+ seq_puts(seq, " VIRTIO_NET_F_HASH_REPORT");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_RSS):
+ seq_puts(seq, " VIRTIO_NET_F_RSS");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_RSC_EXT):
+ seq_puts(seq, " VIRTIO_NET_F_RSC_EXT");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_STANDBY):
+ seq_puts(seq, " VIRTIO_NET_F_STANDBY");
+ break;
+ case BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX):
+ seq_puts(seq, " VIRTIO_NET_F_SPEED_DUPLEX");
+ break;
+ case BIT_ULL(VIRTIO_F_NOTIFY_ON_EMPTY):
+ seq_puts(seq, " VIRTIO_F_NOTIFY_ON_EMPTY");
+ break;
+ case BIT_ULL(VIRTIO_F_ANY_LAYOUT):
+ seq_puts(seq, " VIRTIO_F_ANY_LAYOUT");
+ break;
+ case BIT_ULL(VIRTIO_F_VERSION_1):
+ seq_puts(seq, " VIRTIO_F_VERSION_1");
+ break;
+ case BIT_ULL(VIRTIO_F_ACCESS_PLATFORM):
+ seq_puts(seq, " VIRTIO_F_ACCESS_PLATFORM");
+ break;
+ case BIT_ULL(VIRTIO_F_RING_PACKED):
+ seq_puts(seq, " VIRTIO_F_RING_PACKED");
+ break;
+ case BIT_ULL(VIRTIO_F_ORDER_PLATFORM):
+ seq_puts(seq, " VIRTIO_F_ORDER_PLATFORM");
+ break;
+ case BIT_ULL(VIRTIO_F_SR_IOV):
+ seq_puts(seq, " VIRTIO_F_SR_IOV");
+ break;
+ case 0:
+ break;
+ default:
+ seq_printf(seq, " bit_%d", i);
+ break;
+ }
+ }
+
+ seq_puts(seq, "\n");
+}
+
+void pds_vdpa_debugfs_add_pcidev(struct pds_vdpa_aux *vdpa_aux)
+{
+ vdpa_aux->dentry = debugfs_create_dir(pci_name(vdpa_aux->padev->vf_pdev), dbfs_dir);
+}
+
+static int identity_show(struct seq_file *seq, void *v)
+{
+ struct pds_vdpa_aux *vdpa_aux = seq->private;
+ struct vdpa_mgmt_dev *mgmt;
+
+ seq_printf(seq, "aux_dev: %s\n",
+ dev_name(&vdpa_aux->padev->aux_dev.dev));
+
+ mgmt = &vdpa_aux->vdpa_mdev;
+ seq_printf(seq, "max_vqs: %d\n", mgmt->max_supported_vqs);
+ seq_printf(seq, "config_attr_mask: %#llx\n", mgmt->config_attr_mask);
+ seq_printf(seq, "supported_features: %#llx\n", mgmt->supported_features);
+ print_feature_bits_all(seq, mgmt->supported_features);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(identity);
+
+void pds_vdpa_debugfs_add_ident(struct pds_vdpa_aux *vdpa_aux)
+{
+ debugfs_create_file("identity", 0400, vdpa_aux->dentry,
+ vdpa_aux, &identity_fops);
+}
+
+static int config_show(struct seq_file *seq, void *v)
+{
+ struct pds_vdpa_device *pdsv = seq->private;
+ struct virtio_net_config vc;
+ u64 driver_features;
+ u8 status;
+
+ memcpy_fromio(&vc, pdsv->vdpa_aux->vd_mdev.device,
+ sizeof(struct virtio_net_config));
+
+ seq_printf(seq, "mac: %pM\n", vc.mac);
+ seq_printf(seq, "max_virtqueue_pairs: %d\n",
+ __virtio16_to_cpu(true, vc.max_virtqueue_pairs));
+ seq_printf(seq, "mtu: %d\n", __virtio16_to_cpu(true, vc.mtu));
+ seq_printf(seq, "speed: %d\n", le32_to_cpu(vc.speed));
+ seq_printf(seq, "duplex: %d\n", vc.duplex);
+ seq_printf(seq, "rss_max_key_size: %d\n", vc.rss_max_key_size);
+ seq_printf(seq, "rss_max_indirection_table_length: %d\n",
+ le16_to_cpu(vc.rss_max_indirection_table_length));
+ seq_printf(seq, "supported_hash_types: %#x\n",
+ le32_to_cpu(vc.supported_hash_types));
+ seq_printf(seq, "vn_status: %#x\n",
+ __virtio16_to_cpu(true, vc.status));
+
+ status = vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
+ seq_printf(seq, "dev_status: %#x\n", status);
+ print_status_bits(seq, status);
+
+ seq_printf(seq, "req_features: %#llx\n", pdsv->req_features);
+ print_feature_bits_all(seq, pdsv->req_features);
+ driver_features = vp_modern_get_driver_features(&pdsv->vdpa_aux->vd_mdev);
+ seq_printf(seq, "driver_features: %#llx\n", driver_features);
+ print_feature_bits_all(seq, driver_features);
+ seq_printf(seq, "vdpa_index: %d\n", pdsv->vdpa_index);
+ seq_printf(seq, "num_vqs: %d\n", pdsv->num_vqs);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(config);
+
+static int vq_show(struct seq_file *seq, void *v)
+{
+ struct pds_vdpa_vq_info *vq = seq->private;
+
+ seq_printf(seq, "ready: %d\n", vq->ready);
+ seq_printf(seq, "desc_addr: %#llx\n", vq->desc_addr);
+ seq_printf(seq, "avail_addr: %#llx\n", vq->avail_addr);
+ seq_printf(seq, "used_addr: %#llx\n", vq->used_addr);
+ seq_printf(seq, "q_len: %d\n", vq->q_len);
+ seq_printf(seq, "qid: %d\n", vq->qid);
+
+ seq_printf(seq, "doorbell: %#llx\n", vq->doorbell);
+ seq_printf(seq, "avail_idx: %d\n", vq->avail_idx);
+ seq_printf(seq, "used_idx: %d\n", vq->used_idx);
+ seq_printf(seq, "irq: %d\n", vq->irq);
+ seq_printf(seq, "irq-name: %s\n", vq->irq_name);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(vq);
+
+void pds_vdpa_debugfs_add_vdpadev(struct pds_vdpa_aux *vdpa_aux)
+{
+ int i;
+
+ debugfs_create_file("config", 0400, vdpa_aux->dentry, vdpa_aux->pdsv, &config_fops);
+
+ for (i = 0; i < vdpa_aux->pdsv->num_vqs; i++) {
+ char name[8];
+
+ snprintf(name, sizeof(name), "vq%02d", i);
+ debugfs_create_file(name, 0400, vdpa_aux->dentry,
+ &vdpa_aux->pdsv->vqs[i], &vq_fops);
+ }
+}
+
+void pds_vdpa_debugfs_del_vdpadev(struct pds_vdpa_aux *vdpa_aux)
+{
+ debugfs_remove_recursive(vdpa_aux->dentry);
+ vdpa_aux->dentry = NULL;
+}
+
+void pds_vdpa_debugfs_reset_vdpadev(struct pds_vdpa_aux *vdpa_aux)
+{
+ /* We don't keep track of the entries, so remove them all
+ * and then rebuild the basics
+ */
+ pds_vdpa_debugfs_del_vdpadev(vdpa_aux);
+ pds_vdpa_debugfs_add_pcidev(vdpa_aux);
+ pds_vdpa_debugfs_add_ident(vdpa_aux);
+}
diff --git a/drivers/vdpa/pds/debugfs.h b/drivers/vdpa/pds/debugfs.h
new file mode 100644
index 000000000000..c088a4e8f1e9
--- /dev/null
+++ b/drivers/vdpa/pds/debugfs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDS_VDPA_DEBUGFS_H_
+#define _PDS_VDPA_DEBUGFS_H_
+
+#include <linux/debugfs.h>
+
+void pds_vdpa_debugfs_create(void);
+void pds_vdpa_debugfs_destroy(void);
+void pds_vdpa_debugfs_add_pcidev(struct pds_vdpa_aux *vdpa_aux);
+void pds_vdpa_debugfs_add_ident(struct pds_vdpa_aux *vdpa_aux);
+void pds_vdpa_debugfs_add_vdpadev(struct pds_vdpa_aux *vdpa_aux);
+void pds_vdpa_debugfs_del_vdpadev(struct pds_vdpa_aux *vdpa_aux);
+void pds_vdpa_debugfs_reset_vdpadev(struct pds_vdpa_aux *vdpa_aux);
+
+#endif /* _PDS_VDPA_DEBUGFS_H_ */
diff --git a/drivers/vdpa/pds/vdpa_dev.c b/drivers/vdpa/pds/vdpa_dev.c
new file mode 100644
index 000000000000..5071a4d58f8d
--- /dev/null
+++ b/drivers/vdpa/pds/vdpa_dev.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+#include <linux/vdpa.h>
+#include <uapi/linux/vdpa.h>
+#include <linux/virtio_pci_modern.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_auxbus.h>
+
+#include "vdpa_dev.h"
+#include "aux_drv.h"
+#include "cmds.h"
+#include "debugfs.h"
+
+static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev);
+
+static struct pds_vdpa_device *vdpa_to_pdsv(struct vdpa_device *vdpa_dev)
+{
+ return container_of(vdpa_dev, struct pds_vdpa_device, vdpa_dev);
+}
+
+static int pds_vdpa_notify_handler(struct notifier_block *nb,
+ unsigned long ecode,
+ void *data)
+{
+ struct pds_vdpa_device *pdsv = container_of(nb, struct pds_vdpa_device, nb);
+ struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
+
+ dev_dbg(dev, "%s: event code %lu\n", __func__, ecode);
+
+ if (ecode == PDS_EVENT_RESET || ecode == PDS_EVENT_LINK_CHANGE) {
+ if (pdsv->config_cb.callback)
+ pdsv->config_cb.callback(pdsv->config_cb.private);
+ }
+
+ return 0;
+}
+
+static int pds_vdpa_register_event_handler(struct pds_vdpa_device *pdsv)
+{
+ struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
+ struct notifier_block *nb = &pdsv->nb;
+ int err;
+
+ if (!nb->notifier_call) {
+ nb->notifier_call = pds_vdpa_notify_handler;
+ err = pdsc_register_notify(nb);
+ if (err) {
+ nb->notifier_call = NULL;
+ dev_err(dev, "failed to register pds event handler: %ps\n",
+ ERR_PTR(err));
+ return -EINVAL;
+ }
+ dev_dbg(dev, "pds event handler registered\n");
+ }
+
+ return 0;
+}
+
+static void pds_vdpa_unregister_event_handler(struct pds_vdpa_device *pdsv)
+{
+ if (pdsv->nb.notifier_call) {
+ pdsc_unregister_notify(&pdsv->nb);
+ pdsv->nb.notifier_call = NULL;
+ }
+}
+
+static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
+ u64 desc_addr, u64 driver_addr, u64 device_addr)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ pdsv->vqs[qid].desc_addr = desc_addr;
+ pdsv->vqs[qid].avail_addr = driver_addr;
+ pdsv->vqs[qid].used_addr = device_addr;
+
+ return 0;
+}
+
+static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ pdsv->vqs[qid].q_len = num;
+}
+
+static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ iowrite16(qid, pdsv->vqs[qid].notify);
+}
+
+static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
+ struct vdpa_callback *cb)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ pdsv->vqs[qid].event_cb = *cb;
+}
+
+static irqreturn_t pds_vdpa_isr(int irq, void *data)
+{
+ struct pds_vdpa_vq_info *vq;
+
+ vq = data;
+ if (vq->event_cb.callback)
+ vq->event_cb.callback(vq->event_cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
+{
+ if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
+ return;
+
+ free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
+ pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
+}
+
+static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
+ struct device *dev = &pdsv->vdpa_dev.dev;
+ u64 driver_features;
+ u16 invert_idx = 0;
+ int irq;
+ int err;
+
+ dev_dbg(dev, "%s: qid %d ready %d => %d\n",
+ __func__, qid, pdsv->vqs[qid].ready, ready);
+ if (ready == pdsv->vqs[qid].ready)
+ return;
+
+ driver_features = pds_vdpa_get_driver_features(vdpa_dev);
+ if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
+ invert_idx = PDS_VDPA_PACKED_INVERT_IDX;
+
+ if (ready) {
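+ /* qid doubles as the MSI-X vector index for this queue's interrupt */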
+ irq = pci_irq_vector(pdev, qid);
+ snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
+ "vdpa-%s-%d", dev_name(dev), qid);
+
+ err = request_irq(irq, pds_vdpa_isr, 0,
+ pdsv->vqs[qid].irq_name, &pdsv->vqs[qid]);
+ if (err) {
+ dev_err(dev, "%s: no irq for qid %d: %pe\n",
+ __func__, qid, ERR_PTR(err));
+ return;
+ }
+ pdsv->vqs[qid].irq = irq;
+
+ /* Pass the vq setup info to the DSC using the adminq, gathering
+ * everything into one message so the FW can do its full setup
+ * in a single operation
+ */
+ err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
+ if (err) {
+ dev_err(dev, "Failed to init vq %d: %pe\n",
+ qid, ERR_PTR(err));
+ pds_vdpa_release_irq(pdsv, qid);
+ ready = false;
+ }
+ } else {
+ err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
+ if (err)
+ dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
+ __func__, qid, ERR_PTR(err));
+ pds_vdpa_release_irq(pdsv, qid);
+ }
+
+ pdsv->vqs[qid].ready = ready;
+}
+
+static bool pds_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ return pdsv->vqs[qid].ready;
+}
+
+static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+ const struct vdpa_vq_state *state)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
+ struct device *dev = &padev->aux_dev.dev;
+ u64 driver_features;
+ u16 avail;
+ u16 used;
+
+ if (pdsv->vqs[qid].ready) {
+ dev_err(dev, "Setting device position is denied while vq is enabled\n");
+ return -EINVAL;
+ }
+
+ driver_features = pds_vdpa_get_driver_features(vdpa_dev);
+ if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
+ avail = state->packed.last_avail_idx |
+ (state->packed.last_avail_counter << 15);
+ used = state->packed.last_used_idx |
+ (state->packed.last_used_counter << 15);
+
+ /* The avail and used index are stored with the packed wrap
+ * counter bit inverted. This way, in case set_vq_state is
+ * not called, the initial value can be set to zero prior to
+ * feature negotiation, and it is good for both packed and
+ * split vq.
+ */
+ avail ^= PDS_VDPA_PACKED_INVERT_IDX;
+ used ^= PDS_VDPA_PACKED_INVERT_IDX;
+ } else {
+ avail = state->split.avail_index;
+ /* state->split does not provide a used_index:
+ * the vq will be set to "empty" here, and the vq will read
+ * the current used index the next time the vq is kicked.
+ */
+ used = avail;
+ }
+
+ if (used != avail) {
+ dev_dbg(dev, "Setting used equal to avail, for interoperability\n");
+ used = avail;
+ }
+
+ pdsv->vqs[qid].avail_idx = avail;
+ pdsv->vqs[qid].used_idx = used;
+
+ return 0;
+}
+
+static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+ struct vdpa_vq_state *state)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
+ struct device *dev = &padev->aux_dev.dev;
+ u64 driver_features;
+ u16 avail;
+ u16 used;
+
+ if (pdsv->vqs[qid].ready) {
+ dev_err(dev, "Getting device position is denied while vq is enabled\n");
+ return -EINVAL;
+ }
+
+ avail = pdsv->vqs[qid].avail_idx;
+ used = pdsv->vqs[qid].used_idx;
+
+ driver_features = pds_vdpa_get_driver_features(vdpa_dev);
+ if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
+ avail ^= PDS_VDPA_PACKED_INVERT_IDX;
+ used ^= PDS_VDPA_PACKED_INVERT_IDX;
+
+ state->packed.last_avail_idx = avail & 0x7fff;
+ state->packed.last_avail_counter = avail >> 15;
+ state->packed.last_used_idx = used & 0x7fff;
+ state->packed.last_used_counter = used >> 15;
+ } else {
+ state->split.avail_index = avail;
+ /* state->split does not provide a used_index. */
+ }
+
+ return 0;
+}
+
+static struct vdpa_notification_area
+pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ struct virtio_pci_modern_device *vd_mdev;
+ struct vdpa_notification_area area;
+
+ area.addr = pdsv->vqs[qid].notify_pa;
+
+ vd_mdev = &pdsv->vdpa_aux->vd_mdev;
+ if (!vd_mdev->notify_offset_multiplier)
+ area.size = PDS_PAGE_SIZE;
+ else
+ area.size = vd_mdev->notify_offset_multiplier;
+
+ return area;
+}
+
+static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ return pdsv->vqs[qid].irq;
+}
+
+static u32 pds_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
+{
+ return PDS_PAGE_SIZE;
+}
+
+static u32 pds_vdpa_get_vq_group(struct vdpa_device *vdpa_dev, u16 idx)
+{
+ return 0;
+}
+
+static u64 pds_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ return pdsv->supported_features;
+}
+
+static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ struct device *dev = &pdsv->vdpa_dev.dev;
+ u64 driver_features;
+ u64 nego_features;
+ u64 missing;
+
+ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
+ dev_err(dev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
+ return -EOPNOTSUPP;
+ }
+
+ pdsv->req_features = features;
+
+ /* Check for valid feature bits */
+ nego_features = features & le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
+ missing = pdsv->req_features & ~nego_features;
+ if (missing) {
+ dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n",
+ pdsv->req_features, missing);
+ return -EOPNOTSUPP;
+ }
+
+ driver_features = pds_vdpa_get_driver_features(vdpa_dev);
+ dev_dbg(dev, "%s: %#llx => %#llx\n",
+ __func__, driver_features, nego_features);
+
+ if (driver_features == nego_features)
+ return 0;
+
+ vp_modern_set_features(&pdsv->vdpa_aux->vd_mdev, nego_features);
+
+ return 0;
+}
+
+static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ return vp_modern_get_driver_features(&pdsv->vdpa_aux->vd_mdev);
+}
+
+static void pds_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
+ struct vdpa_callback *cb)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ pdsv->config_cb.callback = cb->callback;
+ pdsv->config_cb.private = cb->private;
+}
+
+static u16 pds_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ /* qemu has assert() that vq_num_max <= VIRTQUEUE_MAX_SIZE (1024) */
+ return min_t(u16, 1024, BIT(le16_to_cpu(pdsv->vdpa_aux->ident.max_qlen)));
+}
+
+static u32 pds_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
+{
+ return VIRTIO_ID_NET;
+}
+
+static u32 pds_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
+{
+ return PCI_VENDOR_ID_PENSANDO;
+}
+
+static u8 pds_vdpa_get_status(struct vdpa_device *vdpa_dev)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+
+ return vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
+}
+
+static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ struct device *dev = &pdsv->vdpa_dev.dev;
+ u8 old_status;
+ int i;
+
+ old_status = pds_vdpa_get_status(vdpa_dev);
+ dev_dbg(dev, "%s: old %#x new %#x\n", __func__, old_status, status);
+
+ pds_vdpa_cmd_set_status(pdsv, status);
+
+ /* Note: still working with FW on the need for this reset cmd */
+ if (status == 0) {
+ pds_vdpa_cmd_reset(pdsv);
+
+ for (i = 0; i < pdsv->num_vqs; i++) {
+ pdsv->vqs[i].avail_idx = 0;
+ pdsv->vqs[i].used_idx = 0;
+ }
+ }
+
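+ /* Once the driver has acked FEATURES_OK, map each vq's notification area */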
+ if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) {
+ for (i = 0; i < pdsv->num_vqs; i++) {
+ pdsv->vqs[i].notify =
+ vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
+ i, &pdsv->vqs[i].notify_pa);
+ }
+ }
+}
+
+static int pds_vdpa_reset(struct vdpa_device *vdpa_dev)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ struct device *dev;
+ int err = 0;
+ u8 status;
+ int i;
+
+ dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
+ status = pds_vdpa_get_status(vdpa_dev);
+
+ if (status == 0)
+ return 0;
+
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ /* Reset the vqs */
+ for (i = 0; i < pdsv->num_vqs && !err; i++) {
+ err = pds_vdpa_cmd_reset_vq(pdsv, i, 0, &pdsv->vqs[i]);
+ if (err)
+ dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
+ __func__, i, ERR_PTR(err));
+ pds_vdpa_release_irq(pdsv, i);
+ memset(&pdsv->vqs[i], 0, sizeof(pdsv->vqs[0]));
+ pdsv->vqs[i].ready = false;
+ }
+ }
+
+ pds_vdpa_set_status(vdpa_dev, 0);
+
+ return 0;
+}
+
+static size_t pds_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
+{
+ return sizeof(struct virtio_net_config);
+}
+
+static void pds_vdpa_get_config(struct vdpa_device *vdpa_dev,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ void __iomem *device;
+
+ if (offset + len > sizeof(struct virtio_net_config)) {
+ WARN(true, "%s: bad read, offset %d len %d\n", __func__, offset, len);
+ return;
+ }
+
+ device = pdsv->vdpa_aux->vd_mdev.device;
+ memcpy_fromio(buf, device + offset, len);
+}
+
+static void pds_vdpa_set_config(struct vdpa_device *vdpa_dev,
+ unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ void __iomem *device;
+
+ if (offset + len > sizeof(struct virtio_net_config)) {
+ WARN(true, "%s: bad write, offset %d len %d\n", __func__, offset, len);
+ return;
+ }
+
+ device = pdsv->vdpa_aux->vd_mdev.device;
+ memcpy_toio(device + offset, buf, len);
+}
+
+static const struct vdpa_config_ops pds_vdpa_ops = {
+ .set_vq_address = pds_vdpa_set_vq_address,
+ .set_vq_num = pds_vdpa_set_vq_num,
+ .kick_vq = pds_vdpa_kick_vq,
+ .set_vq_cb = pds_vdpa_set_vq_cb,
+ .set_vq_ready = pds_vdpa_set_vq_ready,
+ .get_vq_ready = pds_vdpa_get_vq_ready,
+ .set_vq_state = pds_vdpa_set_vq_state,
+ .get_vq_state = pds_vdpa_get_vq_state,
+ .get_vq_notification = pds_vdpa_get_vq_notification,
+ .get_vq_irq = pds_vdpa_get_vq_irq,
+ .get_vq_align = pds_vdpa_get_vq_align,
+ .get_vq_group = pds_vdpa_get_vq_group,
+
+ .get_device_features = pds_vdpa_get_device_features,
+ .set_driver_features = pds_vdpa_set_driver_features,
+ .get_driver_features = pds_vdpa_get_driver_features,
+ .set_config_cb = pds_vdpa_set_config_cb,
+ .get_vq_num_max = pds_vdpa_get_vq_num_max,
+ .get_device_id = pds_vdpa_get_device_id,
+ .get_vendor_id = pds_vdpa_get_vendor_id,
+ .get_status = pds_vdpa_get_status,
+ .set_status = pds_vdpa_set_status,
+ .reset = pds_vdpa_reset,
+ .get_config_size = pds_vdpa_get_config_size,
+ .get_config = pds_vdpa_get_config,
+ .set_config = pds_vdpa_set_config,
+};
+static struct virtio_device_id pds_vdpa_id_table[] = {
+ {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
+ {0},
+};
+
+static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ const struct vdpa_dev_set_config *add_config)
+{
+ struct pds_vdpa_aux *vdpa_aux;
+ struct pds_vdpa_device *pdsv;
+ struct vdpa_mgmt_dev *mgmt;
+ u16 fw_max_vqs, vq_pairs;
+ struct device *dma_dev;
+ struct pci_dev *pdev;
+ struct device *dev;
+ u8 mac[ETH_ALEN];
+ int err;
+ int i;
+
+ vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
+ dev = &vdpa_aux->padev->aux_dev.dev;
+ mgmt = &vdpa_aux->vdpa_mdev;
+
+ if (vdpa_aux->pdsv) {
+ dev_warn(dev, "Multiple vDPA devices on a VF is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
+ pdsv = vdpa_alloc_device(struct pds_vdpa_device, vdpa_dev,
+ dev, &pds_vdpa_ops, 1, 1, name, false);
+ if (IS_ERR(pdsv)) {
+ dev_err(dev, "Failed to allocate vDPA structure: %pe\n", pdsv);
+ return PTR_ERR(pdsv);
+ }
+
+ vdpa_aux->pdsv = pdsv;
+ pdsv->vdpa_aux = vdpa_aux;
+
+ pdev = vdpa_aux->padev->vf_pdev;
+ dma_dev = &pdev->dev;
+ pdsv->vdpa_dev.dma_dev = dma_dev;
+
+ pdsv->supported_features = mgmt->supported_features;
+
+ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
+ u64 unsupp_features =
+ add_config->device_features & ~mgmt->supported_features;
+
+ if (unsupp_features) {
+ dev_err(dev, "Unsupported features: %#llx\n", unsupp_features);
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+
+ pdsv->supported_features = add_config->device_features;
+ }
+
+ err = pds_vdpa_cmd_reset(pdsv);
+ if (err) {
+ dev_err(dev, "Failed to reset hw: %pe\n", ERR_PTR(err));
+ goto err_unmap;
+ }
+
+ err = pds_vdpa_init_hw(pdsv);
+ if (err) {
+ dev_err(dev, "Failed to init hw: %pe\n", ERR_PTR(err));
+ goto err_unmap;
+ }
+
+ fw_max_vqs = le16_to_cpu(pdsv->vdpa_aux->ident.max_vqs);
+ vq_pairs = fw_max_vqs / 2;
+
+ /* Make sure we have the queues being requested */
+ if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
+ vq_pairs = add_config->net.max_vq_pairs;
+
+ pdsv->num_vqs = 2 * vq_pairs;
+ if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
+ pdsv->num_vqs++;
+
+ if (pdsv->num_vqs > fw_max_vqs) {
+ dev_err(dev, "%s: queue count requested %u greater than max %u\n",
+ __func__, pdsv->num_vqs, fw_max_vqs);
+ err = -ENOSPC;
+ goto err_unmap;
+ }
+
+ if (pdsv->num_vqs != fw_max_vqs) {
+ err = pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs);
+ if (err) {
+ dev_err(dev, "Failed to set max_vq_pairs: %pe\n",
+ ERR_PTR(err));
+ goto err_unmap;
+ }
+ }
+
+ /* Set a mac: use the user config if provided,
+ * otherwise assign a random mac if the default is 00:..:00
+ */
+ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
+ ether_addr_copy(mac, add_config->net.mac);
+ pds_vdpa_cmd_set_mac(pdsv, mac);
+ } else {
+ struct virtio_net_config __iomem *vc;
+
+ vc = pdsv->vdpa_aux->vd_mdev.device;
+ memcpy_fromio(mac, vc->mac, sizeof(mac));
+ if (is_zero_ether_addr(mac)) {
+ eth_random_addr(mac);
+ dev_info(dev, "setting random mac %pM\n", mac);
+ pds_vdpa_cmd_set_mac(pdsv, mac);
+ }
+ }
+
+ for (i = 0; i < pdsv->num_vqs; i++) {
+ pdsv->vqs[i].qid = i;
+ pdsv->vqs[i].pdsv = pdsv;
+ pdsv->vqs[i].irq = VIRTIO_MSI_NO_VECTOR;
+ pdsv->vqs[i].notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
+ i, &pdsv->vqs[i].notify_pa);
+ }
+
+ pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;
+
+ err = pds_vdpa_register_event_handler(pdsv);
+ if (err) {
+ dev_err(dev, "Failed to register for PDS events: %pe\n", ERR_PTR(err));
+ goto err_unmap;
+ }
+
+ /* We use the _vdpa_register_device() call rather than the
+ * vdpa_register_device() to avoid a deadlock because our
+ * dev_add() is called with the vdpa_dev_lock already set
+ * by vdpa_nl_cmd_dev_add_set_doit()
+ */
+ err = _vdpa_register_device(&pdsv->vdpa_dev, pdsv->num_vqs);
+ if (err) {
+ dev_err(dev, "Failed to register to vDPA bus: %pe\n", ERR_PTR(err));
+ goto err_unevent;
+ }
+
+ pds_vdpa_debugfs_add_vdpadev(vdpa_aux);
+
+ return 0;
+
+err_unevent:
+ pds_vdpa_unregister_event_handler(pdsv);
+err_unmap:
+ put_device(&pdsv->vdpa_dev.dev);
+ vdpa_aux->pdsv = NULL;
+ return err;
+}
+
+static void pds_vdpa_dev_del(struct vdpa_mgmt_dev *mdev,
+ struct vdpa_device *vdpa_dev)
+{
+ struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
+ struct pds_vdpa_aux *vdpa_aux;
+
+ pds_vdpa_unregister_event_handler(pdsv);
+
+ vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
+ _vdpa_unregister_device(vdpa_dev);
+
+ pds_vdpa_cmd_reset(vdpa_aux->pdsv);
+ pds_vdpa_debugfs_reset_vdpadev(vdpa_aux);
+
+ vdpa_aux->pdsv = NULL;
+
+ dev_info(&vdpa_aux->padev->aux_dev.dev, "Removed vdpa device\n");
+}
+
+static const struct vdpa_mgmtdev_ops pds_vdpa_mgmt_dev_ops = {
+ .dev_add = pds_vdpa_dev_add,
+ .dev_del = pds_vdpa_dev_del
+};
+
+int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux)
+{
+ union pds_core_adminq_cmd cmd = {
+ .vdpa_ident.opcode = PDS_VDPA_CMD_IDENT,
+ .vdpa_ident.vf_id = cpu_to_le16(vdpa_aux->vf_id),
+ };
+ union pds_core_adminq_comp comp = {};
+ struct vdpa_mgmt_dev *mgmt;
+ struct pci_dev *pf_pdev;
+ struct device *pf_dev;
+ struct pci_dev *pdev;
+ dma_addr_t ident_pa;
+ struct device *dev;
+ u16 dev_intrs;
+ u16 max_vqs;
+ int err;
+
+ dev = &vdpa_aux->padev->aux_dev.dev;
+ pdev = vdpa_aux->padev->vf_pdev;
+ mgmt = &vdpa_aux->vdpa_mdev;
+
+ /* Get resource info through the PF's adminq. It is a block of info,
+ * so we need to map some memory that the PF can make available to
+ * the firmware for writing the data.
+ */
+ pf_pdev = pci_physfn(vdpa_aux->padev->vf_pdev);
+ pf_dev = &pf_pdev->dev;
+ ident_pa = dma_map_single(pf_dev, &vdpa_aux->ident,
+ sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
+ if (dma_mapping_error(pf_dev, ident_pa)) {
+ dev_err(dev, "Failed to map ident space\n");
+ return -ENOMEM;
+ }
+
+ cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);
+ cmd.vdpa_ident.len = cpu_to_le32(sizeof(vdpa_aux->ident));
+ err = pds_client_adminq_cmd(vdpa_aux->padev, &cmd,
+ sizeof(cmd.vdpa_ident), &comp, 0);
+ dma_unmap_single(pf_dev, ident_pa,
+ sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
+ if (err) {
+ dev_err(dev, "Failed to ident hw, status %d: %pe\n",
+ comp.status, ERR_PTR(err));
+ return err;
+ }
+
+ max_vqs = le16_to_cpu(vdpa_aux->ident.max_vqs);
+ dev_intrs = pci_msix_vec_count(pdev);
+ dev_dbg(dev, "ident.max_vqs %d dev_intrs %d\n", max_vqs, dev_intrs);
+
+ max_vqs = min_t(u16, dev_intrs, max_vqs);
+ mgmt->max_supported_vqs = min_t(u16, PDS_VDPA_MAX_QUEUES, max_vqs);
+ vdpa_aux->nintrs = mgmt->max_supported_vqs;
+
+ mgmt->ops = &pds_vdpa_mgmt_dev_ops;
+ mgmt->id_table = pds_vdpa_id_table;
+ mgmt->device = dev;
+ mgmt->supported_features = le64_to_cpu(vdpa_aux->ident.hw_features);
+ mgmt->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
+ mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
+
+ err = pci_alloc_irq_vectors(pdev, vdpa_aux->nintrs, vdpa_aux->nintrs,
+ PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "Couldn't get %d msix vectors: %pe\n",
+ vdpa_aux->nintrs, ERR_PTR(err));
+ return err;
+ }
+ vdpa_aux->nintrs = err;
+
+ return 0;
+}
diff --git a/drivers/vdpa/pds/vdpa_dev.h b/drivers/vdpa/pds/vdpa_dev.h
new file mode 100644
index 000000000000..a1bc37de9537
--- /dev/null
+++ b/drivers/vdpa/pds/vdpa_dev.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _VDPA_DEV_H_
+#define _VDPA_DEV_H_
+
+#include <linux/pci.h>
+#include <linux/vdpa.h>
+
+struct pds_vdpa_vq_info {
+ bool ready;
+ u64 desc_addr;
+ u64 avail_addr;
+ u64 used_addr;
+ u32 q_len;
+ u16 qid;
+ int irq;
+ char irq_name[32];
+
+ void __iomem *notify;
+ dma_addr_t notify_pa;
+
+ u64 doorbell;
+ u16 avail_idx;
+ u16 used_idx;
+
+ struct vdpa_callback event_cb;
+ struct pds_vdpa_device *pdsv;
+};
+
+#define PDS_VDPA_MAX_QUEUES 65
+#define PDS_VDPA_MAX_QLEN 32768
+struct pds_vdpa_device {
+ struct vdpa_device vdpa_dev;
+ struct pds_vdpa_aux *vdpa_aux;
+
+ struct pds_vdpa_vq_info vqs[PDS_VDPA_MAX_QUEUES];
+ u64 supported_features; /* specified device features */
+ u64 req_features; /* features requested by vdpa */
+ u8 vdpa_index; /* rsvd for future subdevice use */
+ u8 num_vqs; /* num vqs in use */
+ struct vdpa_callback config_cb;
+ struct notifier_block nb;
+};
+
+#define PDS_VDPA_PACKED_INVERT_IDX 0x8000
+
+int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux);
+#endif /* _VDPA_DEV_H_ */
diff --git a/drivers/vdpa/solidrun/snet_ctrl.c b/drivers/vdpa/solidrun/snet_ctrl.c
index 3858738643b4..3cef2571d15d 100644
--- a/drivers/vdpa/solidrun/snet_ctrl.c
+++ b/drivers/vdpa/solidrun/snet_ctrl.c
@@ -16,6 +16,7 @@ enum snet_ctrl_opcodes {
SNET_CTRL_OP_DESTROY = 1,
SNET_CTRL_OP_READ_VQ_STATE,
SNET_CTRL_OP_SUSPEND,
+ SNET_CTRL_OP_RESUME,
};
#define SNET_CTRL_TIMEOUT 2000000
@@ -328,3 +329,8 @@ int snet_suspend_dev(struct snet *snet)
{
return snet_send_ctrl_msg(snet, SNET_CTRL_OP_SUSPEND, 0);
}
+
+int snet_resume_dev(struct snet *snet)
+{
+ return snet_send_ctrl_msg(snet, SNET_CTRL_OP_RESUME, 0);
+}
diff --git a/drivers/vdpa/solidrun/snet_hwmon.c b/drivers/vdpa/solidrun/snet_hwmon.c
index 42c87387a0f1..af531a339082 100644
--- a/drivers/vdpa/solidrun/snet_hwmon.c
+++ b/drivers/vdpa/solidrun/snet_hwmon.c
@@ -159,7 +159,7 @@ static const struct hwmon_ops snet_hwmon_ops = {
.read_string = snet_hwmon_read_string
};
-static const struct hwmon_channel_info *snet_hwmon_info[] = {
+static const struct hwmon_channel_info * const snet_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_LABEL),
diff --git a/drivers/vdpa/solidrun/snet_main.c b/drivers/vdpa/solidrun/snet_main.c
index cdcd84ce4f5a..99428a04068d 100644
--- a/drivers/vdpa/solidrun/snet_main.c
+++ b/drivers/vdpa/solidrun/snet_main.c
@@ -509,6 +509,20 @@ static int snet_suspend(struct vdpa_device *vdev)
return ret;
}
+static int snet_resume(struct vdpa_device *vdev)
+{
+ struct snet *snet = vdpa_to_snet(vdev);
+ int ret;
+
+ ret = snet_resume_dev(snet);
+ if (ret)
+ SNET_ERR(snet->pdev, "SNET[%u] resume failed, err: %d\n", snet->sid, ret);
+ else
+ SNET_DBG(snet->pdev, "Resume SNET[%u] device\n", snet->sid);
+
+ return ret;
+}
+
static const struct vdpa_config_ops snet_config_ops = {
.set_vq_address = snet_set_vq_address,
.set_vq_num = snet_set_vq_num,
@@ -536,6 +550,7 @@ static const struct vdpa_config_ops snet_config_ops = {
.get_config = snet_get_config,
.set_config = snet_set_config,
.suspend = snet_suspend,
+ .resume = snet_resume,
};
static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
diff --git a/drivers/vdpa/solidrun/snet_vdpa.h b/drivers/vdpa/solidrun/snet_vdpa.h
index 3c78d4e7d485..36ac285835ea 100644
--- a/drivers/vdpa/solidrun/snet_vdpa.h
+++ b/drivers/vdpa/solidrun/snet_vdpa.h
@@ -204,5 +204,6 @@ void snet_ctrl_clear(struct snet *snet);
int snet_destroy_dev(struct snet *snet);
int snet_read_vq_state(struct snet *snet, u16 idx, struct vdpa_vq_state *state);
int snet_suspend_dev(struct snet *snet);
+int snet_resume_dev(struct snet *snet);
#endif //_SNET_VDPA_H_
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 4619b4a520ef..dc38ed21319d 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -726,7 +726,11 @@ static int vduse_vdpa_set_vq_affinity(struct vdpa_device *vdpa, u16 idx,
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
- cpumask_copy(&dev->vqs[idx]->irq_affinity, cpu_mask);
+ if (cpu_mask)
+ cpumask_copy(&dev->vqs[idx]->irq_affinity, cpu_mask);
+ else
+ cpumask_setall(&dev->vqs[idx]->irq_affinity);
+
return 0;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ae2273196b0c..f2ed7167c848 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -546,7 +546,7 @@ static void vhost_net_busy_poll(struct vhost_net *net,
endtime = busy_clock() + busyloop_timeout;
while (vhost_can_busy_poll(endtime)) {
- if (vhost_has_work(&net->dev)) {
+ if (vhost_vq_has_work(vq)) {
*busyloop_intr = true;
break;
}
@@ -1347,8 +1347,10 @@ static int vhost_net_open(struct inode *inode, struct file *f)
VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
NULL);
- vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
- vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
+ vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev,
+ vqs[VHOST_NET_VQ_TX]);
+ vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev,
+ vqs[VHOST_NET_VQ_RX]);
f->private_data = n;
n->page_frag.page = NULL;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index bb10fa4bb4f6..c83f7f043470 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -167,6 +167,7 @@ MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi d
struct vhost_scsi_virtqueue {
struct vhost_virtqueue vq;
+ struct vhost_scsi *vs;
/*
* Reference counting for inflight reqs, used for flush operation. At
* each time, one reference tracks new commands submitted, while we
@@ -181,6 +182,9 @@ struct vhost_scsi_virtqueue {
struct vhost_scsi_cmd *scsi_cmds;
struct sbitmap scsi_tags;
int max_cmds;
+
+ struct vhost_work completion_work;
+ struct llist_head completion_list;
};
struct vhost_scsi {
@@ -190,12 +194,8 @@ struct vhost_scsi {
struct vhost_dev dev;
struct vhost_scsi_virtqueue *vqs;
- unsigned long *compl_bitmap;
struct vhost_scsi_inflight **old_inflight;
- struct vhost_work vs_completion_work; /* cmd completion work item */
- struct llist_head vs_completion_list; /* cmd completion queue */
-
struct vhost_work vs_event_work; /* evt injection work item */
struct llist_head vs_event_list; /* evt injection queue */
@@ -353,15 +353,17 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
struct vhost_scsi_tmf *tmf = container_of(se_cmd,
struct vhost_scsi_tmf, se_cmd);
+ struct vhost_virtqueue *vq = &tmf->svq->vq;
- vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
+ vhost_vq_work_queue(vq, &tmf->vwork);
} else {
struct vhost_scsi_cmd *cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
- struct vhost_scsi *vs = cmd->tvc_vhost;
+ struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
+ struct vhost_scsi_virtqueue, vq);
- llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
- vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+ llist_add(&cmd->tvc_completion_list, &svq->completion_list);
+ vhost_vq_work_queue(&svq->vq, &svq->completion_work);
}
}
@@ -509,17 +511,17 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
*/
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
- struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
- vs_completion_work);
+ struct vhost_scsi_virtqueue *svq = container_of(work,
+ struct vhost_scsi_virtqueue, completion_work);
struct virtio_scsi_cmd_resp v_rsp;
struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
struct se_cmd *se_cmd;
struct iov_iter iov_iter;
- int ret, vq;
+ bool signal = false;
+ int ret;
- bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
- llnode = llist_del_all(&vs->vs_completion_list);
+ llnode = llist_del_all(&svq->completion_list);
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
@@ -539,21 +541,17 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
- struct vhost_scsi_virtqueue *q;
+ signal = true;
+
vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
- q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
- vq = q - vs->vqs;
- __set_bit(vq, vs->compl_bitmap);
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
vhost_scsi_release_cmd_res(se_cmd);
}
- vq = -1;
- while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
- < vs->dev.nvqs)
- vhost_signal(&vs->dev, &vs->vqs[vq].vq);
+ if (signal)
+ vhost_signal(&svq->vs->dev, &svq->vq);
}
static struct vhost_scsi_cmd *
@@ -1135,12 +1133,27 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
vwork);
- int resp_code;
+ struct vhost_virtqueue *ctl_vq, *vq;
+ int resp_code, i;
+
+ if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) {
+ /*
+ * Flush IO vqs that don't share a worker with the ctl to make
+ * sure they have sent their responses before us.
+ */
+ ctl_vq = &tmf->vhost->vqs[VHOST_SCSI_VQ_CTL].vq;
+ for (i = VHOST_SCSI_VQ_IO; i < tmf->vhost->dev.nvqs; i++) {
+ vq = &tmf->vhost->vqs[i].vq;
+
+ if (vhost_vq_is_setup(vq) &&
+ vq->worker != ctl_vq->worker)
+ vhost_vq_flush(vq);
+ }
- if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
- else
+ } else {
resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ }
vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
tmf->vq_desc, &tmf->resp_iov, resp_code);
@@ -1335,11 +1348,9 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
}
static void
-vhost_scsi_send_evt(struct vhost_scsi *vs,
- struct vhost_scsi_tpg *tpg,
- struct se_lun *lun,
- u32 event,
- u32 reason)
+vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ struct vhost_scsi_tpg *tpg, struct se_lun *lun,
+ u32 event, u32 reason)
{
struct vhost_scsi_evt *evt;
@@ -1361,7 +1372,7 @@ vhost_scsi_send_evt(struct vhost_scsi *vs,
}
llist_add(&evt->list, &vs->vs_event_list);
- vhost_work_queue(&vs->dev, &vs->vs_event_work);
+ vhost_vq_work_queue(vq, &vs->vs_event_work);
}
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
@@ -1375,7 +1386,8 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
goto out;
if (vs->vs_events_missed)
- vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+ vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
+ 0);
out:
mutex_unlock(&vq->mutex);
}
@@ -1770,6 +1782,7 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
+ struct vhost_scsi_virtqueue *svq;
struct vhost_scsi *vs;
struct vhost_virtqueue **vqs;
int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
@@ -1788,10 +1801,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
}
nvqs += VHOST_SCSI_VQ_IO;
- vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
- if (!vs->compl_bitmap)
- goto err_compl_bitmap;
-
vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
GFP_KERNEL | __GFP_ZERO);
if (!vs->old_inflight)
@@ -1806,7 +1815,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
if (!vqs)
goto err_local_vqs;
- vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
vs->vs_events_nr = 0;
@@ -1817,8 +1825,14 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
- vqs[i] = &vs->vqs[i].vq;
- vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+ svq = &vs->vqs[i];
+
+ vqs[i] = &svq->vq;
+ svq->vs = vs;
+ init_llist_head(&svq->completion_list);
+ vhost_work_init(&svq->completion_work,
+ vhost_scsi_complete_cmd_work);
+ svq->vq.handle_kick = vhost_scsi_handle_kick;
}
vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
VHOST_SCSI_WEIGHT, 0, true, NULL);
@@ -1833,8 +1847,6 @@ err_local_vqs:
err_vqs:
kfree(vs->old_inflight);
err_inflight:
- bitmap_free(vs->compl_bitmap);
-err_compl_bitmap:
kvfree(vs);
err_vs:
return r;
@@ -1854,7 +1866,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
kfree(vs->dev.vqs);
kfree(vs->vqs);
kfree(vs->old_inflight);
- bitmap_free(vs->compl_bitmap);
kvfree(vs);
return 0;
}
@@ -1916,6 +1927,14 @@ vhost_scsi_ioctl(struct file *f,
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
return vhost_scsi_set_features(vs, features);
+ case VHOST_NEW_WORKER:
+ case VHOST_FREE_WORKER:
+ case VHOST_ATTACH_VRING_WORKER:
+ case VHOST_GET_VRING_WORKER:
+ mutex_lock(&vs->dev.mutex);
+ r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
+ mutex_unlock(&vs->dev.mutex);
+ return r;
default:
mutex_lock(&vs->dev.mutex);
r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
@@ -1995,7 +2014,7 @@ vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
goto unlock;
if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
- vhost_scsi_send_evt(vs, tpg, lun,
+ vhost_scsi_send_evt(vs, vq, tpg, lun,
VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
unlock:
mutex_unlock(&vq->mutex);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 60c9ebd629dd..c71d573f1c94 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -187,13 +187,15 @@ EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- __poll_t mask, struct vhost_dev *dev)
+ __poll_t mask, struct vhost_dev *dev,
+ struct vhost_virtqueue *vq)
{
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
poll->mask = mask;
poll->dev = dev;
poll->wqh = NULL;
+ poll->vq = vq;
vhost_work_init(&poll->work, fn);
}
@@ -231,46 +233,102 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-void vhost_dev_flush(struct vhost_dev *dev)
+static void vhost_worker_queue(struct vhost_worker *worker,
+ struct vhost_work *work)
+{
+ if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
+ /* We can only add the work to the list after we're
+ * sure it was not in the list.
+ * test_and_set_bit() implies a memory barrier.
+ */
+ llist_add(&work->node, &worker->work_list);
+ vhost_task_wake(worker->vtsk);
+ }
+}
+
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
+{
+ struct vhost_worker *worker;
+ bool queued = false;
+
+ rcu_read_lock();
+ worker = rcu_dereference(vq->worker);
+ if (worker) {
+ queued = true;
+ vhost_worker_queue(worker, work);
+ }
+ rcu_read_unlock();
+
+ return queued;
+}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
+
+void vhost_vq_flush(struct vhost_virtqueue *vq)
{
struct vhost_flush_struct flush;
- if (dev->worker.vtsk) {
- init_completion(&flush.wait_event);
- vhost_work_init(&flush.work, vhost_flush_work);
+ init_completion(&flush.wait_event);
+ vhost_work_init(&flush.work, vhost_flush_work);
- vhost_work_queue(dev, &flush.work);
+ if (vhost_vq_work_queue(vq, &flush.work))
wait_for_completion(&flush.wait_event);
- }
}
-EXPORT_SYMBOL_GPL(vhost_dev_flush);
+EXPORT_SYMBOL_GPL(vhost_vq_flush);
-void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
+/**
+ * vhost_worker_flush - flush a worker
+ * @worker: worker to flush
+ *
+ * This does not use RCU to protect the worker, so the device or worker
+ * mutex must be held.
+ */
+static void vhost_worker_flush(struct vhost_worker *worker)
{
- if (!dev->worker.vtsk)
- return;
+ struct vhost_flush_struct flush;
- if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
- /* We can only add the work to the list after we're
- * sure it was not in the list.
- * test_and_set_bit() implies a memory barrier.
- */
- llist_add(&work->node, &dev->worker.work_list);
- vhost_task_wake(dev->worker.vtsk);
+ init_completion(&flush.wait_event);
+ vhost_work_init(&flush.work, vhost_flush_work);
+
+ vhost_worker_queue(worker, &flush.work);
+ wait_for_completion(&flush.wait_event);
+}
+
+void vhost_dev_flush(struct vhost_dev *dev)
+{
+ struct vhost_worker *worker;
+ unsigned long i;
+
+ xa_for_each(&dev->worker_xa, i, worker) {
+ mutex_lock(&worker->mutex);
+ if (!worker->attachment_cnt) {
+ mutex_unlock(&worker->mutex);
+ continue;
+ }
+ vhost_worker_flush(worker);
+ mutex_unlock(&worker->mutex);
}
}
-EXPORT_SYMBOL_GPL(vhost_work_queue);
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
/* A lockless hint for busy polling code to exit the loop */
-bool vhost_has_work(struct vhost_dev *dev)
+bool vhost_vq_has_work(struct vhost_virtqueue *vq)
{
- return !llist_empty(&dev->worker.work_list);
+ struct vhost_worker *worker;
+ bool has_work = false;
+
+ rcu_read_lock();
+ worker = rcu_dereference(vq->worker);
+ if (worker && !llist_empty(&worker->work_list))
+ has_work = true;
+ rcu_read_unlock();
+
+ return has_work;
}
-EXPORT_SYMBOL_GPL(vhost_has_work);
+EXPORT_SYMBOL_GPL(vhost_vq_has_work);
void vhost_poll_queue(struct vhost_poll *poll)
{
- vhost_work_queue(poll->dev, &poll->work);
+ vhost_vq_work_queue(poll->vq, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
@@ -329,6 +387,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->busyloop_timeout = 0;
vq->umem = NULL;
vq->iotlb = NULL;
+ rcu_assign_pointer(vq->worker, NULL);
vhost_vring_call_reset(&vq->call_ctx);
__vhost_vq_meta_reset(vq);
}
@@ -458,8 +517,6 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->umem = NULL;
dev->iotlb = NULL;
dev->mm = NULL;
- memset(&dev->worker, 0, sizeof(dev->worker));
- init_llist_head(&dev->worker.work_list);
dev->iov_limit = iov_limit;
dev->weight = weight;
dev->byte_weight = byte_weight;
@@ -469,7 +526,7 @@ void vhost_dev_init(struct vhost_dev *dev,
INIT_LIST_HEAD(&dev->read_list);
INIT_LIST_HEAD(&dev->pending_list);
spin_lock_init(&dev->iotlb_lock);
-
+ xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC);
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
@@ -481,7 +538,7 @@ void vhost_dev_init(struct vhost_dev *dev,
vhost_vq_reset(dev, vq);
if (vq->handle_kick)
vhost_poll_init(&vq->poll, vq->handle_kick,
- EPOLLIN, dev);
+ EPOLLIN, dev, vq);
}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
@@ -531,38 +588,284 @@ static void vhost_detach_mm(struct vhost_dev *dev)
dev->mm = NULL;
}
-static void vhost_worker_free(struct vhost_dev *dev)
+static void vhost_worker_destroy(struct vhost_dev *dev,
+ struct vhost_worker *worker)
+{
+ if (!worker)
+ return;
+
+ WARN_ON(!llist_empty(&worker->work_list));
+ xa_erase(&dev->worker_xa, worker->id);
+ vhost_task_stop(worker->vtsk);
+ kfree(worker);
+}
+
+static void vhost_workers_free(struct vhost_dev *dev)
{
- if (!dev->worker.vtsk)
+ struct vhost_worker *worker;
+ unsigned long i;
+
+ if (!dev->use_worker)
return;
- WARN_ON(!llist_empty(&dev->worker.work_list));
- vhost_task_stop(dev->worker.vtsk);
- dev->worker.kcov_handle = 0;
- dev->worker.vtsk = NULL;
+ for (i = 0; i < dev->nvqs; i++)
+ rcu_assign_pointer(dev->vqs[i]->worker, NULL);
+ /*
+ * Free the default worker we created and cleanup workers userspace
+ * created but couldn't clean up (it forgot or crashed).
+ */
+ xa_for_each(&dev->worker_xa, i, worker)
+ vhost_worker_destroy(dev, worker);
+ xa_destroy(&dev->worker_xa);
}
-static int vhost_worker_create(struct vhost_dev *dev)
+static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
+ struct vhost_worker *worker;
struct vhost_task *vtsk;
char name[TASK_COMM_LEN];
+ int ret;
+ u32 id;
+
+ worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
+ if (!worker)
+ return NULL;
snprintf(name, sizeof(name), "vhost-%d", current->pid);
- vtsk = vhost_task_create(vhost_worker, &dev->worker, name);
+ vtsk = vhost_task_create(vhost_worker, worker, name);
if (!vtsk)
- return -ENOMEM;
+ goto free_worker;
+
+ mutex_init(&worker->mutex);
+ init_llist_head(&worker->work_list);
+ worker->kcov_handle = kcov_common_handle();
+ worker->vtsk = vtsk;
- dev->worker.kcov_handle = kcov_common_handle();
- dev->worker.vtsk = vtsk;
vhost_task_start(vtsk);
+
+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ if (ret < 0)
+ goto stop_worker;
+ worker->id = id;
+
+ return worker;
+
+stop_worker:
+ vhost_task_stop(vtsk);
+free_worker:
+ kfree(worker);
+ return NULL;
+}
+
+/* Caller must have device mutex */
+static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ struct vhost_worker *worker)
+{
+ struct vhost_worker *old_worker;
+
+ old_worker = rcu_dereference_check(vq->worker,
+ lockdep_is_held(&vq->dev->mutex));
+
+ mutex_lock(&worker->mutex);
+ worker->attachment_cnt++;
+ mutex_unlock(&worker->mutex);
+ rcu_assign_pointer(vq->worker, worker);
+
+ if (!old_worker)
+ return;
+ /*
+ * Take the worker mutex to make sure we see the work queued from
+ * device-wide flushes, which don't use RCU for execution.
+ */
+ mutex_lock(&old_worker->mutex);
+ old_worker->attachment_cnt--;
+ /*
+ * We don't want to call synchronize_rcu for every vq during setup
+ * because it will slow down VM startup. If we haven't done
+ * VHOST_SET_VRING_KICK and not done the driver specific
+ * SET_ENDPOINT/RUNNING then we can skip the sync since there will
+ * not be any work queued for scsi and net.
+ */
+ mutex_lock(&vq->mutex);
+ if (!vhost_vq_get_backend(vq) && !vq->kick) {
+ mutex_unlock(&vq->mutex);
+ mutex_unlock(&old_worker->mutex);
+ /*
+ * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
+ * Warn if it adds support for multiple workers but forgets to
+ * handle the early queueing case.
+ */
+ WARN_ON(!old_worker->attachment_cnt &&
+ !llist_empty(&old_worker->work_list));
+ return;
+ }
+ mutex_unlock(&vq->mutex);
+
+ /* Make sure new vq queue/flush/poll calls see the new worker */
+ synchronize_rcu();
+ /* Make sure whatever was queued gets run */
+ vhost_worker_flush(old_worker);
+ mutex_unlock(&old_worker->mutex);
+}
+
+/* Caller must have device mutex */
+static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ struct vhost_vring_worker *info)
+{
+ unsigned long index = info->worker_id;
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_worker *worker;
+
+ if (!dev->use_worker)
+ return -EINVAL;
+
+ worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
+ if (!worker || worker->id != info->worker_id)
+ return -ENODEV;
+
+ __vhost_vq_attach_worker(vq, worker);
+ return 0;
+}
+
+/* Caller must have device mutex */
+static int vhost_new_worker(struct vhost_dev *dev,
+ struct vhost_worker_state *info)
+{
+ struct vhost_worker *worker;
+
+ worker = vhost_worker_create(dev);
+ if (!worker)
+ return -ENOMEM;
+
+ info->worker_id = worker->id;
+ return 0;
+}
+
+/* Caller must have device mutex */
+static int vhost_free_worker(struct vhost_dev *dev,
+ struct vhost_worker_state *info)
+{
+ unsigned long index = info->worker_id;
+ struct vhost_worker *worker;
+
+ worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
+ if (!worker || worker->id != info->worker_id)
+ return -ENODEV;
+
+ mutex_lock(&worker->mutex);
+ if (worker->attachment_cnt) {
+ mutex_unlock(&worker->mutex);
+ return -EBUSY;
+ }
+ mutex_unlock(&worker->mutex);
+
+ vhost_worker_destroy(dev, worker);
return 0;
}
+static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
+ struct vhost_virtqueue **vq, u32 *id)
+{
+ u32 __user *idxp = argp;
+ u32 idx;
+ long r;
+
+ r = get_user(idx, idxp);
+ if (r < 0)
+ return r;
+
+ if (idx >= dev->nvqs)
+ return -ENOBUFS;
+
+ idx = array_index_nospec(idx, dev->nvqs);
+
+ *vq = dev->vqs[idx];
+ *id = idx;
+ return 0;
+}
+
+/* Caller must have device mutex */
+long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
+ void __user *argp)
+{
+ struct vhost_vring_worker ring_worker;
+ struct vhost_worker_state state;
+ struct vhost_worker *worker;
+ struct vhost_virtqueue *vq;
+ long ret;
+ u32 idx;
+
+ if (!dev->use_worker)
+ return -EINVAL;
+
+ if (!vhost_dev_has_owner(dev))
+ return -EINVAL;
+
+ ret = vhost_dev_check_owner(dev);
+ if (ret)
+ return ret;
+
+ switch (ioctl) {
+ /* dev worker ioctls */
+ case VHOST_NEW_WORKER:
+ ret = vhost_new_worker(dev, &state);
+ if (!ret && copy_to_user(argp, &state, sizeof(state)))
+ ret = -EFAULT;
+ return ret;
+ case VHOST_FREE_WORKER:
+ if (copy_from_user(&state, argp, sizeof(state)))
+ return -EFAULT;
+ return vhost_free_worker(dev, &state);
+ /* vring worker ioctls */
+ case VHOST_ATTACH_VRING_WORKER:
+ case VHOST_GET_VRING_WORKER:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ ret = vhost_get_vq_from_user(dev, argp, &vq, &idx);
+ if (ret)
+ return ret;
+
+ switch (ioctl) {
+ case VHOST_ATTACH_VRING_WORKER:
+ if (copy_from_user(&ring_worker, argp, sizeof(ring_worker))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = vhost_vq_attach_worker(vq, &ring_worker);
+ break;
+ case VHOST_GET_VRING_WORKER:
+ worker = rcu_dereference_check(vq->worker,
+ lockdep_is_held(&dev->mutex));
+ if (!worker) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ring_worker.index = idx;
+ ring_worker.worker_id = worker->id;
+
+ if (copy_to_user(argp, &ring_worker, sizeof(ring_worker)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_worker_ioctl);
+
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
- int err;
+ struct vhost_worker *worker;
+ int err, i;
/* Is there an owner already? */
if (vhost_dev_has_owner(dev)) {
@@ -572,20 +875,32 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
vhost_attach_mm(dev);
- if (dev->use_worker) {
- err = vhost_worker_create(dev);
- if (err)
- goto err_worker;
- }
-
err = vhost_dev_alloc_iovecs(dev);
if (err)
goto err_iovecs;
+ if (dev->use_worker) {
+ /*
+ * This should be done last, because vsock can queue work
+ * before VHOST_SET_OWNER so it simplifies the failure path
+ * below since we don't have to worry about vsock queueing
+ * while we free the worker.
+ */
+ worker = vhost_worker_create(dev);
+ if (!worker) {
+ err = -ENOMEM;
+ goto err_worker;
+ }
+
+ for (i = 0; i < dev->nvqs; i++)
+ __vhost_vq_attach_worker(dev->vqs[i], worker);
+ }
+
return 0;
-err_iovecs:
- vhost_worker_free(dev);
+
err_worker:
+ vhost_dev_free_iovecs(dev);
+err_iovecs:
vhost_detach_mm(dev);
err_mm:
return err;
@@ -677,7 +992,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
dev->iotlb = NULL;
vhost_clear_msg(dev);
wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
- vhost_worker_free(dev);
+ vhost_workers_free(dev);
vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
@@ -1565,21 +1880,15 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
struct file *eventfp, *filep = NULL;
bool pollstart = false, pollstop = false;
struct eventfd_ctx *ctx = NULL;
- u32 __user *idxp = argp;
struct vhost_virtqueue *vq;
struct vhost_vring_state s;
struct vhost_vring_file f;
u32 idx;
long r;
- r = get_user(idx, idxp);
+ r = vhost_get_vq_from_user(d, argp, &vq, &idx);
if (r < 0)
return r;
- if (idx >= d->nvqs)
- return -ENOBUFS;
-
- idx = array_index_nospec(idx, d->nvqs);
- vq = d->vqs[idx];
if (ioctl == VHOST_SET_VRING_NUM ||
ioctl == VHOST_SET_VRING_ADDR) {
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index fc900be504b3..f60d5f7bef94 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -28,8 +28,12 @@ struct vhost_work {
struct vhost_worker {
struct vhost_task *vtsk;
+ /* Used to serialize device wide flushing with worker swapping. */
+ struct mutex mutex;
struct llist_head work_list;
u64 kcov_handle;
+ u32 id;
+ int attachment_cnt;
};
/* Poll a file (eventfd or socket) */
@@ -41,17 +45,17 @@ struct vhost_poll {
struct vhost_work work;
__poll_t mask;
struct vhost_dev *dev;
+ struct vhost_virtqueue *vq;
};
-void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
-void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
-bool vhost_has_work(struct vhost_dev *dev);
-
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- __poll_t mask, struct vhost_dev *dev);
+ __poll_t mask, struct vhost_dev *dev,
+ struct vhost_virtqueue *vq);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
+
+void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_dev_flush(struct vhost_dev *dev);
struct vhost_log {
@@ -74,6 +78,7 @@ struct vhost_vring_call {
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
+ struct vhost_worker __rcu *worker;
/* The actual ring of buffers. */
struct mutex mutex;
@@ -158,7 +163,6 @@ struct vhost_dev {
struct vhost_virtqueue **vqs;
int nvqs;
struct eventfd_ctx *log_ctx;
- struct vhost_worker worker;
struct vhost_iotlb *umem;
struct vhost_iotlb *iotlb;
spinlock_t iotlb_lock;
@@ -168,6 +172,7 @@ struct vhost_dev {
int iov_limit;
int weight;
int byte_weight;
+ struct xarray worker_xa;
bool use_worker;
int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg);
@@ -188,16 +193,21 @@ void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
+long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
+ void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);
int vhost_get_vq_desc(struct vhost_virtqueue *,
- struct iovec iov[], unsigned int iov_count,
+ struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
+void vhost_vq_flush(struct vhost_virtqueue *vq);
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
+bool vhost_vq_has_work(struct vhost_virtqueue *vq);
bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
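The worker ioctls declared above let userspace create additional vhost worker threads and bind individual virtqueues to them, instead of funnelling everything through the single per-device worker. A minimal userspace sketch of that flow is below; it is illustrative only and assumes the uapi definitions added elsewhere in this series (struct vhost_worker_state, struct vhost_vring_worker, VHOST_NEW_WORKER and VHOST_ATTACH_VRING_WORKER in <linux/vhost.h>), and that VHOST_SET_OWNER has already been issued on the device fd:

	/* Illustrative sketch only; not part of this patch set. */
	#include <sys/ioctl.h>
	#include <linux/vhost.h>

	static int attach_vq_to_new_worker(int vhost_fd, unsigned int vq_index)
	{
		struct vhost_worker_state state = { 0 };
		struct vhost_vring_worker vring_worker = { 0 };

		/* Ask the kernel to create an extra worker thread. */
		if (ioctl(vhost_fd, VHOST_NEW_WORKER, &state) < 0)
			return -1;

		/* Bind the virtqueue to the worker that was just created. */
		vring_worker.index = vq_index;
		vring_worker.worker_id = state.worker_id;
		return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &vring_worker);
	}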
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 6578db78f0ae..817d377a3f36 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -285,7 +285,7 @@ vhost_transport_send_pkt(struct sk_buff *skb)
atomic_inc(&vsock->queued_replies);
virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
- vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+ vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);
rcu_read_unlock();
return len;
@@ -583,7 +583,7 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
/* Some packets may have been queued before the device was started,
* let's kick the send worker to send them.
*/
- vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+ vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);
mutex_unlock(&vsock->dev.mutex);
return 0;
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index a479aab90f78..f51ada4795e8 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -803,8 +803,8 @@ static struct i2c_driver adp8860_driver = {
.name = KBUILD_MODNAME,
.pm = &adp8860_i2c_pm_ops,
},
- .probe_new = adp8860_probe,
- .remove = adp8860_remove,
+ .probe = adp8860_probe,
+ .remove = adp8860_remove,
.id_table = adp8860_id,
};
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index d6b0007db649..6bb18dc970e9 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -973,8 +973,8 @@ static struct i2c_driver adp8870_driver = {
.name = KBUILD_MODNAME,
.pm = &adp8870_i2c_pm_ops,
},
- .probe_new = adp8870_probe,
- .remove = adp8870_remove,
+ .probe = adp8870_probe,
+ .remove = adp8870_remove,
.id_table = adp8870_id,
};
diff --git a/drivers/video/backlight/arcxcnn_bl.c b/drivers/video/backlight/arcxcnn_bl.c
index 088bcca547dd..1d5a570cfe02 100644
--- a/drivers/video/backlight/arcxcnn_bl.c
+++ b/drivers/video/backlight/arcxcnn_bl.c
@@ -392,7 +392,7 @@ static struct i2c_driver arcxcnn_driver = {
.name = "arcxcnn_bl",
.of_match_table = arcxcnn_dt_ids,
},
- .probe_new = arcxcnn_probe,
+ .probe = arcxcnn_probe,
.remove = arcxcnn_remove,
.id_table = arcxcnn_ids,
};
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index f4db6c064635..7df25faa07a5 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -192,7 +192,7 @@ static struct i2c_driver bd6107_driver = {
.driver = {
.name = "bd6107",
},
- .probe_new = bd6107_probe,
+ .probe = bd6107_probe,
.remove = bd6107_remove,
.id_table = bd6107_ids,
};
diff --git a/drivers/video/backlight/ktz8866.c b/drivers/video/backlight/ktz8866.c
index d38c13ad39c7..9c980f2571ee 100644
--- a/drivers/video/backlight/ktz8866.c
+++ b/drivers/video/backlight/ktz8866.c
@@ -196,7 +196,7 @@ static struct i2c_driver ktz8866_driver = {
.name = "ktz8866",
.of_match_table = ktz8866_match_table,
},
- .probe_new = ktz8866_probe,
+ .probe = ktz8866_probe,
.remove = ktz8866_remove,
.id_table = ktz8866_ids,
};
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
index a1b6a2ad73a0..3259292fda76 100644
--- a/drivers/video/backlight/led_bl.c
+++ b/drivers/video/backlight/led_bl.c
@@ -209,8 +209,11 @@ static int led_bl_probe(struct platform_device *pdev)
return PTR_ERR(priv->bl_dev);
}
- for (i = 0; i < priv->nb_leds; i++)
+ for (i = 0; i < priv->nb_leds; i++) {
+ mutex_lock(&priv->leds[i]->led_access);
led_sysfs_disable(priv->leds[i]);
+ mutex_unlock(&priv->leds[i]->led_access);
+ }
backlight_update_status(priv->bl_dev);
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index d8c42acecb5d..8fcb62be597b 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -202,7 +202,9 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
usleep_range(1000, 2000);
/* minimum brightness is 0x04 */
ret = lm3630a_write(pchip, REG_BRT_A, bl->props.brightness);
- if (bl->props.brightness < 0x4)
+
+ if (backlight_is_blank(bl) || (backlight_get_brightness(bl) < 0x4))
+ /* turn the string off */
ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDA_ENABLE, 0);
else
ret |= lm3630a_update(pchip, REG_CTRL,
@@ -277,7 +279,9 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
usleep_range(1000, 2000);
/* minimum brightness is 0x04 */
ret = lm3630a_write(pchip, REG_BRT_B, bl->props.brightness);
- if (bl->props.brightness < 0x4)
+
+ if (backlight_is_blank(bl) || (backlight_get_brightness(bl) < 0x4))
+ /* turn the string off */
ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDB_ENABLE, 0);
else
ret |= lm3630a_update(pchip, REG_CTRL,
@@ -616,7 +620,7 @@ static struct i2c_driver lm3630a_i2c_driver = {
.name = LM3630A_NAME,
.of_match_table = lm3630a_match_table,
},
- .probe_new = lm3630a_probe,
+ .probe = lm3630a_probe,
.remove = lm3630a_remove,
.id_table = lm3630a_id,
};
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index a836628ce06e..5246c171497d 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -411,7 +411,7 @@ static struct i2c_driver lm3639_i2c_driver = {
.driver = {
.name = LM3639_NAME,
},
- .probe_new = lm3639_probe,
+ .probe = lm3639_probe,
.remove = lm3639_remove,
.id_table = lm3639_id,
};
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index a57c9ef3b1cc..1c9e921bca14 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -218,23 +218,10 @@ err:
static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
{
- struct pwm_device *pwm;
struct pwm_state state;
- /* request pwm device with the consumer name */
- if (!lp->pwm) {
- pwm = devm_pwm_get(lp->dev, lp->chipname);
- if (IS_ERR(pwm))
- return;
-
- lp->pwm = pwm;
-
- pwm_init_state(lp->pwm, &state);
- } else {
- pwm_get_state(lp->pwm, &state);
- }
+ pwm_get_state(lp->pwm, &state);
- state.period = lp->pdata->period_ns;
state.duty_cycle = div_u64(br * state.period, max_br);
state.enabled = state.duty_cycle;
@@ -339,6 +326,7 @@ static int lp855x_parse_dt(struct lp855x *lp)
of_property_read_string(node, "bl-name", &pdata->name);
of_property_read_u8(node, "dev-ctrl", &pdata->device_control);
of_property_read_u8(node, "init-brt", &pdata->initial_brightness);
+ /* Deprecated, specify period in pwms property instead */
of_property_read_u32(node, "pwm-period", &pdata->period_ns);
/* Fill ROM platform data if defined */
@@ -399,6 +387,7 @@ static int lp855x_probe(struct i2c_client *cl)
const struct i2c_device_id *id = i2c_client_get_device_id(cl);
const struct acpi_device_id *acpi_id = NULL;
struct device *dev = &cl->dev;
+ struct pwm_state pwmstate;
struct lp855x *lp;
int ret;
@@ -457,11 +446,6 @@ static int lp855x_probe(struct i2c_client *cl)
}
}
- if (lp->pdata->period_ns > 0)
- lp->mode = PWM_BASED;
- else
- lp->mode = REGISTER_BASED;
-
lp->supply = devm_regulator_get(dev, "power");
if (IS_ERR(lp->supply)) {
if (PTR_ERR(lp->supply) == -EPROBE_DEFER)
@@ -472,11 +456,31 @@ static int lp855x_probe(struct i2c_client *cl)
lp->enable = devm_regulator_get_optional(dev, "enable");
if (IS_ERR(lp->enable)) {
ret = PTR_ERR(lp->enable);
- if (ret == -ENODEV) {
+ if (ret == -ENODEV)
lp->enable = NULL;
- } else {
+ else
return dev_err_probe(dev, ret, "getting enable regulator\n");
- }
+ }
+
+ lp->pwm = devm_pwm_get(lp->dev, lp->chipname);
+ if (IS_ERR(lp->pwm)) {
+ ret = PTR_ERR(lp->pwm);
+ if (ret == -ENODEV || ret == -EINVAL)
+ lp->pwm = NULL;
+ else
+ return dev_err_probe(dev, ret, "getting PWM\n");
+
+ lp->mode = REGISTER_BASED;
+ dev_dbg(dev, "mode: register based\n");
+ } else {
+ pwm_init_state(lp->pwm, &pwmstate);
+ /* Legacy platform data compatibility */
+ if (lp->pdata->period_ns > 0)
+ pwmstate.period = lp->pdata->period_ns;
+ pwm_apply_state(lp->pwm, &pwmstate);
+
+ lp->mode = PWM_BASED;
+ dev_dbg(dev, "mode: PWM based\n");
}
if (lp->supply) {
@@ -587,7 +591,7 @@ static struct i2c_driver lp855x_driver = {
.of_match_table = of_match_ptr(lp855x_dt_ids),
.acpi_match_table = ACPI_PTR(lp855x_acpi_match),
},
- .probe_new = lp855x_probe,
+ .probe = lp855x_probe,
.remove = lp855x_remove,
.id_table = lp855x_ids,
};
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index 00673c8b66ac..56695ce67e48 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -141,7 +141,7 @@ static struct i2c_driver lv5207lp_driver = {
.driver = {
.name = "lv5207lp",
},
- .probe_new = lv5207lp_probe,
+ .probe = lv5207lp_probe,
.remove = lv5207lp_remove,
.id_table = lv5207lp_ids,
};
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index fce412234d10..a51fbab96368 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -54,8 +54,7 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
if (pb->post_pwm_on_delay)
msleep(pb->post_pwm_on_delay);
- if (pb->enable_gpio)
- gpiod_set_value_cansleep(pb->enable_gpio, 1);
+ gpiod_set_value_cansleep(pb->enable_gpio, 1);
pb->enabled = true;
}
@@ -65,8 +64,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
if (!pb->enabled)
return;
- if (pb->enable_gpio)
- gpiod_set_value_cansleep(pb->enable_gpio, 0);
+ gpiod_set_value_cansleep(pb->enable_gpio, 0);
if (pb->pwm_off_delay)
msleep(pb->pwm_off_delay);
@@ -429,8 +427,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
* Synchronize the enable_gpio with the observed state of the
* hardware.
*/
- if (pb->enable_gpio)
- gpiod_direction_output(pb->enable_gpio, active);
+ gpiod_direction_output(pb->enable_gpio, active);
/*
* Do not change pb->enabled here! pb->enabled essentially
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 66d82f6d17c7..c746deb79afc 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -71,9 +71,9 @@
#include <video/sticore.h>
-/* REGION_BASE(fb_info, index) returns the virtual address for region <index> */
+/* REGION_BASE(fb_info, index) returns the physical address for region <index> */
#define REGION_BASE(fb_info, index) \
- F_EXTEND(fb_info->sti->glob_cfg->region_ptrs[index])
+ F_EXTEND(fb_info->sti->regions_phys[index])
#define NGLEDEVDEPROM_CRT_REGION 1
diff --git a/drivers/video/sticore.c b/drivers/video/sticore.c
index 7eb925f2ba9c..c3765ad6eedf 100644
--- a/drivers/video/sticore.c
+++ b/drivers/video/sticore.c
@@ -4,7 +4,7 @@
* core code for console driver using HP's STI firmware
*
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 2001-2020 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2001-2023 Helge Deller <deller@gmx.de>
* Copyright (C) 2001-2002 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
*
* TODO:
@@ -41,6 +41,26 @@ static struct sti_struct *default_sti __read_mostly;
static int num_sti_roms __read_mostly;
static struct sti_struct *sti_roms[MAX_STI_ROMS] __read_mostly;
+static void *store_sti_val(struct sti_struct *sti, void *ptr, unsigned long val)
+{
+ u32 *ptr32 = ptr;
+
+ if (IS_ENABLED(CONFIG_64BIT) && sti->do_call64) {
+ /* used for 64-bit STI ROM */
+ unsigned long *ptr64 = ptr;
+
+ ptr64 = PTR_ALIGN(ptr64, sizeof(void *));
+ *ptr64++ = val;
+ return ptr64;
+ }
+
+ /* used for 32-bit STI ROM */
+ *ptr32++ = val;
+ return ptr32;
+}
+
+#define store_sti_ptr(sti, dest, ptr) \
+ store_sti_val(sti, dest, STI_PTR(ptr))
/* The colour indices used by STI are
* 0 - Black
@@ -87,7 +107,7 @@ static int sti_init_graph(struct sti_struct *sti)
memset(inptr, 0, sizeof(*inptr));
inptr->text_planes = 3; /* # of text planes (max 3 for STI) */
memset(inptr_ext, 0, sizeof(*inptr_ext));
- inptr->ext_ptr = STI_PTR(inptr_ext);
+ store_sti_ptr(sti, &inptr->ext_ptr, inptr_ext);
outptr->errno = 0;
ret = sti_call(sti, sti->init_graph, &default_init_flags, inptr,
@@ -118,7 +138,7 @@ static void sti_inq_conf(struct sti_struct *sti)
unsigned long flags;
s32 ret;
- outptr->ext_ptr = STI_PTR(&sti->sti_data->inq_outptr_ext);
+ store_sti_ptr(sti, &outptr->ext_ptr, &sti->sti_data->inq_outptr_ext);
do {
spin_lock_irqsave(&sti->lock, flags);
@@ -138,9 +158,9 @@ void
sti_putc(struct sti_struct *sti, int c, int y, int x,
struct sti_cooked_font *font)
{
- struct sti_font_inptr *inptr = &sti->sti_data->font_inptr;
+ struct sti_font_inptr *inptr;
struct sti_font_inptr inptr_default = {
- .font_start_addr = STI_PTR(font->raw),
+ .font_start_addr = (void *)STI_PTR(font->raw),
.index = c_index(sti, c),
.fg_color = c_fg(sti, c),
.bg_color = c_bg(sti, c),
@@ -153,7 +173,14 @@ sti_putc(struct sti_struct *sti, int c, int y, int x,
do {
spin_lock_irqsave(&sti->lock, flags);
- *inptr = inptr_default;
+ inptr = &inptr_default;
+ if (IS_ENABLED(CONFIG_64BIT) && !sti->do_call64) {
+ /* copy below 4G if calling 32-bit on LP64 kernel */
+ inptr = &sti->sti_data->font_inptr;
+ *inptr = inptr_default;
+ /* skip first 4 bytes for 32-bit STI call */
+ inptr = (void *)(((unsigned long)inptr) + sizeof(u32));
+ }
ret = sti_call(sti, sti->font_unpmv, &default_font_flags,
inptr, outptr, sti->glob_cfg);
spin_unlock_irqrestore(&sti->lock, flags);
@@ -170,7 +197,7 @@ void
sti_set(struct sti_struct *sti, int src_y, int src_x,
int height, int width, u8 color)
{
- struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
+ struct sti_blkmv_inptr *inptr;
struct sti_blkmv_inptr inptr_default = {
.fg_color = color,
.bg_color = color,
@@ -187,7 +214,12 @@ sti_set(struct sti_struct *sti, int src_y, int src_x,
do {
spin_lock_irqsave(&sti->lock, flags);
- *inptr = inptr_default;
+ inptr = &inptr_default;
+ if (IS_ENABLED(CONFIG_64BIT) && !sti->do_call64) {
+ /* copy below 4G if calling 32-bit on LP64 kernel */
+ inptr = &sti->sti_data->blkmv_inptr;
+ *inptr = inptr_default;
+ }
ret = sti_call(sti, sti->block_move, &clear_blkmv_flags,
inptr, outptr, sti->glob_cfg);
spin_unlock_irqrestore(&sti->lock, flags);
@@ -198,7 +230,7 @@ void
sti_clear(struct sti_struct *sti, int src_y, int src_x,
int height, int width, int c, struct sti_cooked_font *font)
{
- struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
+ struct sti_blkmv_inptr *inptr;
struct sti_blkmv_inptr inptr_default = {
.fg_color = c_fg(sti, c),
.bg_color = c_bg(sti, c),
@@ -215,7 +247,12 @@ sti_clear(struct sti_struct *sti, int src_y, int src_x,
do {
spin_lock_irqsave(&sti->lock, flags);
- *inptr = inptr_default;
+ inptr = &inptr_default;
+ if (IS_ENABLED(CONFIG_64BIT) && !sti->do_call64) {
+ /* copy below 4G if calling 32-bit on LP64 kernel */
+ inptr = &sti->sti_data->blkmv_inptr;
+ *inptr = inptr_default;
+ }
ret = sti_call(sti, sti->block_move, &clear_blkmv_flags,
inptr, outptr, sti->glob_cfg);
spin_unlock_irqrestore(&sti->lock, flags);
@@ -231,7 +268,7 @@ sti_bmove(struct sti_struct *sti, int src_y, int src_x,
int dst_y, int dst_x, int height, int width,
struct sti_cooked_font *font)
{
- struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
+ struct sti_blkmv_inptr *inptr;
struct sti_blkmv_inptr inptr_default = {
.src_x = src_x * font->width,
.src_y = src_y * font->height,
@@ -246,7 +283,12 @@ sti_bmove(struct sti_struct *sti, int src_y, int src_x,
do {
spin_lock_irqsave(&sti->lock, flags);
- *inptr = inptr_default;
+ inptr = &inptr_default;
+ if (IS_ENABLED(CONFIG_64BIT) && !sti->do_call64) {
+ /* copy below 4G if calling 32-bit on LP64 kernel */
+ inptr = &sti->sti_data->blkmv_inptr;
+ *inptr = inptr_default;
+ }
ret = sti_call(sti, sti->block_move, &default_blkmv_flags,
inptr, outptr, sti->glob_cfg);
spin_unlock_irqrestore(&sti->lock, flags);
@@ -359,42 +401,31 @@ __setup("sti_font=", sti_font_setup);
-static void sti_dump_globcfg(struct sti_glob_cfg *glob_cfg,
- unsigned int sti_mem_request)
+static void sti_dump_globcfg(struct sti_struct *sti)
{
- struct sti_glob_cfg_ext *cfg;
+ struct sti_glob_cfg *glob_cfg = sti->glob_cfg;
+ struct sti_glob_cfg_ext *cfg = &sti->sti_data->glob_cfg_ext;
pr_debug("%d text planes\n"
"%4d x %4d screen resolution\n"
"%4d x %4d offscreen\n"
- "%4d x %4d layout\n"
- "regions at %08x %08x %08x %08x\n"
- "regions at %08x %08x %08x %08x\n"
- "reent_lvl %d\n"
- "save_addr %08x\n",
+ "%4d x %4d layout\n",
glob_cfg->text_planes,
glob_cfg->onscreen_x, glob_cfg->onscreen_y,
glob_cfg->offscreen_x, glob_cfg->offscreen_y,
- glob_cfg->total_x, glob_cfg->total_y,
- glob_cfg->region_ptrs[0], glob_cfg->region_ptrs[1],
- glob_cfg->region_ptrs[2], glob_cfg->region_ptrs[3],
- glob_cfg->region_ptrs[4], glob_cfg->region_ptrs[5],
- glob_cfg->region_ptrs[6], glob_cfg->region_ptrs[7],
- glob_cfg->reent_lvl,
- glob_cfg->save_addr);
+ glob_cfg->total_x, glob_cfg->total_y);
/* dump extended cfg */
- cfg = PTR_STI((unsigned long)glob_cfg->ext_ptr);
pr_debug("monitor %d\n"
"in friendly mode: %d\n"
"power consumption %d watts\n"
"freq ref %d\n"
- "sti_mem_addr %08x (size=%d bytes)\n",
+ "sti_mem_addr %px (size=%d bytes)\n",
cfg->curr_mon,
cfg->friendly_boot,
cfg->power,
cfg->freq_ref,
- cfg->sti_mem_addr, sti_mem_request);
+ cfg->sti_mem_addr, sti->sti_mem_request);
}
static void sti_dump_outptr(struct sti_struct *sti)
@@ -414,7 +445,7 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
{
struct sti_glob_cfg *glob_cfg;
struct sti_glob_cfg_ext *glob_cfg_ext;
- void *save_addr;
+ void *save_addr, *ptr;
void *sti_mem_addr;
int i, size;
@@ -432,9 +463,7 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
save_addr = &sti->sti_data->save_addr;
sti_mem_addr = &sti->sti_data->sti_mem_addr;
- glob_cfg->ext_ptr = STI_PTR(glob_cfg_ext);
- glob_cfg->save_addr = STI_PTR(save_addr);
- for (i=0; i<8; i++) {
+ for (i = 0; i < STI_REGION_MAX; i++) {
unsigned long newhpa, len;
if (sti->pd) {
@@ -457,13 +486,10 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
REGION_OFFSET_TO_PHYS(sti->regions[i], newhpa);
len = sti->regions[i].region_desc.length * 4096;
- if (len)
- glob_cfg->region_ptrs[i] = sti->regions_phys[i];
- pr_debug("region #%d: phys %08lx, region_ptr %08x, len=%lukB, "
+ pr_debug("region #%d: phys %08lx, len=%lukB, "
"btlb=%d, sysonly=%d, cache=%d, last=%d\n",
- i, sti->regions_phys[i], glob_cfg->region_ptrs[i],
- len/1024,
+ i, sti->regions_phys[i], len / 1024,
sti->regions[i].region_desc.btlb,
sti->regions[i].region_desc.sys_only,
sti->regions[i].region_desc.cache,
@@ -474,11 +500,16 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
break;
}
- if (++i<8 && sti->regions[i].region)
- pr_warn("future ptr (0x%8x) not yet supported !\n",
- sti->regions[i].region);
+ ptr = &glob_cfg->region_ptrs;
+ for (i = 0; i < STI_REGION_MAX; i++)
+ ptr = store_sti_val(sti, ptr, sti->regions_phys[i]);
+
+ *(s32 *)ptr = 0; /* set reent_lvl */
+ ptr += sizeof(s32);
+ ptr = store_sti_ptr(sti, ptr, save_addr);
+ ptr = store_sti_ptr(sti, ptr, glob_cfg_ext);
- glob_cfg_ext->sti_mem_addr = STI_PTR(sti_mem_addr);
+ store_sti_ptr(sti, &glob_cfg_ext->sti_mem_addr, sti_mem_addr);
sti->glob_cfg = glob_cfg;
@@ -802,10 +833,19 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti,
raw->alt_code_type == ALT_CODE_TYPE_PA_RISC_64
? "and 64 " : "");
- sti->font_unpmv = address + (raw->font_unpmv & 0x03ffffff);
- sti->block_move = address + (raw->block_move & 0x03ffffff);
- sti->init_graph = address + (raw->init_graph & 0x03ffffff);
- sti->inq_conf = address + (raw->inq_conf & 0x03ffffff);
+ if (IS_ENABLED(CONFIG_64BIT) &&
+ raw->alt_code_type == ALT_CODE_TYPE_PA_RISC_64) {
+ sti->do_call64 = 1;
+ sti->font_unpmv = address + (raw->font_unp_addr & 0x03ffffff);
+ sti->block_move = address + (raw->block_move_addr & 0x03ffffff);
+ sti->init_graph = address + (raw->init_graph_addr & 0x03ffffff);
+ sti->inq_conf = address + (raw->inq_conf_addr & 0x03ffffff);
+ } else {
+ sti->font_unpmv = address + (raw->font_unpmv & 0x03ffffff);
+ sti->block_move = address + (raw->block_move & 0x03ffffff);
+ sti->init_graph = address + (raw->init_graph & 0x03ffffff);
+ sti->inq_conf = address + (raw->inq_conf & 0x03ffffff);
+ }
sti->rom = cooked;
sti->rom->raw = raw;
@@ -818,7 +858,13 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti,
sti_font_convert_bytemode(sti, sti->font);
sti_dump_font(sti->font);
+ pr_info(" using %d-bit STI ROM functions\n",
+ (IS_ENABLED(CONFIG_64BIT) && sti->do_call64) ? 64 : 32);
+
sti->sti_mem_request = raw->sti_mem_req;
+ pr_debug(" mem_request = %d, reentsize %d\n",
+ sti->sti_mem_request, raw->reentsize);
+
sti->graphics_id[0] = raw->graphics_id[0];
sti->graphics_id[1] = raw->graphics_id[1];
@@ -876,10 +922,12 @@ static struct sti_struct *sti_try_rom_generic(unsigned long address,
spin_lock_init(&sti->lock);
test_rom:
- /* if we can't read the ROM, bail out early. Not being able
- * to read the hpa is okay, for romless sti */
- if (pdc_add_valid(address))
+ /* pdc_add_valid() works only on 32-bit kernels */
+ if ((!IS_ENABLED(CONFIG_64BIT) ||
+ (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)) &&
+ pdc_add_valid(address)) {
goto out_err;
+ }
sig = gsc_readl(address);
@@ -949,7 +997,7 @@ test_rom:
goto out_err;
sti_inq_conf(sti);
- sti_dump_globcfg(sti->glob_cfg, sti->sti_mem_request);
+ sti_dump_globcfg(sti);
sti_dump_outptr(sti);
pr_info(" graphics card name: %s\n",
@@ -1135,14 +1183,15 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
unsigned long _glob_cfg = STI_PTR(glob_cfg);
int ret;
-#ifdef CONFIG_64BIT
/* Check for overflow when using 32bit STI on 64bit kernel. */
- if (WARN_ONCE(_flags>>32 || _inptr>>32 || _outptr>>32 || _glob_cfg>>32,
+ if (WARN_ONCE(IS_ENABLED(CONFIG_64BIT) && !sti->do_call64 &&
+ (upper_32_bits(_flags) || upper_32_bits(_inptr) ||
+ upper_32_bits(_outptr) || upper_32_bits(_glob_cfg)),
"Out of 32bit-range pointers!"))
return -1;
-#endif
- ret = pdc_sti_call(func, _flags, _inptr, _outptr, _glob_cfg);
+ ret = pdc_sti_call(func, _flags, _inptr, _outptr, _glob_cfg,
+ sti->do_call64);
return ret;
}
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 23112d84218f..4b773bd7c58c 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -45,9 +45,10 @@ struct virtio_pci_vq_info {
struct virtio_pci_device {
struct virtio_device vdev;
struct pci_dev *pci_dev;
- struct virtio_pci_legacy_device ldev;
- struct virtio_pci_modern_device mdev;
-
+ union {
+ struct virtio_pci_legacy_device ldev;
+ struct virtio_pci_modern_device mdev;
+ };
bool is_legacy;
/* Where to read and clear interrupt */
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index 869cb46bef96..aad7d9296e77 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -218,21 +218,29 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
int err, common, isr, notify, device;
u32 notify_length;
u32 notify_offset;
+ int devid;
check_offsets();
- /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
- if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
- return -ENODEV;
-
- if (pci_dev->device < 0x1040) {
- /* Transitional devices: use the PCI subsystem device id as
- * virtio device id, same as legacy driver always did.
- */
- mdev->id.device = pci_dev->subsystem_device;
+ if (mdev->device_id_check) {
+ devid = mdev->device_id_check(pci_dev);
+ if (devid < 0)
+ return devid;
+ mdev->id.device = devid;
} else {
- /* Modern devices: simply use PCI device id, but start from 0x1040. */
- mdev->id.device = pci_dev->device - 0x1040;
+ /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
+ if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
+ return -ENODEV;
+
+ if (pci_dev->device < 0x1040) {
+ /* Transitional devices: use the PCI subsystem device id as
+ * virtio device id, same as legacy driver always did.
+ */
+ mdev->id.device = pci_dev->subsystem_device;
+ } else {
+ /* Modern devices: simply use PCI device id, but start from 0x1040. */
+ mdev->id.device = pci_dev->device - 0x1040;
+ }
}
mdev->id.vendor = pci_dev->subsystem_vendor;
@@ -260,7 +268,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
return -EINVAL;
}
- err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pci_dev->dev,
+ mdev->dma_mask ? : DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(&pci_dev->dev,
DMA_BIT_MASK(32));
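With the hunk above, vp_modern_probe() lets the caller override both the default virtio PCI device-ID mapping (mdev->device_id_check) and the default 64-bit DMA mask (mdev->dma_mask). A rough sketch of a transport driver using these hooks follows; it is illustrative only, and everything beyond the two mdev fields and the vp_modern_probe() call (the 0x10ab device ID, the 48-bit mask, the my_* names) is made up for the example:

	/* Illustrative sketch only; the mdev field names are taken from the hunk above. */
	static int my_device_id_check(struct pci_dev *pdev)
	{
		/* Accept only our vendor-specific function; 0x10ab is a made-up ID. */
		if (pdev->device != 0x10ab)
			return -ENODEV;
		/* Return the virtio device type to expose (net in this example). */
		return VIRTIO_ID_NET;
	}

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct virtio_pci_modern_device *mdev;

		mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
		if (!mdev)
			return -ENOMEM;

		mdev->pci_dev = pdev;
		mdev->device_id_check = my_device_id_check;
		mdev->dma_mask = DMA_BIT_MASK(48);	/* instead of the default 64-bit mask */

		return vp_modern_probe(mdev);	/* error unwinding omitted for brevity */
	}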
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index eb6aee8c06b2..989e2d7184ce 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -385,7 +385,9 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
err = PTR_ERR(vqs[i]);
goto err_setup_vq;
}
- ops->set_vq_affinity(vdpa, i, &masks[i]);
+
+ if (ops->set_vq_affinity)
+ ops->set_vq_affinity(vdpa, i, &masks[i]);
}
cb.callback = virtio_vdpa_config_cb;
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
index e8c7fa68d3cc..d7fbc3c146e1 100644
--- a/drivers/w1/masters/sgi_w1.c
+++ b/drivers/w1/masters/sgi_w1.c
@@ -93,7 +93,7 @@ static int sgi_w1_probe(struct platform_device *pdev)
pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
- strlcpy(sdev->dev_id, pdata->dev_id, sizeof(sdev->dev_id));
+ strscpy(sdev->dev_id, pdata->dev_id, sizeof(sdev->dev_id));
sdev->bus_master.dev_id = sdev->dev_id;
}
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 687753889c34..32b8a757744e 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -71,8 +71,8 @@ config W1_SLAVE_DS2805
help
Say Y here if you want to use a 1-wire
is a 112-byte user-programmable EEPROM is
- organized as 7 pages of 16 bytes each with 64bit
- unique number. Requires OverDrive Speed to talk to.
+ organized as 7 pages of 16 bytes each with 64bit
+ unique number. Requires OverDrive Speed to talk to.
config W1_SLAVE_DS2430
tristate "256b EEPROM family support (DS2430)"
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index ca64f99c8f3d..e008c27b3db9 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -66,8 +66,6 @@ static int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
size_t count;
while (retries--) {
- crc = 0;
-
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_RECALL_MEMORY;
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 067692626cf0..c85e80c7e130 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -284,7 +284,7 @@ static int read_powermode(struct w1_slave *sl);
* trigger_bulk_read() - function to trigger a bulk read on the bus
* @dev_master: the device master of the bus
*
- * Send a SKIP ROM follow by a CONVERT T commmand on the bus.
+ * Send a SKIP ROM followed by a CONVERT T command on the bus.
* It also set the status flag in each slave &struct w1_therm_family_data
* to signal that a conversion is in progress.
*
@@ -454,7 +454,7 @@ static const struct hwmon_channel_info w1_temp = {
.config = w1_temp_config,
};
-static const struct hwmon_channel_info *w1_info[] = {
+static const struct hwmon_channel_info * const w1_info[] = {
&w1_temp,
NULL
};
@@ -1159,29 +1159,26 @@ static int convert_t(struct w1_slave *sl, struct therm_info *info)
w1_write_8(dev_master, W1_CONVERT_TEMP);
- if (strong_pullup) { /*some device need pullup */
+ if (SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
+ ret = w1_poll_completion(dev_master, W1_POLL_CONVERT_TEMP);
+ if (ret) {
+ dev_dbg(&sl->dev, "%s: Timeout\n", __func__);
+ goto mt_unlock;
+ }
+ mutex_unlock(&dev_master->bus_mutex);
+ } else if (!strong_pullup) { /*no device need pullup */
sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
goto mt_unlock;
}
mutex_unlock(&dev_master->bus_mutex);
- } else { /*no device need pullup */
- if (SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
- ret = w1_poll_completion(dev_master, W1_POLL_CONVERT_TEMP);
- if (ret) {
- dev_dbg(&sl->dev, "%s: Timeout\n", __func__);
- goto mt_unlock;
- }
- mutex_unlock(&dev_master->bus_mutex);
- } else {
- /* Fixed delay */
- mutex_unlock(&dev_master->bus_mutex);
- sleep_rem = msleep_interruptible(t_conv);
- if (sleep_rem != 0) {
- ret = -EINTR;
- goto dec_refcnt;
- }
+ } else { /*some device need pullup */
+ mutex_unlock(&dev_master->bus_mutex);
+ sleep_rem = msleep_interruptible(t_conv);
+ if (sleep_rem != 0) {
+ ret = -EINTR;
+ goto dec_refcnt;
}
}
ret = read_scratchpad(sl, info);
@@ -1515,7 +1512,7 @@ static int trigger_bulk_read(struct w1_master *dev_master)
if (bulk_read_support(sl)) {
int t_cur = conversion_time(sl);
- t_conv = t_cur > t_conv ? t_cur : t_conv;
+ t_conv = max(t_cur, t_conv);
strong_pullup = strong_pullup ||
(w1_strong_pullup == 2 ||
(!SLAVE_POWERMODE(sl) &&
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 9d199fed9628..5353cbd75126 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -32,7 +32,7 @@ static int w1_timeout = 10;
module_param_named(timeout, w1_timeout, int, 0);
MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
-static int w1_timeout_us = 0;
+static int w1_timeout_us;
module_param_named(timeout_us, w1_timeout_us, int, 0);
MODULE_PARM_DESC(timeout_us,
"time in microseconds between automatic slave searches");
@@ -58,11 +58,6 @@ MODULE_PARM_DESC(slave_ttl,
DEFINE_MUTEX(w1_mlock);
LIST_HEAD(w1_masters);
-static int w1_master_match(struct device *dev, struct device_driver *drv)
-{
- return 1;
-}
-
static int w1_master_probe(struct device *dev)
{
return -ENODEV;
@@ -174,7 +169,6 @@ static int w1_uevent(const struct device *dev, struct kobj_uevent_env *env);
static struct bus_type w1_bus_type = {
.name = "w1",
- .match = w1_master_match,
.uevent = w1_uevent,
};
@@ -301,17 +295,13 @@ static ssize_t w1_master_attribute_show_pointer(struct device *dev, struct devic
static ssize_t w1_master_attribute_show_timeout(struct device *dev, struct device_attribute *attr, char *buf)
{
- ssize_t count;
- count = sprintf(buf, "%d\n", w1_timeout);
- return count;
+ return sprintf(buf, "%d\n", w1_timeout);
}
static ssize_t w1_master_attribute_show_timeout_us(struct device *dev,
struct device_attribute *attr, char *buf)
{
- ssize_t count;
- count = sprintf(buf, "%d\n", w1_timeout_us);
- return count;
+ return sprintf(buf, "%d\n", w1_timeout_us);
}
static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev,
@@ -501,7 +491,7 @@ static ssize_t w1_master_attribute_store_remove(struct device *dev,
struct w1_master *md = dev_to_w1_master(dev);
struct w1_reg_num rn;
struct w1_slave *sl;
- ssize_t result = count;
+ ssize_t result;
if (w1_atoreg_num(dev, buf, count, &rn))
return -EINVAL;
@@ -702,6 +692,7 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
dev_err(&sl->dev,
"Device registration [%s] failed. err=%d\n",
dev_name(&sl->dev), err);
+ of_node_put(sl->dev.of_node);
put_device(&sl->dev);
return err;
}
@@ -830,49 +821,47 @@ int w1_slave_detach(struct w1_slave *sl)
struct w1_master *w1_search_master_id(u32 id)
{
- struct w1_master *dev;
- int found = 0;
+ struct w1_master *dev = NULL, *iter;
mutex_lock(&w1_mlock);
- list_for_each_entry(dev, &w1_masters, w1_master_entry) {
- if (dev->id == id) {
- found = 1;
- atomic_inc(&dev->refcnt);
+ list_for_each_entry(iter, &w1_masters, w1_master_entry) {
+ if (iter->id == id) {
+ dev = iter;
+ atomic_inc(&iter->refcnt);
break;
}
}
mutex_unlock(&w1_mlock);
- return (found)?dev:NULL;
+ return dev;
}
struct w1_slave *w1_search_slave(struct w1_reg_num *id)
{
struct w1_master *dev;
- struct w1_slave *sl = NULL;
- int found = 0;
+ struct w1_slave *sl = NULL, *iter;
mutex_lock(&w1_mlock);
list_for_each_entry(dev, &w1_masters, w1_master_entry) {
mutex_lock(&dev->list_mutex);
- list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
- if (sl->reg_num.family == id->family &&
- sl->reg_num.id == id->id &&
- sl->reg_num.crc == id->crc) {
- found = 1;
+ list_for_each_entry(iter, &dev->slist, w1_slave_entry) {
+ if (iter->reg_num.family == id->family &&
+ iter->reg_num.id == id->id &&
+ iter->reg_num.crc == id->crc) {
+ sl = iter;
atomic_inc(&dev->refcnt);
- atomic_inc(&sl->refcnt);
+ atomic_inc(&iter->refcnt);
break;
}
}
mutex_unlock(&dev->list_mutex);
- if (found)
+ if (sl)
break;
}
mutex_unlock(&w1_mlock);
- return (found)?sl:NULL;
+ return sl;
}
void w1_reconnect_slaves(struct w1_family *f, int attach)
@@ -1263,10 +1252,10 @@ err_out_exit_init:
static void __exit w1_fini(void)
{
- struct w1_master *dev;
+ struct w1_master *dev, *n;
/* Set netlink removal messages and some cleanup */
- list_for_each_entry(dev, &w1_masters, w1_master_entry)
+ list_for_each_entry_safe(dev, n, &w1_masters, w1_master_entry)
__w1_remove_master_device(dev);
w1_fini_netlink();
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index f22138709bf5..ee97d89dfc11 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -304,6 +304,24 @@ config XILINX_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called of_xilinx_wdt.
+config XILINX_WINDOW_WATCHDOG
+ tristate "Xilinx window watchdog timer"
+ depends on HAS_IOMEM
+ depends on ARM64
+ select WATCHDOG_CORE
+ help
+ Window watchdog driver for the versal_wwdt IP core.
+ The window watchdog timer (WWDT) contains a closed (first) and
+ an open (second) window, each 32 bits wide. The watchdog must be
+ written within a predefined window of time, i.e. neither too soon
+ nor too late: the WWDT has to be restarted within the open window
+ time. If software tries to restart the WWDT outside of the open
+ window period, it generates a reset.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xilinx_wwdt.
+
config ZIIRAVE_WATCHDOG
tristate "Zodiac RAVE Watchdog Timer"
depends on I2C
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index b4c4ccf2d703..3633f5b98236 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -157,6 +157,7 @@ obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
# MicroBlaze Architecture
obj-$(CONFIG_XILINX_WATCHDOG) += of_xilinx_wdt.o
+obj-$(CONFIG_XILINX_WINDOW_WATCHDOG) += xilinx_wwdt.o
# MIPS Architecture
obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 38e26f160b9a..59dfd7f6bf0b 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Watchdog driver for Cirrus Logic EP93xx family of devices.
*
@@ -11,10 +12,6 @@
* Copyright (c) 2012 H Hartley Sweeten <hsweeten@visionengravers.com>
* Convert to a platform device and use the watchdog framework API
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* This watchdog fires after 250msec, which is a too short interval
* for us to rely on the user space daemon alone. So we ping the
* wdt each ~200msec and eventually stop doing it if the user space
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 4a22fe152086..6955c693b5fd 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* IBM Automatic Server Restart driver.
*
@@ -6,8 +7,6 @@
* Based on driver written by Pete Reynolds.
* Copyright (c) IBM Corporation, 1998-2004.
*
- * This software may be used and distributed according to the terms
- * of the GNU Public License, incorporated herein by reference.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/loongson1_wdt.c b/drivers/watchdog/loongson1_wdt.c
index 3c651c50a98c..4ac7810a314d 100644
--- a/drivers/watchdog/loongson1_wdt.c
+++ b/drivers/watchdog/loongson1_wdt.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
@@ -112,7 +113,7 @@ static int ls1x_wdt_probe(struct platform_device *pdev)
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- drvdata->clk = devm_clk_get_enabled(dev, pdev->name);
+ drvdata->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(drvdata->clk))
return PTR_ERR(drvdata->clk);
@@ -144,10 +145,20 @@ static int ls1x_wdt_probe(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id ls1x_wdt_dt_ids[] = {
+ { .compatible = "loongson,ls1b-wdt", },
+ { .compatible = "loongson,ls1c-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls1x_wdt_dt_ids);
+#endif
+
static struct platform_driver ls1x_wdt_driver = {
.probe = ls1x_wdt_probe,
.driver = {
.name = "ls1x-wdt",
+ .of_match_table = of_match_ptr(ls1x_wdt_dt_ids),
},
};
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index f388a769dbd3..062ea3e6497e 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/watchdog/m54xx_wdt.c
*
@@ -11,9 +12,6 @@
* Copyright 2004 (c) MontaVista, Software, Inc.
* Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 9e1541cfae0d..21935f9620e4 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/char/watchdog/max63xx_wdt.c
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2009 Marc Zyngier <maz@misterjones.org>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* This driver assumes the watchdog pins are memory mapped (as it is
* the case for the Arcom Zeus). Should it be connected over GPIOs or
* another interface, some abstraction will have to be introduced.
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 6340a1f5f471..b7b1da3c932d 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* MOXA ART SoCs watchdog driver.
*
@@ -5,9 +6,6 @@
*
* Jonas Jensen <jonas.jensen@gmail.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/watchdog/octeon-wdt-nmi.S b/drivers/watchdog/octeon-wdt-nmi.S
index 97f6eb7b5a8e..e308cc743920 100644
--- a/drivers/watchdog/octeon-wdt-nmi.S
+++ b/drivers/watchdog/octeon-wdt-nmi.S
@@ -1,8 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2007-2017 Cavium, Inc.
*/
#include <asm/asm.h>
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 5ec2dd8fd5fa..1fe583e8a95b 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/watchdog/orion_wdt.c
*
@@ -5,9 +6,6 @@
*
* Author: Sylver Bruneau <sylver.bruneau@googlemail.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/rtd119x_wdt.c b/drivers/watchdog/rtd119x_wdt.c
index 95c8d7abce42..984905695dde 100644
--- a/drivers/watchdog/rtd119x_wdt.c
+++ b/drivers/watchdog/rtd119x_wdt.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Realtek RTD129x watchdog
*
* Copyright (c) 2017 Andreas Färber
*
- * SPDX-License-Identifier: GPL-2.0+
*/
#include <linux/bitops.h>
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 13db71e16583..b8eb8d5ca1af 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Watchdog driver for SBC-FITPC2 board
*
@@ -5,9 +6,6 @@
*
* Adapted from the IXP2000 watchdog driver by Deepak Saxena.
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#define pr_fmt(fmt) KBUILD_MODNAME " WATCHDOG: " fmt
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 14f8d8d90920..2bd3dc25cb03 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -96,7 +96,7 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev)
sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) {
return efch_mmio;
- } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ } else if ((dev->vendor == PCI_VENDOR_ID_AMD || dev->vendor == PCI_VENDOR_ID_HYGON) &&
((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
dev->revision >= 0x41) ||
(dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
@@ -579,6 +579,8 @@ static const struct pci_device_id sp5100_tco_pci_tbl[] = {
PCI_ANY_ID, },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID,
PCI_ANY_ID, },
+ { PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID,
+ PCI_ANY_ID, },
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
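Hygon SMBus bridges reuse the AMD KERNCZ device ID, so the layout probe above now accepts either vendor. A minimal sketch of the extended vendor test, using only constants already present in the hunk:

#include <linux/pci.h>

/* Sketch: the EFCH register-layout checks match AMD and Hygon bridges alike. */
static bool sp5100_vendor_ok(const struct pci_dev *dev)
{
	return dev->vendor == PCI_VENDOR_ID_AMD ||
	       dev->vendor == PCI_VENDOR_ID_HYGON;
}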
diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
index 0ea554c7cda5..0099403f4992 100644
--- a/drivers/watchdog/ts4800_wdt.c
+++ b/drivers/watchdog/ts4800_wdt.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Watchdog driver for TS-4800 based boards
*
* Copyright (c) 2015 - Savoir-faire Linux
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index bf918f5fa131..3d57670befe1 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Watchdog driver for Technologic Systems TS-72xx based SBCs
* (TS-7200, TS-7250 and TS-7260). These boards have external
@@ -8,9 +9,6 @@
*
* This driver is based on ep93xx_wdt and wm831x_wdt drivers.
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/xilinx_wwdt.c b/drivers/watchdog/xilinx_wwdt.c
new file mode 100644
index 000000000000..2585038d5575
--- /dev/null
+++ b/drivers/watchdog/xilinx_wwdt.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Window watchdog device driver for Xilinx Versal WWDT
+ *
+ * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/watchdog.h>
+
+/* Max timeout is calculated at 100MHz source clock */
+#define XWWDT_DEFAULT_TIMEOUT 42
+#define XWWDT_MIN_TIMEOUT 1
+
+/* Register offsets for the WWDT device */
+#define XWWDT_MWR_OFFSET 0x00
+#define XWWDT_ESR_OFFSET 0x04
+#define XWWDT_FCR_OFFSET 0x08
+#define XWWDT_FWR_OFFSET 0x0c
+#define XWWDT_SWR_OFFSET 0x10
+
+/* Master Write Control Register Masks */
+#define XWWDT_MWR_MASK BIT(0)
+
+/* Enable and Status Register Masks */
+#define XWWDT_ESR_WINT_MASK BIT(16)
+#define XWWDT_ESR_WSW_MASK BIT(8)
+#define XWWDT_ESR_WEN_MASK BIT(0)
+
+#define XWWDT_CLOSE_WINDOW_PERCENT 50
+
+static int wwdt_timeout;
+static int closed_window_percent;
+
+module_param(wwdt_timeout, int, 0);
+MODULE_PARM_DESC(wwdt_timeout,
+ "Watchdog time in seconds. (default="
+ __MODULE_STRING(XWWDT_DEFAULT_TIMEOUT) ")");
+module_param(closed_window_percent, int, 0);
+MODULE_PARM_DESC(closed_window_percent,
+ "Watchdog closed window percentage. (default="
+ __MODULE_STRING(XWWDT_CLOSE_WINDOW_PERCENT) ")");
+/**
+ * struct xwwdt_device - Watchdog device structure
+ * @base: base io address of WDT device
+ * @spinlock: spinlock for IO register access
+ * @xilinx_wwdt_wdd: watchdog device structure
+ * @freq: source clock frequency of WWDT
+ * @close_percent: Closed window percent
+ */
+struct xwwdt_device {
+ void __iomem *base;
+ spinlock_t spinlock; /* spinlock for register handling */
+ struct watchdog_device xilinx_wwdt_wdd;
+ unsigned long freq;
+ u32 close_percent;
+};
+
+static int xilinx_wwdt_start(struct watchdog_device *wdd)
+{
+ struct xwwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wwdt_wdd = &xdev->xilinx_wwdt_wdd;
+ u64 time_out, closed_timeout, open_timeout;
+ u32 control_status_reg;
+
+ /* Calculate timeout count */
+ time_out = xdev->freq * wdd->timeout;
+ closed_timeout = (time_out * xdev->close_percent) / 100;
+ open_timeout = time_out - closed_timeout;
+ wdd->min_hw_heartbeat_ms = xdev->close_percent * 10 * wdd->timeout;
+
+ spin_lock(&xdev->spinlock);
+
+ iowrite32(XWWDT_MWR_MASK, xdev->base + XWWDT_MWR_OFFSET);
+ iowrite32(~(u32)XWWDT_ESR_WEN_MASK, xdev->base + XWWDT_ESR_OFFSET);
+ iowrite32((u32)closed_timeout, xdev->base + XWWDT_FWR_OFFSET);
+ iowrite32((u32)open_timeout, xdev->base + XWWDT_SWR_OFFSET);
+
+ /* Enable the window watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWWDT_ESR_OFFSET);
+ control_status_reg |= XWWDT_ESR_WEN_MASK;
+ iowrite32(control_status_reg, xdev->base + XWWDT_ESR_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ dev_dbg(xilinx_wwdt_wdd->parent, "Watchdog Started!\n");
+
+ return 0;
+}
+
+static int xilinx_wwdt_keepalive(struct watchdog_device *wdd)
+{
+ struct xwwdt_device *xdev = watchdog_get_drvdata(wdd);
+ u32 control_status_reg;
+
+ spin_lock(&xdev->spinlock);
+
+ /* Enable write access control bit for the window watchdog */
+ iowrite32(XWWDT_MWR_MASK, xdev->base + XWWDT_MWR_OFFSET);
+
+ /* Trigger restart kick to watchdog */
+ control_status_reg = ioread32(xdev->base + XWWDT_ESR_OFFSET);
+ control_status_reg |= XWWDT_ESR_WSW_MASK;
+ iowrite32(control_status_reg, xdev->base + XWWDT_ESR_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ return 0;
+}
+
+static const struct watchdog_info xilinx_wwdt_ident = {
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .firmware_version = 1,
+ .identity = "xlnx_window watchdog",
+};
+
+static const struct watchdog_ops xilinx_wwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = xilinx_wwdt_start,
+ .ping = xilinx_wwdt_keepalive,
+};
+
+static int xwwdt_probe(struct platform_device *pdev)
+{
+ struct watchdog_device *xilinx_wwdt_wdd;
+ struct device *dev = &pdev->dev;
+ struct xwwdt_device *xdev;
+ struct clk *clk;
+ int ret;
+
+ xdev = devm_kzalloc(dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xilinx_wwdt_wdd = &xdev->xilinx_wwdt_wdd;
+ xilinx_wwdt_wdd->info = &xilinx_wwdt_ident;
+ xilinx_wwdt_wdd->ops = &xilinx_wwdt_ops;
+ xilinx_wwdt_wdd->parent = dev;
+
+ xdev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xdev->base))
+ return PTR_ERR(xdev->base);
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ xdev->freq = clk_get_rate(clk);
+ if (!xdev->freq)
+ return -EINVAL;
+
+ xilinx_wwdt_wdd->min_timeout = XWWDT_MIN_TIMEOUT;
+ xilinx_wwdt_wdd->timeout = XWWDT_DEFAULT_TIMEOUT;
+ xilinx_wwdt_wdd->max_hw_heartbeat_ms = 1000 * xilinx_wwdt_wdd->timeout;
+
+ if (closed_window_percent == 0 || closed_window_percent >= 100)
+ xdev->close_percent = XWWDT_CLOSE_WINDOW_PERCENT;
+ else
+ xdev->close_percent = closed_window_percent;
+
+ watchdog_init_timeout(xilinx_wwdt_wdd, wwdt_timeout, &pdev->dev);
+ spin_lock_init(&xdev->spinlock);
+ watchdog_set_drvdata(xilinx_wwdt_wdd, xdev);
+ watchdog_set_nowayout(xilinx_wwdt_wdd, 1);
+
+ ret = devm_watchdog_register_device(dev, xilinx_wwdt_wdd);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Xilinx window watchdog Timer with timeout %ds\n",
+ xilinx_wwdt_wdd->timeout);
+
+ return 0;
+}
+
+static const struct of_device_id xwwdt_of_match[] = {
+ { .compatible = "xlnx,versal-wwdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xwwdt_of_match);
+
+static struct platform_driver xwwdt_driver = {
+ .probe = xwwdt_probe,
+ .driver = {
+ .name = "Xilinx window watchdog",
+ .of_match_table = xwwdt_of_match,
+ },
+};
+
+module_platform_driver(xwwdt_driver);
+
+MODULE_AUTHOR("Neeli Srinivas <srinivas.neeli@amd.com>");
+MODULE_DESCRIPTION("Xilinx window watchdog driver");
+MODULE_LICENSE("GPL");
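The new driver splits the configured timeout into a closed window (pings are rejected) followed by an open window (pings are accepted). A minimal sketch of the arithmetic performed by xilinx_wwdt_start() above, with the register writes left out:

#include <linux/types.h>

/*
 * Sketch: derive the FWR (closed) and SWR (open) window counts from the
 * source clock rate, the timeout in seconds and the closed-window percent,
 * as xilinx_wwdt_start() does before programming the registers.
 */
static void xwwdt_window_counts(unsigned long freq, unsigned int timeout_s,
				u32 close_percent, u64 *closed, u64 *open)
{
	u64 total = (u64)freq * timeout_s;	/* timeout in clock cycles */

	*closed = (total * close_percent) / 100;
	*open = total - *closed;
	/*
	 * min_hw_heartbeat_ms in the driver is close_percent * 10 * timeout_s,
	 * i.e. the closed window expressed in milliseconds.
	 */
}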
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index 21ca08a694ee..5ed33df68e9a 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -731,7 +731,7 @@ static struct i2c_driver ziirave_wdt_driver = {
.name = "ziirave_wdt",
.of_match_table = zrv_wdt_of_match,
},
- .probe_new = ziirave_wdt_probe,
+ .probe = ziirave_wdt_probe,
.remove = ziirave_wdt_remove,
.id_table = ziirave_wdt_id,
};
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 9c7fd6fd8095..e1c45341719b 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -413,17 +413,19 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
afs_op_set_vnode(op, 0, vnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
- op->store.write_iter = iter;
op->store.pos = pos;
op->store.size = size;
- op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
op->store.laundering = laundering;
- op->mtime = vnode->netfs.inode.i_mtime;
op->flags |= AFS_OPERATION_UNINTR;
op->ops = &afs_store_data_operation;
try_next_key:
afs_begin_vnode_operation(op);
+
+ op->store.write_iter = iter;
+ op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
+ op->mtime = vnode->netfs.inode.i_mtime;
+
afs_wait_for_operation(op);
switch (op->error) {
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 6bb251a4d613..59cbfb80edbd 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -187,16 +187,42 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
struct inode *inode = rreq->inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_layout *lo = &ci->i_layout;
+ unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
+ loff_t end = rreq->start + rreq->len, new_end;
+ struct ceph_netfs_request_data *priv = rreq->netfs_priv;
+ unsigned long max_len;
u32 blockoff;
- u64 blockno;
- /* Expand the start downward */
- blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
- rreq->start = blockno * lo->stripe_unit;
- rreq->len += blockoff;
+ if (priv) {
+ /* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
+ if (priv->file_ra_disabled)
+ max_pages = 0;
+ else
+ max_pages = priv->file_ra_pages;
+
+ }
- /* Now, round up the length to the next block */
- rreq->len = roundup(rreq->len, lo->stripe_unit);
+ /* Readahead is disabled */
+ if (!max_pages)
+ return;
+
+ max_len = max_pages << PAGE_SHIFT;
+
+ /*
+ * Try to expand the length forward by rounding it up to the next
+ * block, but do not exceed the file size, unless the original
+ * request already exceeds it.
+ */
+ new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
+ if (new_end > end && new_end <= rreq->start + max_len)
+ rreq->len = new_end - rreq->start;
+
+ /* Try to expand the start downward */
+ div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
+ if (rreq->len + blockoff <= max_len) {
+ rreq->start -= blockoff;
+ rreq->len += blockoff;
+ }
}
static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
@@ -362,18 +388,28 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
struct inode *inode = rreq->inode;
int got = 0, want = CEPH_CAP_FILE_CACHE;
+ struct ceph_netfs_request_data *priv;
int ret = 0;
if (rreq->origin != NETFS_READAHEAD)
return 0;
+ priv = kzalloc(sizeof(*priv), GFP_NOFS);
+ if (!priv)
+ return -ENOMEM;
+
if (file) {
struct ceph_rw_context *rw_ctx;
struct ceph_file_info *fi = file->private_data;
+ priv->file_ra_pages = file->f_ra.ra_pages;
+ priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;
+
rw_ctx = ceph_find_rw_context(fi);
- if (rw_ctx)
+ if (rw_ctx) {
+ rreq->netfs_priv = priv;
return 0;
+ }
}
/*
@@ -383,27 +419,40 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
if (ret < 0) {
dout("start_read %p, error getting cap\n", inode);
- return ret;
+ goto out;
}
if (!(got & want)) {
dout("start_read %p, no cache cap\n", inode);
- return -EACCES;
+ ret = -EACCES;
+ goto out;
+ }
+ if (ret == 0) {
+ ret = -EACCES;
+ goto out;
}
- if (ret == 0)
- return -EACCES;
- rreq->netfs_priv = (void *)(uintptr_t)got;
- return 0;
+ priv->caps = got;
+ rreq->netfs_priv = priv;
+
+out:
+ if (ret < 0)
+ kfree(priv);
+
+ return ret;
}
static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{
- struct ceph_inode_info *ci = ceph_inode(rreq->inode);
- int got = (uintptr_t)rreq->netfs_priv;
+ struct ceph_netfs_request_data *priv = rreq->netfs_priv;
+
+ if (!priv)
+ return;
- if (got)
- ceph_put_cap_refs(ci, got);
+ if (priv->caps)
+ ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
+ kfree(priv);
+ rreq->netfs_priv = NULL;
}
const struct netfs_request_ops ceph_netfs_ops = {
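ceph_netfs_expand_readahead() now expands a request to stripe-unit boundaries only as far as the file size and the per-file readahead window allow, instead of unconditionally rounding to a full stripe unit. A minimal sketch of the new bounds arithmetic, using the names from the hunk above (the netfs_priv lookup is omitted):

#include <linux/math.h>
#include <linux/math64.h>
#include <linux/minmax.h>

/* Sketch: grow [start, start + len) towards stripe-unit boundaries, capped by
 * i_size (forward) and by max_len = ra_pages << PAGE_SHIFT (both directions). */
static void expand_readahead_range(u64 *start, u64 *len, u64 i_size,
				   u32 stripe_unit, u64 max_len)
{
	u64 end = *start + *len, new_end;
	u32 blockoff;

	/* Forward: round the end up to the next stripe unit, but not past EOF. */
	new_end = min(round_up(end, (u64)stripe_unit), i_size);
	if (new_end > end && new_end <= *start + max_len)
		*len = new_end - *start;

	/* Backward: pull the start down to a stripe boundary if it still fits. */
	div_u64_rem(*start, stripe_unit, &blockoff);
	if (*len + blockoff <= max_len) {
		*start -= blockoff;
		*len += blockoff;
	}
}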
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 2321e5ddb664..e2bb0d0072da 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3109,6 +3109,12 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
}
if (had & CEPH_CAP_FILE_WR) {
if (--ci->i_wr_ref == 0) {
+ /*
+ * The Fb caps will always be taken and released
+ * together with the Fw caps.
+ */
+ WARN_ON_ONCE(ci->i_wb_ref);
+
last++;
check_flushsnaps = true;
if (ci->i_wrbuffer_ref_head == 0 &&
@@ -3560,6 +3566,15 @@ static void handle_cap_grant(struct inode *inode,
}
BUG_ON(cap->issued & ~cap->implemented);
+ /* don't let check_caps skip sending a response to MDS for revoke msgs */
+ if (le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
+ cap->mds_wanted = 0;
+ if (cap == ci->i_auth_cap)
+ check_caps = 1; /* check auth cap only */
+ else
+ check_caps = 2; /* check all caps */
+ }
+
if (extra_info->inline_version > 0 &&
extra_info->inline_version >= ci->i_inline_version) {
ci->i_inline_version = extra_info->inline_version;
@@ -4086,6 +4101,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct cap_extra_info extra_info = {};
bool queue_trunc;
bool close_sessions = false;
+ bool do_cap_release = false;
dout("handle_caps from mds%d\n", session->s_mds);
@@ -4192,17 +4208,14 @@ void ceph_handle_caps(struct ceph_mds_session *session,
if (!inode) {
dout(" i don't have ino %llx\n", vino.ino);
- if (op == CEPH_CAP_OP_IMPORT) {
- cap = ceph_get_cap(mdsc, NULL);
- cap->cap_ino = vino.ino;
- cap->queue_release = 1;
- cap->cap_id = le64_to_cpu(h->cap_id);
- cap->mseq = mseq;
- cap->seq = seq;
- cap->issue_seq = seq;
- spin_lock(&session->s_cap_lock);
- __ceph_queue_cap_release(session, cap);
- spin_unlock(&session->s_cap_lock);
+ switch (op) {
+ case CEPH_CAP_OP_IMPORT:
+ case CEPH_CAP_OP_REVOKE:
+ case CEPH_CAP_OP_GRANT:
+ do_cap_release = true;
+ break;
+ default:
+ break;
}
goto flush_cap_releases;
}
@@ -4252,6 +4265,14 @@ void ceph_handle_caps(struct ceph_mds_session *session,
inode, ceph_ino(inode), ceph_snap(inode),
session->s_mds);
spin_unlock(&ci->i_ceph_lock);
+ switch (op) {
+ case CEPH_CAP_OP_REVOKE:
+ case CEPH_CAP_OP_GRANT:
+ do_cap_release = true;
+ break;
+ default:
+ break;
+ }
goto flush_cap_releases;
}
@@ -4302,6 +4323,18 @@ flush_cap_releases:
* along for the mds (who clearly thinks we still have this
* cap).
*/
+ if (do_cap_release) {
+ cap = ceph_get_cap(mdsc, NULL);
+ cap->cap_ino = vino.ino;
+ cap->queue_release = 1;
+ cap->cap_id = le64_to_cpu(h->cap_id);
+ cap->mseq = mseq;
+ cap->seq = seq;
+ cap->issue_seq = seq;
+ spin_lock(&session->s_cap_lock);
+ __ceph_queue_cap_release(session, cap);
+ spin_unlock(&session->s_cap_lock);
+ }
ceph_flush_cap_releases(mdsc, session);
goto done;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index cb67ac821f0e..4a2b39d9a61a 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -886,7 +886,8 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_args.mknod.mode = cpu_to_le32(mode);
req->r_args.mknod.rdev = cpu_to_le32(rdev);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
if (as_ctx.pagelist) {
req->r_pagelist = as_ctx.pagelist;
@@ -953,7 +954,8 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
if (as_ctx.pagelist) {
req->r_pagelist = as_ctx.pagelist;
@@ -1022,7 +1024,8 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_args.mkdir.mode = cpu_to_le32(mode);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
if (as_ctx.pagelist) {
req->r_pagelist = as_ctx.pagelist;
@@ -1079,7 +1082,7 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
req->r_parent = dir;
ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
/* release LINK_SHARED on source inode (mds will lock it) */
req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
@@ -1218,7 +1221,7 @@ retry:
req->r_num_caps = 2;
req->r_parent = dir;
ihold(dir);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req->r_inode_drop = ceph_drop_caps_for_unlink(inode);
@@ -1320,9 +1323,9 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
req->r_parent = new_dir;
ihold(new_dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
- req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
/* release LINK_RDCACHE on source inode (mds will lock it) */
req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index b1925232dc08..63efe5389783 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -791,7 +791,8 @@ retry:
if (flags & O_CREAT) {
struct ceph_file_layout lo;
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
if (as_ctx.pagelist) {
req->r_pagelist = as_ctx.pagelist;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4c0f22acf53d..66048a86c480 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -645,6 +645,7 @@ bad:
err = -EIO;
out_bad:
pr_err("mds parse_reply err %d\n", err);
+ ceph_msg_dump(msg);
return err;
}
@@ -3538,6 +3539,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
bad:
pr_err("mdsc_handle_forward decode error err=%d\n", err);
+ ceph_msg_dump(msg);
}
static int __decode_session_metadata(void **p, void *end,
@@ -5258,6 +5260,7 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
bad:
pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
ceph_umount_begin(mdsc->fsc->sb);
+ ceph_msg_dump(msg);
err_out:
mutex_lock(&mdsc->mutex);
mdsc->mdsmap_err = err;
@@ -5326,6 +5329,7 @@ bad_unlock:
bad:
pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
ceph_umount_begin(mdsc->fsc->sb);
+ ceph_msg_dump(msg);
return;
}
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index c47347d2e84e..cce78d769f55 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -36,6 +36,14 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
s32 items = 0;
s32 len;
+ /* Do not send the metrics until the MDS rank is ready */
+ mutex_lock(&mdsc->mutex);
+ if (ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) != CEPH_MDS_STATE_ACTIVE) {
+ mutex_unlock(&mdsc->mutex);
+ return false;
+ }
+ mutex_unlock(&mdsc->mutex);
+
len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
+ sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
+ sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 2e73ba62bd7a..343d738448dc 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -675,14 +675,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
return 0;
}
- /* Fb cap still in use, delay it */
- if (ci->i_wb_ref) {
+ /*
+ * Defer flushing the capsnap if the dirty buffer has not been flushed yet,
+ * and trigger writeback of the buffer immediately.
+ */
+ if (ci->i_wrbuffer_ref) {
dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
"used WRBUFFER, delaying\n", __func__, inode,
ceph_vinop(inode), capsnap, capsnap->context,
capsnap->context->seq, ceph_cap_string(capsnap->dirty),
capsnap->size);
- capsnap->writing = 1;
+ ceph_queue_writeback(inode);
return 0;
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d24bf0db5234..3bfddf34d488 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -451,6 +451,19 @@ struct ceph_inode_info {
unsigned long i_work_mask;
};
+struct ceph_netfs_request_data {
+ int caps;
+
+ /*
+ * Maximum size of a file readahead request.
+ * The fadvise could update the bdi's default ra_pages.
+ */
+ unsigned int file_ra_pages;
+
+ /* Set it if fadvise disables file readahead entirely */
+ bool file_ra_disabled;
+};
+
static inline struct ceph_inode_info *
ceph_inode(const struct inode *inode)
{
diff --git a/fs/dax.c b/fs/dax.c
index 2ababb89918d..906ecbd541a3 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1148,7 +1148,7 @@ static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
if (!zero_edge) {
ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
if (ret)
- return ret;
+ return dax_mem2blk_err(ret);
}
if (copy_all) {
@@ -1310,7 +1310,7 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
out_unlock:
dax_read_unlock(id);
- return ret;
+ return dax_mem2blk_err(ret);
}
int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
@@ -1342,7 +1342,8 @@ static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
NULL);
if (ret < 0)
- return ret;
+ return dax_mem2blk_err(ret);
+
memset(kaddr + offset, 0, size);
if (iomap->flags & IOMAP_F_SHARED)
ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
@@ -1498,7 +1499,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
DAX_ACCESS, &kaddr, NULL);
- if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
+ if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {
map_len = dax_direct_access(dax_dev, pgoff,
PHYS_PFN(size), DAX_RECOVERY_WRITE,
&kaddr, NULL);
@@ -1506,7 +1507,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
recovery = true;
}
if (map_len < 0) {
- ret = map_len;
+ ret = dax_mem2blk_err(map_len);
break;
}
@@ -1830,7 +1831,6 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
vm_fault_t ret = VM_FAULT_FALLBACK;
pgoff_t max_pgoff;
void *entry;
- int error;
if (vmf->flags & FAULT_FLAG_WRITE)
iter.flags |= IOMAP_WRITE;
@@ -1877,7 +1877,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
}
iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
- while ((error = iomap_iter(&iter, ops)) > 0) {
+ while (iomap_iter(&iter, ops) > 0) {
if (iomap_length(&iter) < PMD_SIZE)
continue; /* actually breaks out of the loop */
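The dax_mem2blk_err() calls added above keep the memory-failure specific error code from leaking to block and filesystem callers. The helper itself is not shown in these hunks; assuming it follows the obvious translation, it amounts to:

#include <linux/errno.h>

/* Presumed shape of dax_mem2blk_err(): dax_direct_access() reports poisoned
 * memory as -EHWPOISON, while block/fs paths expect a plain I/O error. */
static inline int dax_mem2blk_err(int err)
{
	return err == -EHWPOISON ? -EIO : err;
}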
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 1f971c880dde..b7711888dd17 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -940,15 +940,6 @@ static const struct file_operations fops_str_wo = {
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
- * be returned.
*/
void debugfs_create_str(const char *name, umode_t mode,
struct dentry *parent, char **value)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 64b3860f50ee..8fd3b7f9fb88 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -30,12 +30,9 @@ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
unsigned char reason)
{
f2fs_build_fault_attr(sbi, 0, 0);
- set_ckpt_flags(sbi, CP_ERROR_FLAG);
- if (!end_io) {
+ if (!end_io)
f2fs_flush_merged_writes(sbi);
-
- f2fs_handle_stop(sbi, reason);
- }
+ f2fs_handle_critical_error(sbi, reason, end_io);
}
/*
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 11653fa79289..236d890f560b 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -55,6 +55,7 @@ struct f2fs_compress_ops {
int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
int (*decompress_pages)(struct decompress_io_ctx *dic);
+ bool (*is_level_valid)(int level);
};
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -308,17 +309,25 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
return 0;
}
+static bool lz4_is_level_valid(int lvl)
+{
+#ifdef CONFIG_F2FS_FS_LZ4HC
+ return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
+#else
+ return lvl == 0;
+#endif
+}
+
static const struct f2fs_compress_ops f2fs_lz4_ops = {
.init_compress_ctx = lz4_init_compress_ctx,
.destroy_compress_ctx = lz4_destroy_compress_ctx,
.compress_pages = lz4_compress_pages,
.decompress_pages = lz4_decompress_pages,
+ .is_level_valid = lz4_is_level_valid,
};
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
-#define F2FS_ZSTD_DEFAULT_CLEVEL 1
-
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
zstd_parameters params;
@@ -327,6 +336,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
unsigned int workspace_size;
unsigned char level = F2FS_I(cc->inode)->i_compress_level;
+ /* Need to retain this for backward compatibility */
if (!level)
level = F2FS_ZSTD_DEFAULT_CLEVEL;
@@ -477,6 +487,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
return 0;
}
+static bool zstd_is_level_valid(int lvl)
+{
+ return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
+}
+
static const struct f2fs_compress_ops f2fs_zstd_ops = {
.init_compress_ctx = zstd_init_compress_ctx,
.destroy_compress_ctx = zstd_destroy_compress_ctx,
@@ -484,6 +499,7 @@ static const struct f2fs_compress_ops f2fs_zstd_ops = {
.init_decompress_ctx = zstd_init_decompress_ctx,
.destroy_decompress_ctx = zstd_destroy_decompress_ctx,
.decompress_pages = zstd_decompress_pages,
+ .is_level_valid = zstd_is_level_valid,
};
#endif
@@ -542,6 +558,16 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}
+bool f2fs_is_compress_level_valid(int alg, int lvl)
+{
+ const struct f2fs_compress_ops *cops = f2fs_cops[alg];
+
+ if (cops->is_level_valid)
+ return cops->is_level_valid(lvl);
+
+ return lvl == 0;
+}
+
static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
@@ -743,8 +769,8 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
ret = -EFSCORRUPTED;
/* Avoid f2fs_commit_super in irq context */
- if (in_task)
- f2fs_save_errors(sbi, ERROR_FAIL_DECOMPRESSION);
+ if (!in_task)
+ f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
else
f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
goto out_release;
@@ -1215,6 +1241,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
unsigned int last_index = cc->cluster_size - 1;
loff_t psize;
int i, err;
+ bool quota_inode = IS_NOQUOTA(inode);
/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
@@ -1222,7 +1249,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
goto out_free;
}
- if (IS_NOQUOTA(inode)) {
+ if (quota_inode) {
/*
* We need to wait for node_write to avoid block allocation during
* checkpoint. This can only happen to quota writes which can cause
@@ -1344,7 +1371,7 @@ unlock_continue:
set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
f2fs_put_dnode(&dn);
- if (IS_NOQUOTA(inode))
+ if (quota_inode)
f2fs_up_read(&sbi->node_write);
else
f2fs_unlock_op(sbi);
@@ -1370,7 +1397,7 @@ out_put_cic:
out_put_dnode:
f2fs_put_dnode(&dn);
out_unlock_op:
- if (IS_NOQUOTA(inode))
+ if (quota_inode)
f2fs_up_read(&sbi->node_write);
else
f2fs_unlock_op(sbi);
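With the new ->is_level_valid() hook, level checking is dispatched per algorithm through f2fs_is_compress_level_valid(). A hypothetical caller in mount-option parsing (the corresponding fs/f2fs/super.c change is not part of the hunks shown) could reject an out-of-range compress_level up front:

/* Hypothetical use of the new helper while parsing "compress_level=%u". */
static int f2fs_check_compress_level(struct f2fs_sb_info *sbi,
				     int alg, unsigned int level)
{
	if (!f2fs_is_compress_level_valid(alg, level)) {
		f2fs_info(sbi, "invalid compress level: %u", level);
		return -EINVAL;
	}
	F2FS_OPTION(sbi).compress_level = level;
	return 0;
}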
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7165b1202f53..5882afe71d82 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -383,6 +383,17 @@ static void f2fs_write_end_io(struct bio *bio)
bio_put(bio);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static void f2fs_zone_write_end_io(struct bio *bio)
+{
+ struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;
+
+ bio->bi_private = io->bi_private;
+ complete(&io->zone_wait);
+ f2fs_write_end_io(bio);
+}
+#endif
+
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
block_t blk_addr, sector_t *sector)
{
@@ -639,6 +650,11 @@ int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
+#ifdef CONFIG_BLK_DEV_ZONED
+ init_completion(&sbi->write_io[i][j].zone_wait);
+ sbi->write_io[i][j].zone_pending_bio = NULL;
+ sbi->write_io[i][j].bi_private = NULL;
+#endif
}
}
@@ -965,6 +981,26 @@ alloc_new:
return 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ int devi = 0;
+
+ if (f2fs_is_multi_device(sbi)) {
+ devi = f2fs_target_device_index(sbi, blkaddr);
+ if (blkaddr < FDEV(devi).start_blk ||
+ blkaddr > FDEV(devi).end_blk) {
+ f2fs_err(sbi, "Invalid block %x", blkaddr);
+ return false;
+ }
+ blkaddr -= FDEV(devi).start_blk;
+ }
+ return bdev_zoned_model(FDEV(devi).bdev) == BLK_ZONED_HM &&
+ f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
+ (blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
+}
+#endif
+
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
@@ -975,6 +1011,16 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
f2fs_bug_on(sbi, is_read_io(fio->op));
f2fs_down_write(&io->io_rwsem);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
+ wait_for_completion_io(&io->zone_wait);
+ bio_put(io->zone_pending_bio);
+ io->zone_pending_bio = NULL;
+ io->bi_private = NULL;
+ }
+#endif
+
next:
if (fio->in_list) {
spin_lock(&io->io_lock);
@@ -1038,6 +1084,18 @@ skip:
if (fio->in_list)
goto next;
out:
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
+ is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
+ bio_get(io->bio);
+ reinit_completion(&io->zone_wait);
+ io->bi_private = io->bio->bi_private;
+ io->bio->bi_private = io;
+ io->bio->bi_end_io = f2fs_zone_write_end_io;
+ io->zone_pending_bio = io->bio;
+ __submit_merged_bio(io);
+ }
+#endif
if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
!f2fs_is_checkpoint_ready(sbi))
__submit_merged_bio(io);
@@ -2173,7 +2231,6 @@ submit_and_realloc:
f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
F2FS_BLKSIZE);
*last_block_in_bio = block_nr;
- goto out;
out:
*bio_ret = bio;
return ret;
@@ -2775,6 +2832,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
+ bool quota_inode = IS_NOQUOTA(inode);
int err = 0;
struct f2fs_io_info fio = {
.sbi = sbi,
@@ -2807,6 +2865,10 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
if (S_ISDIR(inode->i_mode) &&
!is_sbi_flag_set(sbi, SBI_IS_CLOSE))
goto redirty_out;
+
+ /* keep data pages in remount-ro mode */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
+ goto redirty_out;
goto out;
}
@@ -2832,19 +2894,19 @@ write:
goto out;
/* Dentry/quota blocks are controlled by checkpoint */
- if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
+ if (S_ISDIR(inode->i_mode) || quota_inode) {
/*
* We need to wait for node_write to avoid block allocation during
* checkpoint. This can only happen to quota writes which can cause
* the below discard race condition.
*/
- if (IS_NOQUOTA(inode))
+ if (quota_inode)
f2fs_down_read(&sbi->node_write);
fio.need_lock = LOCK_DONE;
err = f2fs_do_write_data_page(&fio);
- if (IS_NOQUOTA(inode))
+ if (quota_inode)
f2fs_up_read(&sbi->node_write);
goto done;
@@ -4067,7 +4129,6 @@ const struct address_space_operations f2fs_dblock_aops = {
.migrate_folio = filemap_migrate_folio,
.invalidate_folio = f2fs_invalidate_folio,
.release_folio = f2fs_release_folio,
- .direct_IO = noop_direct_IO,
.bmap = f2fs_bmap,
.swap_activate = f2fs_swap_activate,
.swap_deactivate = f2fs_swap_deactivate,
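The zoned-device path above serializes writes at zone boundaries: when the merged bio ends on the last block of a sequential zone it is kept as zone_pending_bio, and the next submission waits on zone_wait before reusing the queue. The boundary test itself, reduced to a single-device sketch (so the FDEV() index and start_blk adjustment drop out), is:

/* Sketch: true when blkaddr is the final block of its zone, assuming a
 * single host-managed zoned device as in is_end_zone_blkaddr() above. */
static bool blkaddr_is_zone_end(block_t blkaddr, unsigned int blocks_per_blkz)
{
	return blkaddr % blocks_per_blkz == blocks_per_blkz - 1;
}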
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 887e55988450..d635c58cf5a3 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -775,8 +775,15 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
{
int err = -EAGAIN;
- if (f2fs_has_inline_dentry(dir))
+ if (f2fs_has_inline_dentry(dir)) {
+ /*
+ * Should get i_xattr_sem to keep the lock order:
+ * i_xattr_sem -> inode_page lock used by f2fs_setxattr.
+ */
+ f2fs_down_read(&F2FS_I(dir)->i_xattr_sem);
err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
+ f2fs_up_read(&F2FS_I(dir)->i_xattr_sem);
+ }
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index d211ee89c158..c7cb2177b252 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -80,34 +80,34 @@ extern const char *f2fs_fault_name[FAULT_MAX];
/*
* For mount options
*/
-#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
-#define F2FS_MOUNT_DISCARD 0x00000004
-#define F2FS_MOUNT_NOHEAP 0x00000008
-#define F2FS_MOUNT_XATTR_USER 0x00000010
-#define F2FS_MOUNT_POSIX_ACL 0x00000020
-#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
-#define F2FS_MOUNT_INLINE_XATTR 0x00000080
-#define F2FS_MOUNT_INLINE_DATA 0x00000100
-#define F2FS_MOUNT_INLINE_DENTRY 0x00000200
-#define F2FS_MOUNT_FLUSH_MERGE 0x00000400
-#define F2FS_MOUNT_NOBARRIER 0x00000800
-#define F2FS_MOUNT_FASTBOOT 0x00001000
-#define F2FS_MOUNT_READ_EXTENT_CACHE 0x00002000
-#define F2FS_MOUNT_DATA_FLUSH 0x00008000
-#define F2FS_MOUNT_FAULT_INJECTION 0x00010000
-#define F2FS_MOUNT_USRQUOTA 0x00080000
-#define F2FS_MOUNT_GRPQUOTA 0x00100000
-#define F2FS_MOUNT_PRJQUOTA 0x00200000
-#define F2FS_MOUNT_QUOTA 0x00400000
-#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
-#define F2FS_MOUNT_RESERVE_ROOT 0x01000000
-#define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000
-#define F2FS_MOUNT_NORECOVERY 0x04000000
-#define F2FS_MOUNT_ATGC 0x08000000
-#define F2FS_MOUNT_MERGE_CHECKPOINT 0x10000000
-#define F2FS_MOUNT_GC_MERGE 0x20000000
-#define F2FS_MOUNT_COMPRESS_CACHE 0x40000000
-#define F2FS_MOUNT_AGE_EXTENT_CACHE 0x80000000
+#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000001
+#define F2FS_MOUNT_DISCARD 0x00000002
+#define F2FS_MOUNT_NOHEAP 0x00000004
+#define F2FS_MOUNT_XATTR_USER 0x00000008
+#define F2FS_MOUNT_POSIX_ACL 0x00000010
+#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000020
+#define F2FS_MOUNT_INLINE_XATTR 0x00000040
+#define F2FS_MOUNT_INLINE_DATA 0x00000080
+#define F2FS_MOUNT_INLINE_DENTRY 0x00000100
+#define F2FS_MOUNT_FLUSH_MERGE 0x00000200
+#define F2FS_MOUNT_NOBARRIER 0x00000400
+#define F2FS_MOUNT_FASTBOOT 0x00000800
+#define F2FS_MOUNT_READ_EXTENT_CACHE 0x00001000
+#define F2FS_MOUNT_DATA_FLUSH 0x00002000
+#define F2FS_MOUNT_FAULT_INJECTION 0x00004000
+#define F2FS_MOUNT_USRQUOTA 0x00008000
+#define F2FS_MOUNT_GRPQUOTA 0x00010000
+#define F2FS_MOUNT_PRJQUOTA 0x00020000
+#define F2FS_MOUNT_QUOTA 0x00040000
+#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00080000
+#define F2FS_MOUNT_RESERVE_ROOT 0x00100000
+#define F2FS_MOUNT_DISABLE_CHECKPOINT 0x00200000
+#define F2FS_MOUNT_NORECOVERY 0x00400000
+#define F2FS_MOUNT_ATGC 0x00800000
+#define F2FS_MOUNT_MERGE_CHECKPOINT 0x01000000
+#define F2FS_MOUNT_GC_MERGE 0x02000000
+#define F2FS_MOUNT_COMPRESS_CACHE 0x04000000
+#define F2FS_MOUNT_AGE_EXTENT_CACHE 0x08000000
#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -162,6 +162,7 @@ struct f2fs_mount_info {
int fs_mode; /* fs mode: LFS or ADAPTIVE */
int bggc_mode; /* bggc mode: off, on or sync */
int memory_mode; /* memory mode */
+ int errors; /* errors parameter */
int discard_unit; /*
* discard command's offset/size should
* be aligned to this unit: block,
@@ -185,21 +186,21 @@ struct f2fs_mount_info {
unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
};
-#define F2FS_FEATURE_ENCRYPT 0x0001
-#define F2FS_FEATURE_BLKZONED 0x0002
-#define F2FS_FEATURE_ATOMIC_WRITE 0x0004
-#define F2FS_FEATURE_EXTRA_ATTR 0x0008
-#define F2FS_FEATURE_PRJQUOTA 0x0010
-#define F2FS_FEATURE_INODE_CHKSUM 0x0020
-#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040
-#define F2FS_FEATURE_QUOTA_INO 0x0080
-#define F2FS_FEATURE_INODE_CRTIME 0x0100
-#define F2FS_FEATURE_LOST_FOUND 0x0200
-#define F2FS_FEATURE_VERITY 0x0400
-#define F2FS_FEATURE_SB_CHKSUM 0x0800
-#define F2FS_FEATURE_CASEFOLD 0x1000
-#define F2FS_FEATURE_COMPRESSION 0x2000
-#define F2FS_FEATURE_RO 0x4000
+#define F2FS_FEATURE_ENCRYPT 0x00000001
+#define F2FS_FEATURE_BLKZONED 0x00000002
+#define F2FS_FEATURE_ATOMIC_WRITE 0x00000004
+#define F2FS_FEATURE_EXTRA_ATTR 0x00000008
+#define F2FS_FEATURE_PRJQUOTA 0x00000010
+#define F2FS_FEATURE_INODE_CHKSUM 0x00000020
+#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x00000040
+#define F2FS_FEATURE_QUOTA_INO 0x00000080
+#define F2FS_FEATURE_INODE_CRTIME 0x00000100
+#define F2FS_FEATURE_LOST_FOUND 0x00000200
+#define F2FS_FEATURE_VERITY 0x00000400
+#define F2FS_FEATURE_SB_CHKSUM 0x00000800
+#define F2FS_FEATURE_CASEFOLD 0x00001000
+#define F2FS_FEATURE_COMPRESSION 0x00002000
+#define F2FS_FEATURE_RO 0x00004000
#define __F2FS_HAS_FEATURE(raw_super, mask) \
((raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -1175,6 +1176,7 @@ enum iostat_type {
/* other */
FS_DISCARD_IO, /* discard */
FS_FLUSH_IO, /* flush */
+ FS_ZONE_RESET_IO, /* zone reset */
NR_IO_TYPE,
};
@@ -1217,6 +1219,11 @@ struct f2fs_bio_info {
struct bio *bio; /* bios to merge */
sector_t last_block_in_bio; /* last block number */
struct f2fs_io_info fio; /* store buffered io info. */
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct completion zone_wait; /* condition value for the previous open zone to close */
+ struct bio *zone_pending_bio; /* pending bio for the previous zone */
+ void *bi_private; /* previous bi_private for pending bio */
+#endif
struct f2fs_rwsem io_rwsem; /* blocking op for bio */
spinlock_t io_lock; /* serialize DATA/NODE IOs */
struct list_head io_list; /* track fios */
@@ -1370,6 +1377,12 @@ enum {
	MEMORY_MODE_LOW, /* memory mode for low memory devices */
};
+enum errors_option {
+ MOUNT_ERRORS_READONLY, /* remount fs ro on errors */
+ MOUNT_ERRORS_CONTINUE, /* continue on errors */
+ MOUNT_ERRORS_PANIC, /* panic on errors */
+};
+
static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);
@@ -1427,6 +1440,8 @@ struct compress_data {
#define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000
+#define F2FS_ZSTD_DEFAULT_CLEVEL 1
+
#define COMPRESS_LEVEL_OFFSET 8
/* compress context */
@@ -1721,8 +1736,14 @@ struct f2fs_sb_info {
struct workqueue_struct *post_read_wq; /* post read workqueue */
- unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */
- spinlock_t error_lock; /* protect errors array */
+ /*
+ * If we are in irq context, defer updating the error information in
+ * the on-disk superblock to this work item.
+ */
+ struct work_struct s_error_work;
+ unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */
+ unsigned char stop_reason[MAX_STOP_REASON]; /* stop reason */
+ spinlock_t error_lock; /* protect errors/stop_reason array */
bool error_dirty; /* errors of sb is dirty */
struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
@@ -2941,6 +2962,8 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
#define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
#define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
+#define F2FS_QUOTA_DEFAULT_FL (F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL)
+
/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
@@ -3394,6 +3417,8 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
+#define F2FS_MIN_EXTRA_ATTR_SIZE (sizeof(__le32))
+
#define F2FS_TOTAL_EXTRA_ATTR_SIZE \
(offsetof(struct f2fs_inode, i_extra_end) - \
offsetof(struct f2fs_inode, i_extra_isize)) \
@@ -3432,7 +3457,6 @@ static inline bool __is_valid_data_blkaddr(block_t blkaddr)
* file.c
*/
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
-void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
@@ -3541,9 +3565,11 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
-void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
+void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+ bool irq_context);
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
+void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
@@ -3815,7 +3841,7 @@ void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
-int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
+int f2fs_resize_fs(struct file *filp, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);
/* victim selection function for cleaning and SSR */
@@ -4213,6 +4239,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
+bool f2fs_is_compress_level_valid(int alg, int lvl);
int __init f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
@@ -4277,6 +4304,7 @@ static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
/* not support compression */
return false;
}
+static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
static inline struct page *f2fs_compress_control_page(struct page *page)
{
WARN_ON_ONCE(1);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 2435111a8532..093039dee992 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -149,8 +149,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
zero_user_segment(page, offset, PAGE_SIZE);
}
set_page_dirty(page);
- if (!PageUptodate(page))
- SetPageUptodate(page);
f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
f2fs_update_time(sbi, REQ_TIME);
@@ -546,7 +544,8 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
if (err)
return err;
- filp->f_mode |= FMODE_NOWAIT;
+ filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
+ filp->f_mode |= FMODE_CAN_ODIRECT;
return dquot_file_open(inode, filp);
}
@@ -627,11 +626,6 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
dn->ofs_in_node, nr_free);
}
-void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
-{
- f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
-}
-
static int truncate_partial_data_page(struct inode *inode, u64 from,
bool cache_only)
{
@@ -2225,7 +2219,6 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
ret = 0;
f2fs_stop_checkpoint(sbi, false,
STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
trace_f2fs_shutdown(sbi, in, ret);
}
return ret;
@@ -2238,7 +2231,6 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (ret)
goto out;
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
thaw_bdev(sb->s_bdev);
break;
case F2FS_GOING_DOWN_METASYNC:
@@ -2247,16 +2239,13 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (ret)
goto out;
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NOSYNC:
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_METAFLUSH:
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NEED_FSCK:
set_sbi_flag(sbi, SBI_NEED_FSCK);
@@ -2593,6 +2582,11 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
inode_lock(inode);
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ err = -EINVAL;
+ goto unlock_out;
+ }
+
/* if in-place-update policy is enabled, don't waste time here */
set_inode_flag(inode, FI_OPU_WRITE);
if (f2fs_should_update_inplace(inode, NULL)) {
@@ -2717,6 +2711,7 @@ clear_out:
clear_inode_flag(inode, FI_SKIP_WRITES);
out:
clear_inode_flag(inode, FI_OPU_WRITE);
+unlock_out:
inode_unlock(inode);
if (!err)
range->len = (u64)total << PAGE_SHIFT;
@@ -2876,6 +2871,17 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+ if (ret)
+ goto out_unlock;
+
+ src->i_mtime = src->i_ctime = current_time(src);
+ f2fs_mark_inode_dirty_sync(src, false);
+ if (src != dst) {
+ dst->i_mtime = dst->i_ctime = current_time(dst);
+ f2fs_mark_inode_dirty_sync(dst, false);
+ }
+ f2fs_update_time(sbi, REQ_TIME);
+
out_unlock:
if (src != dst)
inode_unlock(dst);
@@ -3278,7 +3284,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
sizeof(block_count)))
return -EFAULT;
- return f2fs_resize_fs(sbi, block_count);
+ return f2fs_resize_fs(filp, block_count);
}
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
@@ -3375,18 +3381,29 @@ out:
return err;
}
-static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
+static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks)
{
- struct inode *inode = file_inode(filp);
- __u64 blocks;
-
if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
return -EOPNOTSUPP;
if (!f2fs_compressed_file(inode))
return -EINVAL;
- blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
+ *blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
+
+ return 0;
+}
+
+static int f2fs_ioc_get_compress_blocks(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ __u64 blocks;
+ int ret;
+
+ ret = f2fs_get_compress_blocks(inode, &blocks);
+ if (ret < 0)
+ return ret;
+
return put_user(blocks, (u64 __user *)arg);
}
@@ -3455,7 +3472,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
int ret;
int writecount;
- if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ if (!f2fs_sb_has_compression(sbi))
return -EOPNOTSUPP;
if (!f2fs_compressed_file(inode))
@@ -3468,7 +3485,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
return ret;
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
inode_lock(inode);
@@ -3488,13 +3505,15 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
goto out;
+ if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+ ret = -EPERM;
+ goto out;
+ }
+
set_inode_flag(inode, FI_COMPRESS_RELEASED);
inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, true);
- if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
- goto out;
-
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
@@ -3625,7 +3644,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
unsigned int reserved_blocks = 0;
int ret;
- if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ if (!f2fs_sb_has_compression(sbi))
return -EOPNOTSUPP;
if (!f2fs_compressed_file(inode))
@@ -3641,7 +3660,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
goto out;
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
inode_lock(inode);
@@ -4035,7 +4054,7 @@ static int f2fs_ioc_decompress_file(struct file *filp)
if (!f2fs_compressed_file(inode))
return -EINVAL;
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
file_start_write(filp);
inode_lock(inode);
@@ -4110,7 +4129,7 @@ static int f2fs_ioc_compress_file(struct file *filp)
if (!f2fs_compressed_file(inode))
return -EINVAL;
- f2fs_balance_fs(F2FS_I_SB(inode), true);
+ f2fs_balance_fs(sbi, true);
file_start_write(filp);
inode_lock(inode);
@@ -4238,7 +4257,7 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case FS_IOC_SETFSLABEL:
return f2fs_ioc_setfslabel(filp, arg);
case F2FS_IOC_GET_COMPRESS_BLOCKS:
- return f2fs_get_compress_blocks(filp, arg);
+ return f2fs_ioc_get_compress_blocks(filp, arg);
case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
return f2fs_release_compress_blocks(filp, arg);
case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 61c5f9d26018..01effd3fcb6c 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -59,7 +59,7 @@ static int gc_thread_func(void *data)
if (gc_th->gc_wake)
gc_th->gc_wake = false;
- if (try_to_freeze()) {
+ if (try_to_freeze() || f2fs_readonly(sbi->sb)) {
stat_other_skip_bggc_count(sbi);
continue;
}
@@ -1797,7 +1797,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
int gc_type = gc_control->init_gc_type;
unsigned int segno = gc_control->victim_segno;
- int sec_freed = 0, seg_freed = 0, total_freed = 0;
+ int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
int ret = 0;
struct cp_control cpc;
struct gc_inode_list gc_list = {
@@ -1842,6 +1842,8 @@ gc_more:
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
+ /* Reset due to checkpoint */
+ sec_freed = 0;
}
}
@@ -1866,15 +1868,17 @@ retry:
gc_control->should_migrate_blocks);
total_freed += seg_freed;
- if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
+ if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
sec_freed++;
+ total_sec_freed++;
+ }
if (gc_type == FG_GC) {
sbi->cur_victim_sec = NULL_SEGNO;
if (has_enough_free_secs(sbi, sec_freed, 0)) {
if (!gc_control->no_bg_gc &&
- sec_freed < gc_control->nr_free_secs)
+ total_sec_freed < gc_control->nr_free_secs)
goto go_gc_more;
goto stop;
}
@@ -1901,6 +1905,8 @@ retry:
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
+ /* Reset due to checkpoint */
+ sec_freed = 0;
}
go_gc_more:
segno = NULL_SEGNO;
@@ -1913,7 +1919,7 @@ stop:
if (gc_type == FG_GC)
f2fs_unpin_all_sections(sbi, true);
- trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
+ trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
get_pages(sbi, F2FS_DIRTY_NODES),
get_pages(sbi, F2FS_DIRTY_DENTS),
get_pages(sbi, F2FS_DIRTY_IMETA),
@@ -1927,7 +1933,7 @@ stop:
put_gc_inode(&gc_list);
if (gc_control->err_gc_skipped && !ret)
- ret = sec_freed ? 0 : -EAGAIN;
+ ret = total_sec_freed ? 0 : -EAGAIN;
return ret;
}
@@ -2099,8 +2105,9 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
}
}
-int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
+int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
__u64 old_block_count, shrunk_blocks;
struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
unsigned int secs;
@@ -2138,12 +2145,18 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
return -EINVAL;
}
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+
shrunk_blocks = old_block_count - block_count;
secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
/* stop other GC */
- if (!f2fs_down_write_trylock(&sbi->gc_lock))
- return -EAGAIN;
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
+ err = -EAGAIN;
+ goto out_drop_write;
+ }
/* stop CP to protect MAIN_SEC in free_segment_range */
f2fs_lock_op(sbi);
@@ -2163,10 +2176,20 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
out_unlock:
f2fs_unlock_op(sbi);
f2fs_up_write(&sbi->gc_lock);
+out_drop_write:
+ mnt_drop_write_file(filp);
if (err)
return err;
- freeze_super(sbi->sb);
+ err = freeze_super(sbi->sb);
+ if (err)
+ return err;
+
+ if (f2fs_readonly(sbi->sb)) {
+ thaw_super(sbi->sb);
+ return -EROFS;
+ }
+
f2fs_down_write(&sbi->gc_lock);
f2fs_down_write(&sbi->cp_global_sem);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index cf4327ad106c..09e986b050c6 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -10,6 +10,8 @@
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
+#include <linux/lz4.h>
+#include <linux/zstd.h>
#include "f2fs.h"
#include "node.h"
@@ -202,6 +204,80 @@ void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}
+static bool sanity_check_compress_inode(struct inode *inode,
+ struct f2fs_inode *ri)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned char clevel;
+
+ if (ri->i_compress_algorithm >= COMPRESS_MAX) {
+ f2fs_warn(sbi,
+ "%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
+ __func__, inode->i_ino, ri->i_compress_algorithm);
+ goto err;
+ }
+ if (le64_to_cpu(ri->i_compr_blocks) >
+ SECTOR_TO_BLOCK(inode->i_blocks)) {
+ f2fs_warn(sbi,
+ "%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
+ __func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
+ SECTOR_TO_BLOCK(inode->i_blocks));
+ goto err;
+ }
+ if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+ ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
+ f2fs_warn(sbi,
+ "%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
+ __func__, inode->i_ino, ri->i_log_cluster_size);
+ goto err;
+ }
+
+ clevel = le16_to_cpu(ri->i_compress_flag) >>
+ COMPRESS_LEVEL_OFFSET;
+ switch (ri->i_compress_algorithm) {
+ case COMPRESS_LZO:
+#ifdef CONFIG_F2FS_FS_LZO
+ if (clevel)
+ goto err_level;
+#endif
+ break;
+ case COMPRESS_LZORLE:
+#ifdef CONFIG_F2FS_FS_LZORLE
+ if (clevel)
+ goto err_level;
+#endif
+ break;
+ case COMPRESS_LZ4:
+#ifdef CONFIG_F2FS_FS_LZ4
+#ifdef CONFIG_F2FS_FS_LZ4HC
+ if (clevel &&
+ (clevel < LZ4HC_MIN_CLEVEL || clevel > LZ4HC_MAX_CLEVEL))
+ goto err_level;
+#else
+ if (clevel)
+ goto err_level;
+#endif
+#endif
+ break;
+ case COMPRESS_ZSTD:
+#ifdef CONFIG_F2FS_FS_ZSTD
+ if (clevel < zstd_min_clevel() || clevel > zstd_max_clevel())
+ goto err_level;
+#endif
+ break;
+ default:
+ goto err_level;
+ }
+
+ return true;
+err_level:
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
+ __func__, inode->i_ino, clevel);
+err:
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return false;
+}
+
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -225,41 +301,77 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return false;
}
- if (f2fs_sb_has_flexible_inline_xattr(sbi)
- && !f2fs_has_extra_attr(inode)) {
+ if (f2fs_has_extra_attr(inode)) {
+ if (!f2fs_sb_has_extra_attr(sbi)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
+ __func__, inode->i_ino);
+ return false;
+ }
+ if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
+ fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
+ fi->i_extra_isize % sizeof(__le32)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
+ __func__, inode->i_ino, fi->i_extra_isize,
+ F2FS_TOTAL_EXTRA_ATTR_SIZE);
+ return false;
+ }
+ if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
+ f2fs_has_inline_xattr(inode) &&
+ (!fi->i_inline_xattr_size ||
+ fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
+ __func__, inode->i_ino, fi->i_inline_xattr_size,
+ MAX_INLINE_XATTR_SIZE);
+ return false;
+ }
+ if (f2fs_sb_has_compression(sbi) &&
+ fi->i_flags & F2FS_COMPR_FL &&
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+ i_compress_flag)) {
+ if (!sanity_check_compress_inode(inode, ri))
+ return false;
+ }
+ } else if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
__func__, inode->i_ino);
return false;
}
- if (f2fs_has_extra_attr(inode) &&
- !f2fs_sb_has_extra_attr(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
- __func__, inode->i_ino);
- return false;
- }
-
- if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
- fi->i_extra_isize % sizeof(__le32)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
- __func__, inode->i_ino, fi->i_extra_isize,
- F2FS_TOTAL_EXTRA_ATTR_SIZE);
- return false;
- }
-
- if (f2fs_has_extra_attr(inode) &&
- f2fs_sb_has_flexible_inline_xattr(sbi) &&
- f2fs_has_inline_xattr(inode) &&
- (!fi->i_inline_xattr_size ||
- fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
- __func__, inode->i_ino, fi->i_inline_xattr_size,
- MAX_INLINE_XATTR_SIZE);
- return false;
+ if (!f2fs_sb_has_extra_attr(sbi)) {
+ if (f2fs_sb_has_project_quota(sbi)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
+ return false;
+ }
+ if (f2fs_sb_has_inode_chksum(sbi)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
+ return false;
+ }
+ if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
+ return false;
+ }
+ if (f2fs_sb_has_inode_crtime(sbi)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
+ return false;
+ }
+ if (f2fs_sb_has_compression(sbi)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
+ __func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
+ return false;
+ }
}
if (f2fs_sanity_check_inline_data(inode)) {
@@ -283,39 +395,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return false;
}
- if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
- fi->i_flags & F2FS_COMPR_FL &&
- F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
- i_log_cluster_size)) {
- if (ri->i_compress_algorithm >= COMPRESS_MAX) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
- "compress algorithm: %u, run fsck to fix",
- __func__, inode->i_ino,
- ri->i_compress_algorithm);
- return false;
- }
- if (le64_to_cpu(ri->i_compr_blocks) >
- SECTOR_TO_BLOCK(inode->i_blocks)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
- "i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
- __func__, inode->i_ino,
- le64_to_cpu(ri->i_compr_blocks),
- SECTOR_TO_BLOCK(inode->i_blocks));
- return false;
- }
- if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
- ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
- "log cluster size: %u, run fsck to fix",
- __func__, inode->i_ino,
- ri->i_log_cluster_size);
- return false;
- }
- }
-
return true;
}
@@ -442,7 +521,7 @@ static int do_read_inode(struct inode *inode)
if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
(fi->i_flags & F2FS_COMPR_FL)) {
if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
- i_log_cluster_size)) {
+ i_compress_flag)) {
unsigned short compress_flag;
atomic_set(&fi->i_compr_blocks,
@@ -680,7 +759,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
- i_log_cluster_size)) {
+ i_compress_flag)) {
unsigned short compress_flag;
ri->i_compr_blocks =
diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
index 3d5bfb1ad585..f8703038e1d8 100644
--- a/fs/f2fs/iostat.c
+++ b/fs/f2fs/iostat.c
@@ -80,6 +80,7 @@ int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset)
seq_puts(seq, "[OTHER]\n");
IOSTAT_INFO_SHOW("fs discard", FS_DISCARD_IO);
IOSTAT_INFO_SHOW("fs flush", FS_FLUSH_IO);
+ IOSTAT_INFO_SHOW("fs zone reset", FS_ZONE_RESET_IO);
return 0;
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index ad597b417fea..bee0568888da 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -23,7 +23,7 @@
#include <trace/events/f2fs.h>
static inline bool is_extension_exist(const unsigned char *s, const char *sub,
- bool tmp_ext)
+ bool tmp_ext, bool tmp_dot)
{
size_t slen = strlen(s);
size_t sublen = strlen(sub);
@@ -49,13 +49,27 @@ static inline bool is_extension_exist(const unsigned char *s, const char *sub,
for (i = 1; i < slen - sublen; i++) {
if (s[i] != '.')
continue;
- if (!strncasecmp(s + i + 1, sub, sublen))
- return true;
+ if (!strncasecmp(s + i + 1, sub, sublen)) {
+ if (!tmp_dot)
+ return true;
+ if (i == slen - sublen - 1 || s[i + 1 + sublen] == '.')
+ return true;
+ }
}
return false;
}
+static inline bool is_temperature_extension(const unsigned char *s, const char *sub)
+{
+ return is_extension_exist(s, sub, true, false);
+}
+
+static inline bool is_compress_extension(const unsigned char *s, const char *sub)
+{
+ return is_extension_exist(s, sub, true, true);
+}
+
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
bool hot, bool set)
{
@@ -148,7 +162,7 @@ static void set_compress_new_inode(struct f2fs_sb_info *sbi, struct inode *dir,
cold_count = le32_to_cpu(sbi->raw_super->extension_count);
hot_count = sbi->raw_super->hot_ext_count;
for (i = cold_count; i < cold_count + hot_count; i++)
- if (is_extension_exist(name, extlist[i], false))
+ if (is_temperature_extension(name, extlist[i]))
break;
f2fs_up_read(&sbi->sb_lock);
if (i < (cold_count + hot_count))
@@ -156,12 +170,12 @@ static void set_compress_new_inode(struct f2fs_sb_info *sbi, struct inode *dir,
/* Don't compress unallowed extension. */
for (i = 0; i < noext_cnt; i++)
- if (is_extension_exist(name, noext[i], false))
+ if (is_compress_extension(name, noext[i]))
return;
/* Compress wanting extension. */
for (i = 0; i < ext_cnt; i++) {
- if (is_extension_exist(name, ext[i], false)) {
+ if (is_compress_extension(name, ext[i])) {
set_compress_context(inode);
return;
}
@@ -189,7 +203,7 @@ static void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode,
cold_count = le32_to_cpu(sbi->raw_super->extension_count);
hot_count = sbi->raw_super->hot_ext_count;
for (i = 0; i < cold_count + hot_count; i++)
- if (is_extension_exist(name, extlist[i], true))
+ if (is_temperature_extension(name, extlist[i]))
break;
f2fs_up_read(&sbi->sb_lock);
@@ -576,8 +590,8 @@ out_splice:
}
#endif
new = d_splice_alias(inode, dentry);
- err = PTR_ERR_OR_ZERO(new);
- trace_f2fs_lookup_end(dir, dentry, ino, !new ? -ENOENT : err);
+ trace_f2fs_lookup_end(dir, !IS_ERR_OR_NULL(new) ? new : dentry,
+ ino, IS_ERR(new) ? PTR_ERR(new) : err);
return new;
out_iput:
iput(inode);
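The is_compress_extension() helper introduced earlier in this file only treats an extension as a compression hint when the matched suffix either ends the file name or is immediately followed by another dot, so a temporary suffix such as ".tmp" still qualifies while an embedded match does not. A small user-space sketch of that matching rule — ext_matches_strictly() is a hypothetical helper, case-sensitive for brevity and independent of the kernel code — behaves like this:

/*
 * Illustrative model of the tmp_dot rule: the extension must end the
 * name or be directly followed by another '.', e.g. a trailing ".tmp".
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool ext_matches_strictly(const char *name, const char *ext)
{
	size_t slen = strlen(name), sublen = strlen(ext);
	size_t i;

	if (slen < sublen + 2)
		return false;

	for (i = 1; i + sublen < slen; i++) {
		if (name[i] != '.' || strncmp(name + i + 1, ext, sublen))
			continue;
		/* match must end the name or be followed by another dot */
		if (i + 1 + sublen == slen || name[i + 1 + sublen] == '.')
			return true;
	}
	return false;
}

int main(void)
{
	printf("%d\n", ext_matches_strictly("video.mp4", "mp4"));       /* 1 */
	printf("%d\n", ext_matches_strictly("video.mp4.tmp", "mp4"));   /* 1 */
	printf("%d\n", ext_matches_strictly("video.mp4backup", "mp4")); /* 0 */
	return 0;
}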
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index bd1dad523796..ee2e1dd64f25 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -925,6 +925,7 @@ static int truncate_node(struct dnode_of_data *dn)
static int truncate_dnode(struct dnode_of_data *dn)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct page *page;
int err;
@@ -932,19 +933,30 @@ static int truncate_dnode(struct dnode_of_data *dn)
return 1;
/* get direct node */
- page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
+ page = f2fs_get_node_page(sbi, dn->nid);
if (PTR_ERR(page) == -ENOENT)
return 1;
else if (IS_ERR(page))
return PTR_ERR(page);
+ if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
+ f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
+ dn->inode->i_ino, dn->nid, ino_of_node(page));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
+ f2fs_put_page(page, 1);
+ return -EFSCORRUPTED;
+ }
+
/* Make dnode_of_data for parameter */
dn->node_page = page;
dn->ofs_in_node = 0;
- f2fs_truncate_data_blocks(dn);
+ f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
err = truncate_node(dn);
- if (err)
+ if (err) {
+ f2fs_put_page(page, 1);
return err;
+ }
return 1;
}
@@ -1596,6 +1608,9 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
trace_f2fs_writepage(page, NODE);
if (unlikely(f2fs_cp_error(sbi))) {
+ /* keep node pages in remount-ro mode */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
+ goto redirty_out;
ClearPageUptodate(page);
dec_page_count(sbi, F2FS_DIRTY_NODES);
unlock_page(page);
@@ -2063,7 +2078,6 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
struct list_head *head = &sbi->fsync_node_list;
unsigned long flags;
unsigned int cur_seq_id = 0;
- int ret2, ret = 0;
while (seq_id && cur_seq_id < seq_id) {
spin_lock_irqsave(&sbi->fsync_node_lock, flags);
@@ -2084,16 +2098,9 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
f2fs_wait_on_page_writeback(page, NODE, true, false);
put_page(page);
-
- if (ret)
- break;
}
- ret2 = filemap_check_errors(NODE_MAPPING(sbi));
- if (!ret)
- ret = ret2;
-
- return ret;
+ return filemap_check_errors(NODE_MAPPING(sbi));
}
static int f2fs_write_node_pages(struct address_space *mapping,
@@ -3065,7 +3072,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
struct f2fs_journal *journal = curseg->journal;
- struct nat_entry_set *setvec[SETVEC_SIZE];
+ struct nat_entry_set *setvec[NAT_VEC_SIZE];
struct nat_entry_set *set, *tmp;
unsigned int found;
nid_t set_idx = 0;
@@ -3098,7 +3105,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
remove_nats_in_journal(sbi);
while ((found = __gang_lookup_nat_set(nm_i,
- set_idx, SETVEC_SIZE, setvec))) {
+ set_idx, NAT_VEC_SIZE, setvec))) {
unsigned idx;
set_idx = setvec[found - 1]->set + 1;
@@ -3319,8 +3326,9 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *next_i;
- struct nat_entry *natvec[NATVEC_SIZE];
- struct nat_entry_set *setvec[SETVEC_SIZE];
+ void *vec[NAT_VEC_SIZE];
+ struct nat_entry **natvec = (struct nat_entry **)vec;
+ struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
nid_t nid = 0;
unsigned int found;
@@ -3343,7 +3351,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
/* destroy nat cache */
f2fs_down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_cache(nm_i,
- nid, NATVEC_SIZE, natvec))) {
+ nid, NAT_VEC_SIZE, natvec))) {
unsigned idx;
nid = nat_get_nid(natvec[found - 1]) + 1;
@@ -3359,8 +3367,9 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
/* destroy nat set cache */
nid = 0;
+ memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
while ((found = __gang_lookup_nat_set(nm_i,
- nid, SETVEC_SIZE, setvec))) {
+ nid, NAT_VEC_SIZE, setvec))) {
unsigned idx;
nid = setvec[found - 1]->set + 1;
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 906fb67a99da..5bd16a95eef8 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -35,8 +35,7 @@
#define DEF_RF_NODE_BLOCKS 0
/* vector size for gang look-up from nat cache that consists of radix tree */
-#define NATVEC_SIZE 64
-#define SETVEC_SIZE 32
+#define NAT_VEC_SIZE 32
/* return value for read_node_page */
#define LOCKED_PAGE 1
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 58c1a0096f7d..4e7d4ceeb084 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -360,21 +360,63 @@ static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi,
return ra_blocks;
}
+/* Detect looped node chain with Floyd's cycle detection algorithm. */
+static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr,
+ block_t *blkaddr_fast, bool *is_detecting)
+{
+ unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
+ struct page *page = NULL;
+ int i;
+
+ if (!*is_detecting)
+ return 0;
+
+ for (i = 0; i < 2; i++) {
+ if (!f2fs_is_valid_blkaddr(sbi, *blkaddr_fast, META_POR)) {
+ *is_detecting = false;
+ return 0;
+ }
+
+ page = f2fs_get_tmp_page(sbi, *blkaddr_fast);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ if (!is_recoverable_dnode(page)) {
+ f2fs_put_page(page, 1);
+ *is_detecting = false;
+ return 0;
+ }
+
+ ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, *blkaddr_fast,
+ next_blkaddr_of_node(page));
+
+ *blkaddr_fast = next_blkaddr_of_node(page);
+ f2fs_put_page(page, 1);
+
+ f2fs_ra_meta_pages_cond(sbi, *blkaddr_fast, ra_blocks);
+ }
+
+ if (*blkaddr_fast == blkaddr) {
+ f2fs_notice(sbi, "%s: Detect looped node chain on blkaddr:%u."
+ " Run fsck to fix it.", __func__, blkaddr);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
bool check_only)
{
struct curseg_info *curseg;
struct page *page = NULL;
- block_t blkaddr;
- unsigned int loop_cnt = 0;
- unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
- unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
- valid_user_blocks(sbi);
+ block_t blkaddr, blkaddr_fast;
+ bool is_detecting = true;
int err = 0;
/* get node pages in the current segment */
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
+ blkaddr_fast = blkaddr;
while (1) {
struct fsync_inode_entry *entry;
@@ -418,10 +460,8 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
quota_inode);
if (IS_ERR(entry)) {
err = PTR_ERR(entry);
- if (err == -ENOENT) {
- err = 0;
+ if (err == -ENOENT)
goto next;
- }
f2fs_put_page(page, 1);
break;
}
@@ -431,25 +471,14 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
if (IS_INODE(page) && is_dent_dnode(page))
entry->last_dentry = blkaddr;
next:
- /* sanity check in order to detect looped node chain */
- if (++loop_cnt >= free_blocks ||
- blkaddr == next_blkaddr_of_node(page)) {
- f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
- __func__, blkaddr,
- next_blkaddr_of_node(page));
- f2fs_put_page(page, 1);
- err = -EINVAL;
- break;
- }
-
- ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr,
- next_blkaddr_of_node(page));
-
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
f2fs_put_page(page, 1);
- f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks);
+ err = sanity_check_node_chain(sbi, blkaddr, &blkaddr_fast,
+ &is_detecting);
+ if (err)
+ break;
}
return err;
}
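sanity_check_node_chain() above replaces the old free-block counter with Floyd's tortoise-and-hare scheme: find_fsync_dnodes() walks the chain one block per iteration while the helper advances a second cursor two blocks, and a looped chain is reported when the fast cursor meets the slow one again. A minimal stand-alone model of that detection — chain_has_loop() and struct chain_node are made-up stand-ins for an in-memory list, not the on-disk node chain — is:

/*
 * Floyd's cycle detection: "slow" advances one step per iteration,
 * "fast" advances two; a cycle exists iff they ever meet again.
 */
#include <stdbool.h>

struct chain_node {
	struct chain_node *next;
};

static bool chain_has_loop(struct chain_node *head)
{
	struct chain_node *slow = head, *fast = head;

	while (fast && fast->next) {
		slow = slow->next;       /* one block per iteration  */
		fast = fast->next->next; /* two blocks per iteration */
		if (slow == fast)
			return true;     /* looped node chain        */
	}
	return false;                    /* chain terminates normally */
}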
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 6db410f1bb8c..0457d620011f 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1196,6 +1196,45 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
+ struct discard_cmd *dc, blk_opf_t flag,
+ struct list_head *wait_list,
+ unsigned int *issued)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct block_device *bdev = dc->bdev;
+ struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
+ unsigned long flags;
+
+ trace_f2fs_issue_reset_zone(bdev, dc->di.start);
+
+ spin_lock_irqsave(&dc->lock, flags);
+ dc->state = D_SUBMIT;
+ dc->bio_ref++;
+ spin_unlock_irqrestore(&dc->lock, flags);
+
+ if (issued)
+ (*issued)++;
+
+ atomic_inc(&dcc->queued_discard);
+ dc->queued++;
+ list_move_tail(&dc->list, wait_list);
+
+ /* sanity check on discard range */
+ __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);
+
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
+ bio->bi_private = dc;
+ bio->bi_end_io = f2fs_submit_discard_endio;
+ submit_bio(bio);
+
+ atomic_inc(&dcc->issued_discard);
+ f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
+}
+#endif
+
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
@@ -1217,6 +1256,13 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
return 0;
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
+ __submit_zone_reset_cmd(sbi, dc, flag, wait_list, issued);
+ return 0;
+ }
+#endif
+
trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);
lstart = dc->di.lstart;
@@ -1461,6 +1507,19 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
}
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t lblkstart,
+ block_t blklen)
+{
+ trace_f2fs_queue_reset_zone(bdev, blkstart);
+
+ mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
+ __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
+ mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
+}
+#endif
+
static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkstart, block_t blklen)
{
@@ -1724,6 +1783,19 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
mutex_lock(&dcc->cmd_lock);
dc = __lookup_discard_cmd(sbi, blkaddr);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
+ /* force submit zone reset */
+ if (dc->state == D_PREP)
+ __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
+ &dcc->wait_list, NULL);
+ dc->ref++;
+ mutex_unlock(&dcc->cmd_lock);
+ /* wait zone reset */
+ __wait_one_discard_bio(sbi, dc);
+ return;
+ }
+#endif
if (dc) {
if (dc->state == D_PREP) {
__punch_discard_cmd(sbi, dc, blkaddr);
@@ -1876,9 +1948,15 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
blkstart, blklen);
return -EIO;
}
- trace_f2fs_issue_reset_zone(bdev, blkstart);
- return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
- sector, nr_sects, GFP_NOFS);
+
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
+ trace_f2fs_issue_reset_zone(bdev, blkstart);
+ return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+ sector, nr_sects, GFP_NOFS);
+ }
+
+ __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
+ return 0;
}
/* For conventional zones, use regular discard if supported */
@@ -2115,7 +2193,7 @@ find_next:
len = next_pos - cur_pos;
if (f2fs_sb_has_blkzoned(sbi) ||
- (force && len < cpc->trim_minlen))
+ !force || len < cpc->trim_minlen)
goto skip;
f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
@@ -4768,17 +4846,17 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
{
unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
block_t zone_block, wp_block, last_valid_block;
- unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
int i, s, b, ret;
struct seg_entry *se;
if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return 0;
- wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
+ wp_block = fdev->start_blk + (zone->wp >> sbi->log_sectors_per_block);
wp_segno = GET_SEGNO(sbi, wp_block);
wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
- zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
+ zone_block = fdev->start_blk + (zone->start >>
+ sbi->log_sectors_per_block);
zone_segno = GET_SEGNO(sbi, zone_block);
zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
@@ -4811,39 +4889,52 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
}
/*
- * If last valid block is beyond the write pointer, report the
- * inconsistency. This inconsistency does not cause write error
- * because the zone will not be selected for write operation until
- * it get discarded. Just report it.
+ * The write pointer matches with the valid blocks or
+ * already points to the end of the zone.
*/
- if (last_valid_block >= wp_block) {
- f2fs_notice(sbi, "Valid block beyond write pointer: "
- "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
- GET_SEGNO(sbi, last_valid_block),
- GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
- wp_segno, wp_blkoff);
+ if ((last_valid_block + 1 == wp_block) ||
+ (zone->wp == zone->start + zone->len))
return 0;
- }
- /*
- * If there is no valid block in the zone and if write pointer is
- * not at zone start, reset the write pointer.
- */
- if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
+ if (last_valid_block + 1 == zone_block) {
+ /*
+ * If there is no valid block in the zone and if write pointer
+ * is not at zone start, reset the write pointer.
+ */
f2fs_notice(sbi,
"Zone without valid block has non-zero write "
"pointer. Reset the write pointer: wp[0x%x,0x%x]",
wp_segno, wp_blkoff);
ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
- zone->len >> log_sectors_per_block);
- if (ret) {
+ zone->len >> sbi->log_sectors_per_block);
+ if (ret)
f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
fdev->path, ret);
- return ret;
- }
+
+ return ret;
}
- return 0;
+ /*
+ * If there are valid blocks and the write pointer doesn't
+ * match with them, we need to report the inconsistency and
+ * fill the zone till the end to close the zone. This inconsistency
+ * does not cause write error because the zone will not be selected
+ * for write operation until it gets discarded.
+ */
+ f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: "
+ "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
+ GET_SEGNO(sbi, last_valid_block),
+ GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
+ wp_segno, wp_blkoff);
+
+ ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
+ zone->len - (zone->wp - zone->start),
+ GFP_NOFS, 0);
+ if (ret)
+ f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
+ fdev->path, ret);
+
+ return ret;
}
static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
@@ -4876,7 +4967,6 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
struct blk_zone zone;
unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
block_t cs_zone_block, wp_block;
- unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
sector_t zone_sector;
int err;
@@ -4888,8 +4978,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
return 0;
/* report zone for the sector the curseg points to */
- zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
- << log_sectors_per_block;
+ zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) <<
+ sbi->log_sectors_per_block;
err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
report_one_zone_cb, &zone);
if (err != 1) {
@@ -4901,10 +4991,10 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return 0;
- wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
+ wp_block = zbd->start_blk + (zone.wp >> sbi->log_sectors_per_block);
wp_segno = GET_SEGNO(sbi, wp_block);
wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
- wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
+ wp_sector_off = zone.wp & GENMASK(sbi->log_sectors_per_block - 1, 0);
if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
wp_sector_off == 0)
@@ -4931,8 +5021,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
if (!zbd)
return 0;
- zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
- << log_sectors_per_block;
+ zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) <<
+ sbi->log_sectors_per_block;
err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
report_one_zone_cb, &zone);
if (err != 1) {
@@ -4950,7 +5040,7 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
"Reset the zone: curseg[0x%x,0x%x]",
type, cs->segno, cs->next_blkoff);
err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
- zone.len >> log_sectors_per_block);
+ zone.len >> sbi->log_sectors_per_block);
if (err) {
f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
zbd->path, err);
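The reworked check_zone_write_pointer() above resolves each sequential zone into one of three cases: leave it alone when the write pointer already matches the last valid block (or the zone is full), reset it when the zone holds no valid data but the pointer has moved, and otherwise zero-fill the remainder so the zone can be closed. A condensed decision model — classify_zone() and its block-granular fields are illustrative stand-ins, not the kernel structures, which work on sectors/segments and issue bios — is sketched here:

/* Simplified model of the three write-pointer consistency cases. */
enum zone_fix { ZONE_OK, ZONE_RESET, ZONE_FILL_TO_END };

struct zone_state {
	unsigned long long start;       /* first block of the zone          */
	unsigned long long len;         /* zone length in blocks            */
	unsigned long long wp;          /* current write pointer (block)    */
	unsigned long long last_valid;  /* last block with valid data, or   */
					/* start - 1 when the zone is empty */
};

static enum zone_fix classify_zone(const struct zone_state *z)
{
	/* write pointer matches valid data, or the zone is already full */
	if (z->last_valid + 1 == z->wp || z->wp == z->start + z->len)
		return ZONE_OK;

	/* no valid blocks but a non-zero write pointer: reset the zone */
	if (z->last_valid + 1 == z->start)
		return ZONE_RESET;

	/* valid blocks and write pointer disagree: fill up to the end */
	return ZONE_FILL_TO_END;
}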
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index e34197a70dc1..ca31163da00a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -164,6 +164,7 @@ enum {
Opt_discard_unit,
Opt_memory_mode,
Opt_age_extent_cache,
+ Opt_errors,
Opt_err,
};
@@ -243,6 +244,7 @@ static match_table_t f2fs_tokens = {
{Opt_discard_unit, "discard_unit=%s"},
{Opt_memory_mode, "memory=%s"},
{Opt_age_extent_cache, "age_extent_cache"},
+ {Opt_errors, "errors=%s"},
{Opt_err, NULL},
};
@@ -587,14 +589,12 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
unsigned int level;
-#endif
if (strlen(str) == 3) {
- F2FS_OPTION(sbi).compress_level = 0;
+ F2FS_OPTION(sbi).compress_level = LZ4HC_DEFAULT_CLEVEL;
return 0;
}
-#ifdef CONFIG_F2FS_FS_LZ4HC
str += 3;
if (str[0] != ':') {
@@ -604,7 +604,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
if (kstrtouint(str + 1, 10, &level))
return -EINVAL;
- if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
+ if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
return -EINVAL;
}
@@ -612,6 +612,10 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
F2FS_OPTION(sbi).compress_level = level;
return 0;
#else
+ if (strlen(str) == 3) {
+ F2FS_OPTION(sbi).compress_level = 0;
+ return 0;
+ }
f2fs_info(sbi, "kernel doesn't support lz4hc compression");
return -EINVAL;
#endif
@@ -625,7 +629,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
int len = 4;
if (strlen(str) == len) {
- F2FS_OPTION(sbi).compress_level = 0;
+ F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
return 0;
}
@@ -638,7 +642,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
if (kstrtouint(str + 1, 10, &level))
return -EINVAL;
- if (!level || level > zstd_max_clevel()) {
+ if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
f2fs_info(sbi, "invalid zstd compress level: %d", level);
return -EINVAL;
}
@@ -1268,6 +1272,25 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
case Opt_age_extent_cache:
set_opt(sbi, AGE_EXTENT_CACHE);
break;
+ case Opt_errors:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+ if (!strcmp(name, "remount-ro")) {
+ F2FS_OPTION(sbi).errors =
+ MOUNT_ERRORS_READONLY;
+ } else if (!strcmp(name, "continue")) {
+ F2FS_OPTION(sbi).errors =
+ MOUNT_ERRORS_CONTINUE;
+ } else if (!strcmp(name, "panic")) {
+ F2FS_OPTION(sbi).errors =
+ MOUNT_ERRORS_PANIC;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
default:
f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
p);
@@ -1340,7 +1363,7 @@ default_check:
return -EINVAL;
}
- min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
+ min_size = MIN_INLINE_XATTR_SIZE;
max_size = MAX_INLINE_XATTR_SIZE;
if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
@@ -1550,6 +1573,7 @@ static void f2fs_put_super(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int i;
+ int err = 0;
bool done;
/* unregister procfs/sysfs entries in advance to avoid race case */
@@ -1576,7 +1600,7 @@ static void f2fs_put_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
- f2fs_write_checkpoint(sbi, &cpc);
+ err = f2fs_write_checkpoint(sbi, &cpc);
}
/* be sure to wait for any on-going discard commands */
@@ -1585,7 +1609,7 @@ static void f2fs_put_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
- f2fs_write_checkpoint(sbi, &cpc);
+ err = f2fs_write_checkpoint(sbi, &cpc);
}
/*
@@ -1602,6 +1626,19 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
+ if (err) {
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
+ truncate_inode_pages_final(META_MAPPING(sbi));
+ }
+
+ for (i = 0; i < NR_COUNT_TYPE; i++) {
+ if (!get_pages(sbi, i))
+ continue;
+ f2fs_err(sbi, "detect filesystem reference count leak during "
+ "umount, type: %d, count: %lld", i, get_pages(sbi, i));
+ f2fs_bug_on(sbi, 1);
+ }
+
f2fs_bug_on(sbi, sbi->fsync_node_num);
f2fs_destroy_compress_inode(sbi);
@@ -1622,6 +1659,9 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_node_manager(sbi);
f2fs_destroy_segment_manager(sbi);
+ /* flush s_error_work before sbi destroy */
+ flush_work(&sbi->s_error_work);
+
f2fs_destroy_post_read_wq(sbi);
kvfree(sbi->ckpt);
@@ -2052,12 +2092,32 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
seq_printf(seq, ",memory=%s", "low");
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
+ seq_printf(seq, ",errors=%s", "remount-ro");
+ else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE)
+ seq_printf(seq, ",errors=%s", "continue");
+ else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
+ seq_printf(seq, ",errors=%s", "panic");
+
return 0;
}
-static void default_options(struct f2fs_sb_info *sbi)
+static void default_options(struct f2fs_sb_info *sbi, bool remount)
{
/* init some FS parameters */
+ if (!remount) {
+ set_opt(sbi, READ_EXTENT_CACHE);
+ clear_opt(sbi, DISABLE_CHECKPOINT);
+
+ if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
+ set_opt(sbi, DISCARD);
+
+ if (f2fs_sb_has_blkzoned(sbi))
+ F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
+ else
+ F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
+ }
+
if (f2fs_sb_has_readonly(sbi))
F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
else
@@ -2080,29 +2140,23 @@ static void default_options(struct f2fs_sb_info *sbi)
}
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
+ F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;
sbi->sb->s_flags &= ~SB_INLINECRYPT;
set_opt(sbi, INLINE_XATTR);
set_opt(sbi, INLINE_DATA);
set_opt(sbi, INLINE_DENTRY);
- set_opt(sbi, READ_EXTENT_CACHE);
set_opt(sbi, NOHEAP);
- clear_opt(sbi, DISABLE_CHECKPOINT);
set_opt(sbi, MERGE_CHECKPOINT);
F2FS_OPTION(sbi).unusable_cap = 0;
sbi->sb->s_flags |= SB_LAZYTIME;
if (!f2fs_is_readonly(sbi))
set_opt(sbi, FLUSH_MERGE);
- if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
- set_opt(sbi, DISCARD);
- if (f2fs_sb_has_blkzoned(sbi)) {
+ if (f2fs_sb_has_blkzoned(sbi))
F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
- F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
- } else {
+ else
F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
- F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
- }
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
@@ -2274,13 +2328,16 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
}
- default_options(sbi);
+ default_options(sbi, true);
/* parse mount options */
err = parse_options(sb, data, true);
if (err)
goto restore_opts;
+ /* flush outstanding errors before changing fs state */
+ flush_work(&sbi->s_error_work);
+
/*
* Previous and new state of filesystem is RO,
* so skip checking GC and FLUSH_MERGE conditions.
@@ -2713,6 +2770,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
{
struct inode *qf_inode;
unsigned long qf_inum;
+ unsigned long qf_flag = F2FS_QUOTA_DEFAULT_FL;
int err;
BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
@@ -2728,7 +2786,15 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
}
/* Don't account quota for quota files to avoid recursion */
+ inode_lock(qf_inode);
qf_inode->i_flags |= S_NOQUOTA;
+
+ if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) {
+ F2FS_I(qf_inode)->i_flags |= qf_flag;
+ f2fs_set_inode_flags(qf_inode);
+ }
+ inode_unlock(qf_inode);
+
err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
iput(qf_inode);
return err;
@@ -2859,18 +2925,29 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
return -EBUSY;
}
+ if (path->dentry->d_sb != sb)
+ return -EXDEV;
+
err = f2fs_quota_sync(sb, type);
if (err)
return err;
- err = dquot_quota_on(sb, type, format_id, path);
+ inode = d_inode(path->dentry);
+
+ err = filemap_fdatawrite(inode->i_mapping);
if (err)
return err;
- inode = d_inode(path->dentry);
+ err = filemap_fdatawait(inode->i_mapping);
+ if (err)
+ return err;
+
+ err = dquot_quota_on(sb, type, format_id, path);
+ if (err)
+ return err;
inode_lock(inode);
- F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
+ F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
f2fs_set_inode_flags(inode);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
@@ -2895,7 +2972,7 @@ static int __f2fs_quota_off(struct super_block *sb, int type)
goto out_put;
inode_lock(inode);
- F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
+ F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL;
f2fs_set_inode_flags(inode);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
@@ -3926,55 +4003,73 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
return err;
}
-void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
+static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->error_lock, flags);
+ if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
+ sbi->stop_reason[reason]++;
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
+}
+
+static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ unsigned long flags;
int err;
f2fs_down_write(&sbi->sb_lock);
- if (raw_super->s_stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
- raw_super->s_stop_reason[reason]++;
+ spin_lock_irqsave(&sbi->error_lock, flags);
+ if (sbi->error_dirty) {
+ memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
+ MAX_F2FS_ERRORS);
+ sbi->error_dirty = false;
+ }
+ memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON);
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
err = f2fs_commit_super(sbi, false);
- if (err)
- f2fs_err(sbi, "f2fs_commit_super fails to record reason:%u err:%d",
- reason, err);
+
f2fs_up_write(&sbi->sb_lock);
+ if (err)
+ f2fs_err(sbi, "f2fs_commit_super fails to record err:%d", err);
}
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
{
- spin_lock(&sbi->error_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->error_lock, flags);
if (!test_bit(flag, (unsigned long *)sbi->errors)) {
set_bit(flag, (unsigned long *)sbi->errors);
sbi->error_dirty = true;
}
- spin_unlock(&sbi->error_lock);
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
}
static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
{
+ unsigned long flags;
bool need_update = false;
- spin_lock(&sbi->error_lock);
+ spin_lock_irqsave(&sbi->error_lock, flags);
if (sbi->error_dirty) {
memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
MAX_F2FS_ERRORS);
sbi->error_dirty = false;
need_update = true;
}
- spin_unlock(&sbi->error_lock);
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
return need_update;
}
-void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
+static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error)
{
int err;
- f2fs_save_errors(sbi, error);
-
f2fs_down_write(&sbi->sb_lock);
if (!f2fs_update_errors(sbi))
@@ -3988,6 +4083,83 @@ out_unlock:
f2fs_up_write(&sbi->sb_lock);
}
+void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
+{
+ f2fs_save_errors(sbi, error);
+ f2fs_record_errors(sbi, error);
+}
+
+void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error)
+{
+ f2fs_save_errors(sbi, error);
+
+ if (!sbi->error_dirty)
+ return;
+ if (!test_bit(error, (unsigned long *)sbi->errors))
+ return;
+ schedule_work(&sbi->s_error_work);
+}
+
+static bool system_going_down(void)
+{
+ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+ || system_state == SYSTEM_RESTART;
+}
+
+void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+ bool irq_context)
+{
+ struct super_block *sb = sbi->sb;
+ bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
+ bool continue_fs = !shutdown &&
+ F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE;
+
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+
+ if (!f2fs_hw_is_readonly(sbi)) {
+ save_stop_reason(sbi, reason);
+
+ if (irq_context && !shutdown)
+ schedule_work(&sbi->s_error_work);
+ else
+ f2fs_record_stop_reason(sbi);
+ }
+
+ /*
+ * We force ERRORS_RO behavior when system is rebooting. Otherwise we
+ * could panic during 'reboot -f' as the underlying device got already
+ * disabled.
+ */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC &&
+ !shutdown && !system_going_down() &&
+ !is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))
+ panic("F2FS-fs (device %s): panic forced after error\n",
+ sb->s_id);
+
+ if (shutdown)
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+
+ /* continue filesystem operations if errors=continue */
+ if (continue_fs || f2fs_readonly(sb))
+ return;
+
+ f2fs_warn(sbi, "Remounting filesystem read-only");
+ /*
+ * Make sure updated value of ->s_mount_flags will be visible before
+ * ->s_flags update
+ */
+ smp_wmb();
+ sb->s_flags |= SB_RDONLY;
+}
+
+static void f2fs_record_error_work(struct work_struct *work)
+{
+ struct f2fs_sb_info *sbi = container_of(work,
+ struct f2fs_sb_info, s_error_work);
+
+ f2fs_record_stop_reason(sbi);
+}
+
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
@@ -4220,14 +4392,16 @@ try_onemore:
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
+ INIT_WORK(&sbi->s_error_work, f2fs_record_error_work);
memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
+ memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON);
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sbi))
sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
sizeof(raw_super->uuid));
- default_options(sbi);
+ default_options(sbi, false);
/* parse mount options */
options = kstrdup((const char *)data, GFP_KERNEL);
if (data && !options) {
@@ -4617,6 +4791,8 @@ free_sm:
f2fs_destroy_segment_manager(sbi);
stop_ckpt_thread:
f2fs_stop_ckpt_thread(sbi);
+ /* flush s_error_work before sbi destroy */
+ flush_work(&sbi->s_error_work);
f2fs_destroy_post_read_wq(sbi);
free_devices:
destroy_device_list(sbi);
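f2fs_handle_critical_error() above maps the new errors= mount option onto three reactions: keep going, force the superblock read-only, or panic, with the panic suppressed during an fs shutdown or while the system is going down so that 'reboot -f' cannot trip it. A stripped-down model of that policy — react_to_critical_error() and its context struct are hypothetical, and the stop-reason bookkeeping is omitted — might read:

/*
 * Condensed model of the errors={continue,remount-ro,panic} policy.
 * The real code consults F2FS_OPTION(sbi).errors and system_state.
 */
#include <stdbool.h>

enum error_policy { ERRORS_CONTINUE, ERRORS_REMOUNT_RO, ERRORS_PANIC };
enum error_action { ACT_CONTINUE, ACT_REMOUNT_RO, ACT_PANIC };

struct fs_error_ctx {
	enum error_policy policy;
	bool shutdown;          /* STOP_CP_REASON_SHUTDOWN            */
	bool system_going_down; /* halt/power-off/restart in progress */
	bool already_readonly;  /* superblock is already mounted ro   */
};

static enum error_action react_to_critical_error(const struct fs_error_ctx *c)
{
	/* never panic for an explicit shutdown or while rebooting */
	if (c->policy == ERRORS_PANIC && !c->shutdown && !c->system_going_down)
		return ACT_PANIC;

	/* errors=continue (or an already read-only fs) keeps running */
	if ((c->policy == ERRORS_CONTINUE && !c->shutdown) || c->already_readonly)
		return ACT_CONTINUE;

	return ACT_REMOUNT_RO;
}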
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 8ea05340bad9..48b7e0073884 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -842,68 +842,160 @@ static struct f2fs_attr f2fs_attr_##_name = { \
#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
-#define F2FS_STAT_ATTR(_struct_type, _struct_name, _name, _elname) \
-static struct f2fs_attr f2fs_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = 0444 }, \
- .show = f2fs_sbi_show, \
- .struct_type = _struct_type, \
- .offset = offsetof(struct _struct_name, _elname), \
-}
+#ifdef CONFIG_F2FS_STAT_FS
+#define STAT_INFO_RO_ATTR(name, elname) \
+ F2FS_RO_ATTR(STAT_INFO, f2fs_stat_info, name, elname)
+#endif
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_urgent_sleep_time,
- urgent_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle, gc_mode);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent, gc_mode);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_discard_request, max_discard_request);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, min_discard_issue_time, min_discard_issue_time);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, mid_discard_issue_time, mid_discard_issue_time);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_discard_issue_time, max_discard_issue_time);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_io_aware_gran, discard_io_aware_gran);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_urgent_util, discard_urgent_util);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
-F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_ordered_discard, max_ordered_discard);
-F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_seq_blocks, min_seq_blocks);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, max_roll_forward_node_blocks, max_rf_node_blocks);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, migration_granularity, migration_granularity);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, discard_idle_interval,
- interval_time[DISCARD_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info,
- umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]);
-#ifdef CONFIG_F2FS_IOSTAT
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_period_ms, iostat_period_ms);
+#define GC_THREAD_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, name, elname)
+
+#define SM_INFO_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, name, elname)
+
+#define SM_INFO_GENERAL_RW_ATTR(elname) \
+ SM_INFO_RW_ATTR(elname, elname)
+
+#define DCC_INFO_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, name, elname)
+
+#define DCC_INFO_GENERAL_RW_ATTR(elname) \
+ DCC_INFO_RW_ATTR(elname, elname)
+
+#define NM_INFO_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, name, elname)
+
+#define NM_INFO_GENERAL_RW_ATTR(elname) \
+ NM_INFO_RW_ATTR(elname, elname)
+
+#define F2FS_SBI_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, name, elname)
+
+#define F2FS_SBI_GENERAL_RW_ATTR(elname) \
+ F2FS_SBI_RW_ATTR(elname, elname)
+
+#define F2FS_SBI_GENERAL_RO_ATTR(elname) \
+ F2FS_RO_ATTR(F2FS_SBI, f2fs_sb_info, elname, elname)
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+#define FAULT_INFO_GENERAL_RW_ATTR(type, elname) \
+ F2FS_RW_ATTR(type, f2fs_fault_info, elname, elname)
#endif
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_io_bytes, max_io_bytes);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
+
+#define RESERVED_BLOCKS_GENERAL_RW_ATTR(elname) \
+ F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, elname, elname)
+
+#define CPRC_INFO_GENERAL_RW_ATTR(elname) \
+ F2FS_RW_ATTR(CPRC_INFO, ckpt_req_control, elname, elname)
+
+#define ATGC_INFO_RW_ATTR(name, elname) \
+ F2FS_RW_ATTR(ATGC_INFO, atgc_management, name, elname)
+
+/* GC_THREAD ATTR */
+GC_THREAD_RW_ATTR(gc_urgent_sleep_time, urgent_sleep_time);
+GC_THREAD_RW_ATTR(gc_min_sleep_time, min_sleep_time);
+GC_THREAD_RW_ATTR(gc_max_sleep_time, max_sleep_time);
+GC_THREAD_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
+
+/* SM_INFO ATTR */
+SM_INFO_RW_ATTR(reclaim_segments, rec_prefree_segments);
+SM_INFO_GENERAL_RW_ATTR(ipu_policy);
+SM_INFO_GENERAL_RW_ATTR(min_ipu_util);
+SM_INFO_GENERAL_RW_ATTR(min_fsync_blocks);
+SM_INFO_GENERAL_RW_ATTR(min_seq_blocks);
+SM_INFO_GENERAL_RW_ATTR(min_hot_blocks);
+SM_INFO_GENERAL_RW_ATTR(min_ssr_sections);
+
+/* DCC_INFO ATTR */
+DCC_INFO_RW_ATTR(max_small_discards, max_discards);
+DCC_INFO_GENERAL_RW_ATTR(max_discard_request);
+DCC_INFO_GENERAL_RW_ATTR(min_discard_issue_time);
+DCC_INFO_GENERAL_RW_ATTR(mid_discard_issue_time);
+DCC_INFO_GENERAL_RW_ATTR(max_discard_issue_time);
+DCC_INFO_GENERAL_RW_ATTR(discard_io_aware_gran);
+DCC_INFO_GENERAL_RW_ATTR(discard_urgent_util);
+DCC_INFO_GENERAL_RW_ATTR(discard_granularity);
+DCC_INFO_GENERAL_RW_ATTR(max_ordered_discard);
+
+/* NM_INFO ATTR */
+NM_INFO_RW_ATTR(max_roll_forward_node_blocks, max_rf_node_blocks);
+NM_INFO_GENERAL_RW_ATTR(ram_thresh);
+NM_INFO_GENERAL_RW_ATTR(ra_nid_pages);
+NM_INFO_GENERAL_RW_ATTR(dirty_nats_ratio);
+
+/* F2FS_SBI ATTR */
F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
+F2FS_SBI_RW_ATTR(gc_idle, gc_mode);
+F2FS_SBI_RW_ATTR(gc_urgent, gc_mode);
+F2FS_SBI_RW_ATTR(cp_interval, interval_time[CP_TIME]);
+F2FS_SBI_RW_ATTR(idle_interval, interval_time[REQ_TIME]);
+F2FS_SBI_RW_ATTR(discard_idle_interval, interval_time[DISCARD_TIME]);
+F2FS_SBI_RW_ATTR(gc_idle_interval, interval_time[GC_TIME]);
+F2FS_SBI_RW_ATTR(umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]);
+F2FS_SBI_RW_ATTR(gc_pin_file_thresh, gc_pin_file_threshold);
+F2FS_SBI_RW_ATTR(gc_reclaimed_segments, gc_reclaimed_segs);
+F2FS_SBI_GENERAL_RW_ATTR(max_victim_search);
+F2FS_SBI_GENERAL_RW_ATTR(migration_granularity);
+F2FS_SBI_GENERAL_RW_ATTR(dir_level);
+#ifdef CONFIG_F2FS_IOSTAT
+F2FS_SBI_GENERAL_RW_ATTR(iostat_enable);
+F2FS_SBI_GENERAL_RW_ATTR(iostat_period_ms);
+#endif
+F2FS_SBI_GENERAL_RW_ATTR(readdir_ra);
+F2FS_SBI_GENERAL_RW_ATTR(max_io_bytes);
+F2FS_SBI_GENERAL_RW_ATTR(data_io_flag);
+F2FS_SBI_GENERAL_RW_ATTR(node_io_flag);
+F2FS_SBI_GENERAL_RW_ATTR(gc_remaining_trials);
+F2FS_SBI_GENERAL_RW_ATTR(seq_file_ra_mul);
+F2FS_SBI_GENERAL_RW_ATTR(gc_segment_mode);
+F2FS_SBI_GENERAL_RW_ATTR(max_fragment_chunk);
+F2FS_SBI_GENERAL_RW_ATTR(max_fragment_hole);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+F2FS_SBI_GENERAL_RW_ATTR(compr_written_block);
+F2FS_SBI_GENERAL_RW_ATTR(compr_saved_block);
+F2FS_SBI_GENERAL_RW_ATTR(compr_new_inode);
+F2FS_SBI_GENERAL_RW_ATTR(compress_percent);
+F2FS_SBI_GENERAL_RW_ATTR(compress_watermark);
+#endif
+/* atomic write */
+F2FS_SBI_GENERAL_RO_ATTR(current_atomic_write);
+F2FS_SBI_GENERAL_RW_ATTR(peak_atomic_write);
+F2FS_SBI_GENERAL_RW_ATTR(committed_atomic_block);
+F2FS_SBI_GENERAL_RW_ATTR(revoked_atomic_block);
+/* block age extent cache */
+F2FS_SBI_GENERAL_RW_ATTR(hot_data_age_threshold);
+F2FS_SBI_GENERAL_RW_ATTR(warm_data_age_threshold);
+F2FS_SBI_GENERAL_RW_ATTR(last_age_weight);
+#ifdef CONFIG_BLK_DEV_ZONED
+F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
+#endif
+
+/* STAT_INFO ATTR */
+#ifdef CONFIG_F2FS_STAT_FS
+STAT_INFO_RO_ATTR(cp_foreground_calls, cp_count);
+STAT_INFO_RO_ATTR(cp_background_calls, bg_cp_count);
+STAT_INFO_RO_ATTR(gc_foreground_calls, call_count);
+STAT_INFO_RO_ATTR(gc_background_calls, bg_gc);
+#endif
+
+/* FAULT_INFO ATTR */
#ifdef CONFIG_F2FS_FAULT_INJECTION
-F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
-F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
+FAULT_INFO_GENERAL_RW_ATTR(FAULT_INFO_RATE, inject_rate);
+FAULT_INFO_GENERAL_RW_ATTR(FAULT_INFO_TYPE, inject_type);
#endif
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_remaining_trials, gc_remaining_trials);
-F2FS_RW_ATTR(CPRC_INFO, ckpt_req_control, ckpt_thread_ioprio, ckpt_thread_ioprio);
+
+/* RESERVED_BLOCKS ATTR */
+RESERVED_BLOCKS_GENERAL_RW_ATTR(reserved_blocks);
+
+/* CPRC_INFO ATTR */
+CPRC_INFO_GENERAL_RW_ATTR(ckpt_thread_ioprio);
+
+/* ATGC_INFO ATTR */
+ATGC_INFO_RW_ATTR(atgc_candidate_ratio, candidate_ratio);
+ATGC_INFO_RW_ATTR(atgc_candidate_count, max_candidate_count);
+ATGC_INFO_RW_ATTR(atgc_age_weight, age_weight);
+ATGC_INFO_RW_ATTR(atgc_age_threshold, age_threshold);
+
F2FS_GENERAL_RO_ATTR(dirty_segments);
F2FS_GENERAL_RO_ATTR(free_segments);
F2FS_GENERAL_RO_ATTR(ovp_segments);
@@ -917,10 +1009,6 @@ F2FS_GENERAL_RO_ATTR(main_blkaddr);
F2FS_GENERAL_RO_ATTR(pending_discard);
F2FS_GENERAL_RO_ATTR(gc_mode);
#ifdef CONFIG_F2FS_STAT_FS
-F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count);
-F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count);
-F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, gc_foreground_calls, call_count);
-F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, gc_background_calls, bg_gc);
F2FS_GENERAL_RO_ATTR(moved_blocks_background);
F2FS_GENERAL_RO_ATTR(moved_blocks_foreground);
F2FS_GENERAL_RO_ATTR(avg_vblocks);
@@ -935,8 +1023,6 @@ F2FS_FEATURE_RO_ATTR(encrypted_casefold);
#endif /* CONFIG_FS_ENCRYPTION */
#ifdef CONFIG_BLK_DEV_ZONED
F2FS_FEATURE_RO_ATTR(block_zoned);
-F2FS_RO_ATTR(F2FS_SBI, f2fs_sb_info, unusable_blocks_per_sec,
- unusable_blocks_per_sec);
#endif
F2FS_FEATURE_RO_ATTR(atomic_write);
F2FS_FEATURE_RO_ATTR(extra_attr);
@@ -956,37 +1042,9 @@ F2FS_FEATURE_RO_ATTR(casefold);
F2FS_FEATURE_RO_ATTR(readonly);
#ifdef CONFIG_F2FS_FS_COMPRESSION
F2FS_FEATURE_RO_ATTR(compression);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_written_block, compr_written_block);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_saved_block, compr_saved_block);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_new_inode, compr_new_inode);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compress_percent, compress_percent);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compress_watermark, compress_watermark);
#endif
F2FS_FEATURE_RO_ATTR(pin_file);
-/* For ATGC */
-F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_candidate_ratio, candidate_ratio);
-F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_candidate_count, max_candidate_count);
-F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_age_weight, age_weight);
-F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_age_threshold, age_threshold);
-
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, seq_file_ra_mul, seq_file_ra_mul);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_segment_mode, gc_segment_mode);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_reclaimed_segments, gc_reclaimed_segs);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_chunk, max_fragment_chunk);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_hole, max_fragment_hole);
-
-/* For atomic write */
-F2FS_RO_ATTR(F2FS_SBI, f2fs_sb_info, current_atomic_write, current_atomic_write);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, peak_atomic_write, peak_atomic_write);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, committed_atomic_block, committed_atomic_block);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, revoked_atomic_block, revoked_atomic_block);
-
-/* For block age extent cache */
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, hot_data_age_threshold, hot_data_age_threshold);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, warm_data_age_threshold, warm_data_age_threshold);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, last_age_weight, last_age_weight);
-
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_urgent_sleep_time),
@@ -1386,12 +1444,19 @@ int __init f2fs_init_sysfs(void)
ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype,
NULL, "features");
- if (ret) {
- kobject_put(&f2fs_feat);
- kset_unregister(&f2fs_kset);
- } else {
- f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ if (ret)
+ goto put_kobject;
+
+ f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ if (!f2fs_proc_root) {
+ ret = -ENOMEM;
+ goto put_kobject;
}
+
+ return 0;
+put_kobject:
+ kobject_put(&f2fs_feat);
+ kset_unregister(&f2fs_kset);
return ret;
}
@@ -1430,23 +1495,24 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
if (err)
goto put_feature_list_kobj;
- if (f2fs_proc_root)
- sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+ sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+ if (!sbi->s_proc) {
+ err = -ENOMEM;
+ goto put_feature_list_kobj;
+ }
- if (sbi->s_proc) {
- proc_create_single_data("segment_info", 0444, sbi->s_proc,
+ proc_create_single_data("segment_info", 0444, sbi->s_proc,
segment_info_seq_show, sb);
- proc_create_single_data("segment_bits", 0444, sbi->s_proc,
+ proc_create_single_data("segment_bits", 0444, sbi->s_proc,
segment_bits_seq_show, sb);
#ifdef CONFIG_F2FS_IOSTAT
- proc_create_single_data("iostat_info", 0444, sbi->s_proc,
+ proc_create_single_data("iostat_info", 0444, sbi->s_proc,
iostat_info_seq_show, sb);
#endif
- proc_create_single_data("victim_bits", 0444, sbi->s_proc,
+ proc_create_single_data("victim_bits", 0444, sbi->s_proc,
victim_bits_seq_show, sb);
- proc_create_single_data("discard_plist_info", 0444, sbi->s_proc,
+ proc_create_single_data("discard_plist_info", 0444, sbi->s_proc,
discard_plist_seq_show, sb);
- }
return 0;
put_feature_list_kobj:
kobject_put(&sbi->s_feature_list_kobj);
@@ -1462,8 +1528,7 @@ put_sb_kobj:
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
{
- if (sbi->s_proc)
- remove_proc_subtree(sbi->sb->s_id, f2fs_proc_root);
+ remove_proc_subtree(sbi->sb->s_id, f2fs_proc_root);
kobject_put(&sbi->s_stat_kobj);
wait_for_completion(&sbi->s_stat_kobj_unregister);
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 213805d3592c..476b186b90a6 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -528,10 +528,12 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN)
return -ERANGE;
- f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
+ if (!ipage)
+ f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
error = lookup_all_xattrs(inode, ipage, index, len, name,
&entry, &base_addr, &base_size, &is_inline);
- f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
+ if (!ipage)
+ f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 416d652774a3..b1811c392e6f 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -83,6 +83,7 @@ struct f2fs_xattr_entry {
sizeof(struct f2fs_xattr_header) - \
sizeof(struct f2fs_xattr_entry))
+#define MIN_INLINE_XATTR_SIZE (sizeof(struct f2fs_xattr_header) / sizeof(__le32))
#define MAX_INLINE_XATTR_SIZE \
(DEF_ADDRS_PER_INODE - \
F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) - \
diff --git a/fs/file_table.c b/fs/file_table.c
index e06c68e2d757..fc7d677ff5ad 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -160,7 +160,7 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
f->f_cred = get_cred(cred);
error = security_file_alloc(f);
if (unlikely(error)) {
- file_free_rcu(&f->f_rcuhead);
+ put_cred(f->f_cred);
return error;
}
@@ -208,8 +208,10 @@ struct file *alloc_empty_file(int flags, const struct cred *cred)
return ERR_PTR(-ENOMEM);
error = init_file(f, flags, cred);
- if (unlikely(error))
+ if (unlikely(error)) {
+ kmem_cache_free(filp_cachep, f);
return ERR_PTR(error);
+ }
percpu_counter_inc(&nr_files);
@@ -240,8 +242,10 @@ struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
return ERR_PTR(-ENOMEM);
error = init_file(f, flags, cred);
- if (unlikely(error))
+ if (unlikely(error)) {
+ kmem_cache_free(filp_cachep, f);
return ERR_PTR(error);
+ }
f->f_mode |= FMODE_NOACCOUNT;
@@ -265,8 +269,10 @@ struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
return ERR_PTR(-ENOMEM);
error = init_file(&ff->file, flags, cred);
- if (unlikely(error))
+ if (unlikely(error)) {
+ kfree(ff);
return ERR_PTR(error);
+ }
ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
return &ff->file;
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 4d8d4f16c727..5f1be1da92ce 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -775,7 +775,8 @@ static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
NULL);
if (rc < 0)
- return rc;
+ return dax_mem2blk_err(rc);
+
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
return 0;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 1c407eba1e30..ae49256b7c8c 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -432,10 +432,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
if (error)
return error;
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_page(page);
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
flush_dcache_page(page);
brelse(dibh);
SetPageUptodate(page);
@@ -489,18 +489,18 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
unsigned copied = 0;
unsigned amt;
struct page *page;
- void *p;
do {
+ page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
+ if (IS_ERR(page)) {
+ if (PTR_ERR(page) == -EINTR)
+ continue;
+ return PTR_ERR(page);
+ }
amt = size - copied;
if (offset + size > PAGE_SIZE)
amt = PAGE_SIZE - offset;
- page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
- if (IS_ERR(page))
- return PTR_ERR(page);
- p = kmap_atomic(page);
- memcpy(buf + copied, p + offset, amt);
- kunmap_atomic(p);
+ memcpy_from_page(buf + copied, page, offset, amt);
put_page(page);
copied += amt;
index++;
@@ -751,7 +751,6 @@ static const struct address_space_operations gfs2_aops = {
.release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.bmap = gfs2_bmap,
- .direct_IO = noop_direct_IO,
.migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index c739b258a2d9..8d611fbcf0bd 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1729,8 +1729,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
if (offset >= maxsize) {
/*
- * The starting point lies beyond the allocated meta-data;
- * there are no blocks do deallocate.
+ * The starting point lies beyond the allocated metadata;
+ * there are no blocks to deallocate.
*/
return 0;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index f146447eac63..1bf3c4453516 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -630,6 +630,9 @@ int gfs2_open_common(struct inode *inode, struct file *file)
ret = generic_file_open(inode, file);
if (ret)
return ret;
+
+ if (!gfs2_is_jdata(GFS2_I(inode)))
+ file->f_mode |= FMODE_CAN_ODIRECT;
}
fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
@@ -1030,8 +1033,8 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
}
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
-retry:
if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+retry:
window_size -= fault_in_iov_iter_readable(from, window_size);
if (!window_size) {
ret = -EFAULT;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 5adc7d85dbf3..1438e7465e30 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -145,8 +145,8 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
*
* We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
* when we're withdrawn. For example, to maintain metadata integrity, we should
- * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
- * iopen or the transaction glocks may be safely used because none of their
+ * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
+ * the iopen or freeze glock may be safely used because none of their
* metadata goes through the journal. So in general, we should disallow all
* glocks that are journaled, and allow all the others. One exception is:
* we need to allow our active journal to be promoted and demoted so others
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 01d433ed6ce7..54319328b16b 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -236,7 +236,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
truncate_inode_pages_range(mapping, start, end);
}
-static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
const char *fs_id_buf)
{
struct gfs2_rgrpd *rgd = gl->gl_object;
@@ -536,72 +536,53 @@ static int inode_go_held(struct gfs2_holder *gh)
*
*/
-static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
const char *fs_id_buf)
{
struct gfs2_inode *ip = gl->gl_object;
- struct inode *inode;
- unsigned long nrpages;
+ const struct inode *inode = &ip->i_inode;
if (ip == NULL)
return;
- inode = &ip->i_inode;
- xa_lock_irq(&inode->i_data.i_pages);
- nrpages = inode->i_data.nrpages;
- xa_unlock_irq(&inode->i_data.i_pages);
-
gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
"p:%lu\n", fs_id_buf,
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
- IF2DT(ip->i_inode.i_mode), ip->i_flags,
+ IF2DT(inode->i_mode), ip->i_flags,
(unsigned int)ip->i_diskflags,
- (unsigned long long)i_size_read(inode), nrpages);
+ (unsigned long long)i_size_read(inode),
+ inode->i_data.nrpages);
}
/**
- * freeze_go_sync - promote/demote the freeze glock
+ * freeze_go_callback - A cluster node is requesting a freeze
* @gl: the glock
+ * @remote: true if this came from a different cluster node
*/
-static int freeze_go_sync(struct gfs2_glock *gl)
+static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
- int error = 0;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct super_block *sb = sdp->sd_vfs;
+
+ if (!remote ||
+ gl->gl_state != LM_ST_SHARED ||
+ gl->gl_demote_state != LM_ST_UNLOCKED)
+ return;
/*
- * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
- * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
- * all the nodes should have the freeze glock in SH mode and they all
- * call do_xmote: One for EX and the others for UN. They ALL must
- * freeze locally, and they ALL must queue freeze work. The freeze_work
- * calls freeze_func, which tries to reacquire the freeze glock in SH,
- * effectively waiting for the thaw on the node who holds it in EX.
- * Once thawed, the work func acquires the freeze glock in
- * SH and everybody goes back to thawed.
+ * Try to get an active super block reference to prevent racing with
+ * unmount (see trylock_super()). But note that unmount isn't the only
+ * place where a write lock on s_umount is taken, and we can fail here
+ * because of things like remount as well.
*/
- if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
- !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
- atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
- error = freeze_super(sdp->sd_vfs);
- if (error) {
- fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
- error);
- if (gfs2_withdrawn(sdp)) {
- atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
- return 0;
- }
- gfs2_assert_withdraw(sdp, 0);
- }
- queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
- if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
- gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
- GFS2_LFC_FREEZE_GO_SYNC);
- else /* read-only mounts */
- atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
+ if (down_read_trylock(&sb->s_umount)) {
+ atomic_inc(&sb->s_active);
+ up_read(&sb->s_umount);
+ if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
+ deactivate_super(sb);
}
- return 0;
}
/**
@@ -761,9 +742,9 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
};
const struct gfs2_glock_operations gfs2_freeze_glops = {
- .go_sync = freeze_go_sync,
.go_xmote_bh = freeze_go_xmote_bh,
.go_demote_ok = freeze_go_demote_ok,
+ .go_callback = freeze_go_callback,
.go_type = LM_TYPE_NONDISK,
.go_flags = GLOF_NONDISK,
};
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 79485329118b..04f2d78e8658 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -221,7 +221,7 @@ struct gfs2_glock_operations {
int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_instantiate) (struct gfs2_glock *gl);
int (*go_held)(struct gfs2_holder *gh);
- void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
+ void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl,
const char *fs_id_buf);
void (*go_callback)(struct gfs2_glock *gl, bool remote);
void (*go_free)(struct gfs2_glock *gl);
@@ -600,7 +600,7 @@ enum {
SDF_RORECOVERY = 7, /* read only recovery */
SDF_SKIP_DLM_UNLOCK = 8,
SDF_FORCE_AIL_FLUSH = 9,
- SDF_FS_FROZEN = 10,
+ SDF_FREEZE_INITIATOR = 10,
SDF_WITHDRAWING = 11, /* Will withdraw eventually */
SDF_WITHDRAW_IN_PROG = 12, /* Withdraw is in progress */
SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */
@@ -608,12 +608,7 @@ enum {
withdrawing */
SDF_DEACTIVATING = 15,
SDF_EVICTING = 16,
-};
-
-enum gfs2_freeze_state {
- SFS_UNFROZEN = 0,
- SFS_STARTING_FREEZE = 1,
- SFS_FROZEN = 2,
+ SDF_FROZEN = 17,
};
#define GFS2_FSNAME_LEN 256
@@ -841,7 +836,6 @@ struct gfs2_sbd {
/* For quiescing the filesystem */
struct gfs2_holder sd_freeze_gh;
- atomic_t sd_freeze_state;
struct mutex sd_freeze_mutex;
char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 71911bf9ab34..54911294687c 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -296,10 +296,8 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
- if (gl->gl_lksb.sb_lkid == 0) {
- gfs2_glock_free(gl);
- return;
- }
+ if (gl->gl_lksb.sb_lkid == 0)
+ goto out_free;
clear_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
@@ -307,17 +305,13 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
gfs2_update_request_times(gl);
/* don't want to call dlm if we've unmounted the lock protocol */
- if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
- gfs2_glock_free(gl);
- return;
- }
+ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
+ goto out_free;
/* don't want to skip dlm_unlock writing the lvb when lock has one */
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
- !gl->gl_lksb.sb_lvbptr) {
- gfs2_glock_free(gl);
- return;
- }
+ !gl->gl_lksb.sb_lvbptr)
+ goto out_free;
again:
error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
@@ -331,8 +325,11 @@ again:
fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number, error);
- return;
}
+ return;
+
+out_free:
+ gfs2_glock_free(gl);
}
static void gdlm_cancel(struct gfs2_glock *gl)
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index d750d1128bed..aa568796207c 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -914,9 +914,8 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
blk_opf_t op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
- enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
- gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
+ gfs2_assert_withdraw(sdp, !test_bit(SDF_FROZEN, &sdp->sd_flags));
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
gfs2_ordered_wait(sdp);
@@ -1036,7 +1035,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
struct gfs2_trans *tr = NULL;
unsigned int reserved_blocks = 0, used_blocks = 0;
- enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+ bool frozen = test_bit(SDF_FROZEN, &sdp->sd_flags);
unsigned int first_log_head;
unsigned int reserved_revokes = 0;
@@ -1067,7 +1066,7 @@ repeat:
if (tr) {
sdp->sd_log_tr = NULL;
tr->tr_first = first_log_head;
- if (unlikely (state == SFS_FROZEN)) {
+ if (unlikely(frozen)) {
if (gfs2_assert_withdraw_delayed(sdp,
!tr->tr_num_buf_new && !tr->tr_num_databuf_new))
goto out_withdraw;
@@ -1092,7 +1091,7 @@ repeat:
if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- if (unlikely(state == SFS_FROZEN))
+ if (unlikely(frozen))
if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
goto out_withdraw;
@@ -1136,8 +1135,6 @@ repeat:
if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
GFS2_LOG_HEAD_FLUSH_FREEZE))
gfs2_log_shutdown(sdp);
- if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
- atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
}
out_end:
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 1902413d5d12..251322b01631 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -427,10 +427,11 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct gfs2_log_header_host lh;
- void *kaddr = kmap_atomic(page);
+ void *kaddr;
unsigned int offset;
bool ret = false;
+ kaddr = kmap_local_page(page);
for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
if (lh.lh_sequence >= head->lh_sequence)
@@ -441,7 +442,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
}
}
}
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
return ret;
}
@@ -626,11 +627,11 @@ static void gfs2_check_magic(struct buffer_head *bh)
__be32 *ptr;
clear_buffer_escaped(bh);
- kaddr = kmap_atomic(bh->b_page);
+ kaddr = kmap_local_page(bh->b_page);
ptr = kaddr + bh_offset(bh);
if (*ptr == cpu_to_be32(GFS2_MAGIC))
set_buffer_escaped(bh);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
}
static int blocknr_cmp(void *priv, const struct list_head *a,
@@ -696,14 +697,12 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
lock_buffer(bd2->bd_bh);
if (buffer_escaped(bd2->bd_bh)) {
- void *kaddr;
+ void *p;
+
page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
- ptr = page_address(page);
- kaddr = kmap_atomic(bd2->bd_bh->b_page);
- memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
- bd2->bd_bh->b_size);
- kunmap_atomic(kaddr);
- *(__be32 *)ptr = 0;
+ p = page_address(page);
+ memcpy_from_page(p, page, bh_offset(bd2->bd_bh), bd2->bd_bh->b_size);
+ *(__be32 *)p = 0;
clear_buffer_escaped(bd2->bd_bh);
unlock_buffer(bd2->bd_bh);
brelse(bd2->bd_bh);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index cd962985b058..8a27957dbfee 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -135,7 +135,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
init_rwsem(&sdp->sd_log_flush_lock);
atomic_set(&sdp->sd_log_in_flight, 0);
init_waitqueue_head(&sdp->sd_log_flush_wait);
- atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
mutex_init(&sdp->sd_freeze_mutex);
return sdp;
@@ -434,7 +433,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
CREATE, &sdp->sd_freeze_gl);
if (error) {
- fs_err(sdp, "can't create transaction glock: %d\n", error);
+ fs_err(sdp, "can't create freeze glock: %d\n", error);
goto fail_rename;
}
@@ -1140,7 +1139,6 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
int silent = fc->sb_flags & SB_SILENT;
struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh;
- struct gfs2_holder freeze_gh;
int error;
sdp = init_sbd(sb);
@@ -1269,15 +1267,15 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
}
}
- error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+ error = gfs2_freeze_lock_shared(sdp);
if (error)
goto fail_per_node;
if (!sb_rdonly(sb))
error = gfs2_make_fs_rw(sdp);
- gfs2_freeze_unlock(&freeze_gh);
if (error) {
+ gfs2_freeze_unlock(&sdp->sd_freeze_gh);
if (sdp->sd_quotad_process)
kthread_stop(sdp->sd_quotad_process);
sdp->sd_quotad_process = NULL;
@@ -1590,12 +1588,6 @@ static int gfs2_reconfigure(struct fs_context *fc)
fc->sb_flags |= SB_RDONLY;
if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
- struct gfs2_holder freeze_gh;
-
- error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
- if (error)
- return -EINVAL;
-
if (fc->sb_flags & SB_RDONLY) {
gfs2_make_fs_ro(sdp);
} else {
@@ -1603,7 +1595,6 @@ static int gfs2_reconfigure(struct fs_context *fc)
if (error)
errorfc(fc, "unable to remount read-write");
}
- gfs2_freeze_unlock(&freeze_gh);
}
sdp->sd_args = *newargs;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 1ed17226d9ed..704192b73605 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -75,6 +75,9 @@
#define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
+#define QC_CHANGE 0
+#define QC_SYNC 1
+
/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/* -> sd_bitmap_lock */
static DEFINE_SPINLOCK(qd_lock);
@@ -470,7 +473,6 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
spin_unlock(&qd_lock);
if (qd) {
- gfs2_assert_warn(sdp, qd->qd_change_sync);
error = bh_get(qd);
if (error) {
clear_bit(QDF_LOCKED, &qd->qd_flags);
@@ -591,6 +593,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
error = -EIO;
+ gfs2_qa_put(ip);
goto out;
}
@@ -661,7 +664,7 @@ static int sort_qd(const void *a, const void *b)
return 0;
}
-static void do_qc(struct gfs2_quota_data *qd, s64 change)
+static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
@@ -686,16 +689,18 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
qd->qd_change = x;
spin_unlock(&qd_lock);
- if (!x) {
+ if (qc_type == QC_CHANGE) {
+ if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
+ qd_hold(qd);
+ slot_hold(qd);
+ }
+ } else {
gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
clear_bit(QDF_CHANGE, &qd->qd_flags);
qc->qc_flags = 0;
qc->qc_id = 0;
slot_put(qd);
qd_put(qd);
- } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
- qd_hold(qd);
- slot_hold(qd);
}
if (change < 0) /* Reset quiet flag if we freed some blocks */
@@ -711,7 +716,6 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
struct address_space *mapping = inode->i_mapping;
struct page *page;
struct buffer_head *bh;
- void *kaddr;
u64 blk;
unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
unsigned to_write = bytes, pg_off = off;
@@ -763,10 +767,8 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
}
/* Write to the page, now that we have setup the buffer(s) */
- kaddr = kmap_atomic(page);
- memcpy(kaddr + off, buf, bytes);
+ memcpy_to_page(page, off, buf, bytes);
flush_dcache_page(page);
- kunmap_atomic(kaddr);
unlock_page(page);
put_page(page);
@@ -955,7 +957,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
if (error)
goto out_end_trans;
- do_qc(qd, -qd->qd_change_sync);
+ do_qc(qd, -qd->qd_change_sync, QC_SYNC);
set_bit(QDF_REFRESH, &qd->qd_flags);
}
@@ -1281,7 +1283,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
qid_eq(qd->qd_id, make_kqid_gid(gid))) {
- do_qc(qd, change);
+ do_qc(qd, change, QC_CHANGE);
}
}
}
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 2bb085a72e8e..9c7a9f640bad 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -404,7 +404,7 @@ void gfs2_recover_func(struct work_struct *work)
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct gfs2_log_header_host head;
- struct gfs2_holder j_gh, ji_gh, thaw_gh;
+ struct gfs2_holder j_gh, ji_gh;
ktime_t t_start, t_jlck, t_jhd, t_tlck, t_rep;
int ro = 0;
unsigned int pass;
@@ -420,10 +420,10 @@ void gfs2_recover_func(struct work_struct *work)
if (sdp->sd_args.ar_spectator)
goto fail;
if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
- fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
+ fs_info(sdp, "jid=%u: Trying to acquire journal glock...\n",
jd->jd_jid);
jlocked = 1;
- /* Acquire the journal lock so we can do recovery */
+ /* Acquire the journal glock so we can do recovery */
error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
LM_ST_EXCLUSIVE,
@@ -465,14 +465,14 @@ void gfs2_recover_func(struct work_struct *work)
ktime_ms_delta(t_jhd, t_jlck));
if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
- fs_info(sdp, "jid=%u: Acquiring the transaction lock...\n",
- jd->jd_jid);
-
- /* Acquire a shared hold on the freeze lock */
+ mutex_lock(&sdp->sd_freeze_mutex);
- error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
- if (error)
+ if (test_bit(SDF_FROZEN, &sdp->sd_flags)) {
+ mutex_unlock(&sdp->sd_freeze_mutex);
+ fs_warn(sdp, "jid=%u: Can't replay: filesystem "
+ "is frozen\n", jd->jd_jid);
goto fail_gunlock_ji;
+ }
if (test_bit(SDF_RORECOVERY, &sdp->sd_flags)) {
ro = 1;
@@ -496,7 +496,7 @@ void gfs2_recover_func(struct work_struct *work)
fs_warn(sdp, "jid=%u: Can't replay: read-only block "
"device\n", jd->jd_jid);
error = -EROFS;
- goto fail_gunlock_thaw;
+ goto fail_gunlock_nofreeze;
}
t_tlck = ktime_get();
@@ -514,7 +514,7 @@ void gfs2_recover_func(struct work_struct *work)
lops_after_scan(jd, error, pass);
if (error) {
up_read(&sdp->sd_log_flush_lock);
- goto fail_gunlock_thaw;
+ goto fail_gunlock_nofreeze;
}
}
@@ -522,7 +522,7 @@ void gfs2_recover_func(struct work_struct *work)
clean_journal(jd, &head);
up_read(&sdp->sd_log_flush_lock);
- gfs2_freeze_unlock(&thaw_gh);
+ mutex_unlock(&sdp->sd_freeze_mutex);
t_rep = ktime_get();
fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
@@ -543,8 +543,8 @@ void gfs2_recover_func(struct work_struct *work)
fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
goto done;
-fail_gunlock_thaw:
- gfs2_freeze_unlock(&thaw_gh);
+fail_gunlock_nofreeze:
+ mutex_unlock(&sdp->sd_freeze_mutex);
fail_gunlock_ji:
if (jlocked) {
gfs2_glock_dq_uninit(&ji_gh);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 3b9b76e980ad..9308190895c8 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -2584,8 +2584,8 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- rgrp_unlock_local(rgd);
be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
+ rgrp_unlock_local(rgd);
gfs2_statfs_change(sdp, 0, +1, -1);
trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a84bf6444bba..9f4d5d6549ee 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -332,7 +332,12 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
struct lfcc *lfcc;
LIST_HEAD(list);
struct gfs2_log_header_host lh;
- int error;
+ int error, error2;
+
+ /*
+ * Grab all the journal glocks in SH mode. We are *probably* doing
+ * that to prevent recovery.
+ */
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
@@ -349,11 +354,13 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
list_add(&lfcc->list, &list);
}
+ gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
LM_FLAG_NOEXP | GL_NOPID,
&sdp->sd_freeze_gh);
if (error)
- goto out;
+ goto relock_shared;
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
error = gfs2_jdesc_check(jd);
@@ -368,8 +375,14 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
}
}
- if (error)
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ if (!error)
+ goto out; /* success */
+
+ gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
+relock_shared:
+ error2 = gfs2_freeze_lock_shared(sdp);
+ gfs2_assert_withdraw(sdp, !error2);
out:
while (!list_empty(&list)) {
@@ -463,7 +476,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
* @flags: The type of dirty
*
* Unfortunately it can be called under any combination of inode
- * glock and transaction lock, so we have to check carefully.
+ * glock and freeze glock, so we have to check carefully.
*
* At the moment this deals only with atime - it should be possible
* to expand that role in future, once a review of the locking has
@@ -615,6 +628,8 @@ restart:
/* Release stuff */
+ gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
iput(sdp->sd_jindex);
iput(sdp->sd_statfs_inode);
iput(sdp->sd_rindex);
@@ -669,59 +684,109 @@ static int gfs2_sync_fs(struct super_block *sb, int wait)
return sdp->sd_log_error;
}
-void gfs2_freeze_func(struct work_struct *work)
+static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
{
- int error;
- struct gfs2_holder freeze_gh;
- struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
struct super_block *sb = sdp->sd_vfs;
+ int error;
- atomic_inc(&sb->s_active);
- error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
- if (error) {
- gfs2_assert_withdraw(sdp, 0);
- } else {
- atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
- error = thaw_super(sb);
- if (error) {
- fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
- error);
- gfs2_assert_withdraw(sdp, 0);
+ error = freeze_super(sb);
+ if (error)
+ return error;
+
+ if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+ GFS2_LFC_FREEZE_GO_SYNC);
+ if (gfs2_withdrawn(sdp)) {
+ thaw_super(sb);
+ return -EIO;
}
- gfs2_freeze_unlock(&freeze_gh);
}
+ return 0;
+}
+
+static int gfs2_do_thaw(struct gfs2_sbd *sdp)
+{
+ struct super_block *sb = sdp->sd_vfs;
+ int error;
+
+ error = gfs2_freeze_lock_shared(sdp);
+ if (error)
+ goto fail;
+ error = thaw_super(sb);
+ if (!error)
+ return 0;
+
+fail:
+ fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
+ gfs2_assert_withdraw(sdp, 0);
+ return error;
+}
+
+void gfs2_freeze_func(struct work_struct *work)
+{
+ struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
+ struct super_block *sb = sdp->sd_vfs;
+ int error;
+
+ mutex_lock(&sdp->sd_freeze_mutex);
+ error = -EBUSY;
+ if (test_bit(SDF_FROZEN, &sdp->sd_flags))
+ goto freeze_failed;
+
+ error = gfs2_freeze_locally(sdp);
+ if (error)
+ goto freeze_failed;
+
+ gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ set_bit(SDF_FROZEN, &sdp->sd_flags);
+
+ error = gfs2_do_thaw(sdp);
+ if (error)
+ goto out;
+
+ clear_bit(SDF_FROZEN, &sdp->sd_flags);
+ goto out;
+
+freeze_failed:
+ fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);
+
+out:
+ mutex_unlock(&sdp->sd_freeze_mutex);
deactivate_super(sb);
- clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
- wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
- return;
}
/**
- * gfs2_freeze - prevent further writes to the filesystem
+ * gfs2_freeze_super - prevent further writes to the filesystem
* @sb: the VFS structure for the filesystem
*
*/
-static int gfs2_freeze(struct super_block *sb)
+static int gfs2_freeze_super(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
int error;
- mutex_lock(&sdp->sd_freeze_mutex);
- if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
- error = -EBUSY;
+ if (!mutex_trylock(&sdp->sd_freeze_mutex))
+ return -EBUSY;
+ error = -EBUSY;
+ if (test_bit(SDF_FROZEN, &sdp->sd_flags))
goto out;
- }
for (;;) {
- if (gfs2_withdrawn(sdp)) {
- error = -EINVAL;
+ error = gfs2_freeze_locally(sdp);
+ if (error) {
+ fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
+ error);
goto out;
}
error = gfs2_lock_fs_check_clean(sdp);
if (!error)
- break;
+ break; /* success */
+
+ error = gfs2_do_thaw(sdp);
+ if (error)
+ goto out;
if (error == -EBUSY)
fs_err(sdp, "waiting for recovery before freeze\n");
@@ -735,32 +800,58 @@ static int gfs2_freeze(struct super_block *sb)
fs_err(sdp, "retrying...\n");
msleep(1000);
}
- set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
+
out:
+ if (!error) {
+ set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
+ set_bit(SDF_FROZEN, &sdp->sd_flags);
+ }
mutex_unlock(&sdp->sd_freeze_mutex);
return error;
}
/**
- * gfs2_unfreeze - reallow writes to the filesystem
+ * gfs2_thaw_super - reallow writes to the filesystem
* @sb: the VFS structure for the filesystem
*
*/
-static int gfs2_unfreeze(struct super_block *sb)
+static int gfs2_thaw_super(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
+ int error;
- mutex_lock(&sdp->sd_freeze_mutex);
- if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
- !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
- mutex_unlock(&sdp->sd_freeze_mutex);
- return -EINVAL;
+ if (!mutex_trylock(&sdp->sd_freeze_mutex))
+ return -EBUSY;
+ error = -EINVAL;
+ if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
+ goto out;
+
+ gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
+ error = gfs2_do_thaw(sdp);
+
+ if (!error) {
+ clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
+ clear_bit(SDF_FROZEN, &sdp->sd_flags);
}
+out:
+ mutex_unlock(&sdp->sd_freeze_mutex);
+ return error;
+}
+
+void gfs2_thaw_freeze_initiator(struct super_block *sb)
+{
+ struct gfs2_sbd *sdp = sb->s_fs_info;
+
+ mutex_lock(&sdp->sd_freeze_mutex);
+ if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
+ goto out;
gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
+out:
mutex_unlock(&sdp->sd_freeze_mutex);
- return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
}
/**
@@ -1004,7 +1095,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
struct gfs2_args *args = &sdp->sd_args;
- int val;
+ unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
+
+ spin_lock(&sdp->sd_tune.gt_spin);
+ logd_secs = sdp->sd_tune.gt_logd_secs;
+ quota_quantum = sdp->sd_tune.gt_quota_quantum;
+ statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
+ statfs_slow = sdp->sd_tune.gt_statfs_slow;
+ spin_unlock(&sdp->sd_tune.gt_spin);
if (is_ancestor(root, sdp->sd_master_dir))
seq_puts(s, ",meta");
@@ -1059,17 +1157,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
}
if (args->ar_discard)
seq_puts(s, ",discard");
- val = sdp->sd_tune.gt_logd_secs;
- if (val != 30)
- seq_printf(s, ",commit=%d", val);
- val = sdp->sd_tune.gt_statfs_quantum;
- if (val != 30)
- seq_printf(s, ",statfs_quantum=%d", val);
- else if (sdp->sd_tune.gt_statfs_slow)
+ if (logd_secs != 30)
+ seq_printf(s, ",commit=%d", logd_secs);
+ if (statfs_quantum != 30)
+ seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
+ else if (statfs_slow)
seq_puts(s, ",statfs_quantum=0");
- val = sdp->sd_tune.gt_quota_quantum;
- if (val != 60)
- seq_printf(s, ",quota_quantum=%d", val);
+ if (quota_quantum != 60)
+ seq_printf(s, ",quota_quantum=%d", quota_quantum);
if (args->ar_statfs_percent)
seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
@@ -1131,9 +1226,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
return -EIO;
}
- error = gfs2_rindex_update(sdp);
- if (error)
- return error;
+ gfs2_rindex_update(sdp);
error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (error)
@@ -1334,9 +1427,6 @@ static int evict_unlinked_inode(struct inode *inode)
goto out;
}
- if (ip->i_gl)
- gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
-
/*
* As soon as we clear the bitmap for the dinode, gfs2_create_inode()
* can get called to recreate it, or even gfs2_inode_lookup() if the
@@ -1350,6 +1440,9 @@ static int evict_unlinked_inode(struct inode *inode)
*/
ret = gfs2_dinode_dealloc(ip);
+ if (!ret && ip->i_gl)
+ gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
+
out:
return ret;
}
@@ -1528,8 +1621,8 @@ const struct super_operations gfs2_super_ops = {
.evict_inode = gfs2_evict_inode,
.put_super = gfs2_put_super,
.sync_fs = gfs2_sync_fs,
- .freeze_super = gfs2_freeze,
- .thaw_super = gfs2_unfreeze,
+ .freeze_super = gfs2_freeze_super,
+ .thaw_super = gfs2_thaw_super,
.statfs = gfs2_statfs,
.drop_inode = gfs2_drop_inode,
.show_options = gfs2_show_options,
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 58d13fd77aed..bba58629bc45 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -46,6 +46,7 @@ extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
extern int gfs2_statfs_sync(struct super_block *sb, int type);
extern void gfs2_freeze_func(struct work_struct *work);
+extern void gfs2_thaw_freeze_initiator(struct super_block *sb);
extern void free_local_statfs_inodes(struct gfs2_sbd *sdp);
extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 454dc2ff8b5e..2dfbe2f188dd 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -82,6 +82,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
"RO Recovery: %d\n"
"Skip DLM Unlock: %d\n"
"Force AIL Flush: %d\n"
+ "FS Freeze Initiator: %d\n"
"FS Frozen: %d\n"
"Withdrawing: %d\n"
"Withdraw In Prog: %d\n"
@@ -111,7 +112,8 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
test_bit(SDF_RORECOVERY, &f),
test_bit(SDF_SKIP_DLM_UNLOCK, &f),
test_bit(SDF_FORCE_AIL_FLUSH, &f),
- test_bit(SDF_FS_FROZEN, &f),
+ test_bit(SDF_FREEZE_INITIATOR, &f),
+ test_bit(SDF_FROZEN, &f),
test_bit(SDF_WITHDRAWING, &f),
test_bit(SDF_WITHDRAW_IN_PROG, &f),
test_bit(SDF_REMOTE_WITHDRAW, &f),
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 63fec11ef2ce..ec1631257978 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -233,7 +233,6 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
struct gfs2_bufdata *bd;
struct gfs2_meta_header *mh;
struct gfs2_trans *tr = current->journal_info;
- enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
lock_buffer(bh);
if (buffer_pinned(bh)) {
@@ -267,7 +266,7 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
(unsigned long long)bd->bd_bh->b_blocknr);
BUG();
}
- if (unlikely(state == SFS_FROZEN)) {
+ if (unlikely(test_bit(SDF_FROZEN, &sdp->sd_flags))) {
fs_info(sdp, "GFS2:adding buf while frozen\n");
gfs2_assert_withdraw(sdp, 0);
}
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 7a6aeffcdf5c..dac22b1c1a2e 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -93,21 +93,18 @@ out_unlock:
}
/**
- * gfs2_freeze_lock - hold the freeze glock
+ * gfs2_freeze_lock_shared - hold the freeze glock
* @sdp: the superblock
- * @freeze_gh: pointer to the requested holder
- * @caller_flags: any additional flags needed by the caller
*/
-int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
- int caller_flags)
+int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
{
- int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags;
int error;
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
- freeze_gh);
- if (error && error != GLR_TRYFAILED)
- fs_err(sdp, "can't lock the freeze lock: %d\n", error);
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+ LM_FLAG_NOEXP | GL_EXACT,
+ &sdp->sd_freeze_gh);
+ if (error)
+ fs_err(sdp, "can't lock the freeze glock: %d\n", error);
return error;
}
@@ -124,7 +121,6 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
struct gfs2_inode *ip;
struct gfs2_glock *i_gl;
u64 no_formal_ino;
- int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
int ret = 0;
int tries;
@@ -152,24 +148,18 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
*/
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
if (!sb_rdonly(sdp->sd_vfs)) {
- struct gfs2_holder freeze_gh;
-
- gfs2_holder_mark_uninitialized(&freeze_gh);
- if (sdp->sd_freeze_gl &&
- !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
- ret = gfs2_freeze_lock(sdp, &freeze_gh,
- log_write_allowed ? 0 : LM_FLAG_TRY);
- if (ret == GLR_TRYFAILED)
- ret = 0;
- }
- if (!ret)
- gfs2_make_fs_ro(sdp);
+ bool locked = mutex_trylock(&sdp->sd_freeze_mutex);
+
+ gfs2_make_fs_ro(sdp);
+
+ if (locked)
+ mutex_unlock(&sdp->sd_freeze_mutex);
+
/*
* Dequeue any pending non-system glock holders that can no
* longer be granted because the file system is withdrawn.
*/
gfs2_gl_dq_holders(sdp);
- gfs2_freeze_unlock(&freeze_gh);
}
if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
@@ -187,15 +177,8 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
}
sdp->sd_jinode_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq(&sdp->sd_jinode_gh);
- if (test_bit(SDF_FS_FROZEN, &sdp->sd_flags)) {
- /* Make sure gfs2_unfreeze works if partially-frozen */
- flush_work(&sdp->sd_freeze_work);
- atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
- thaw_super(sdp->sd_vfs);
- } else {
- wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,
- TASK_UNINTERRUPTIBLE);
- }
+ gfs2_thaw_freeze_initiator(sdp->sd_vfs);
+ wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
/*
* holder_uninit to force glock_put, to force dlm to let go
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 78ec190f4155..cdb839529175 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -149,8 +149,7 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
bool verbose);
-extern int gfs2_freeze_lock(struct gfs2_sbd *sdp,
- struct gfs2_holder *freeze_gh, int caller_flags);
+extern int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
#define gfs2_io_error(sdp) \
diff --git a/fs/inode.c b/fs/inode.c
index d37fad91c8da..8fefb69e1f84 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1156,8 +1156,10 @@ lock:
*/
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
- WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
- WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
+ if (inode1)
+ WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
+ if (inode2)
+ WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
lock_two_inodes(inode1, inode2, I_MUTEX_NORMAL, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index a4fa81af60d9..adb92cdb24b0 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1076,7 +1076,7 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
{
loff_t start_byte;
loff_t end_byte;
- int blocksize = i_blocksize(inode);
+ unsigned int blocksize = i_blocksize(inode);
if (iomap->type != IOMAP_DELALLOC)
return 0;
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 45b6919903e6..5a1a4af9d3d2 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -655,7 +655,9 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
return kn;
err_out3:
+ spin_lock(&kernfs_idr_lock);
idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
+ spin_unlock(&kernfs_idr_lock);
err_out2:
kmem_cache_free(kernfs_node_cache, kn);
err_out1:
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index e3972aa3045a..5d85715be763 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -93,6 +93,12 @@ void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host, struct
block->b_status = nlm_lck_blocked;
}
+struct rpc_clnt *nlmclnt_rpc_clnt(struct nlm_host *host)
+{
+ return host->h_rpcclnt;
+}
+EXPORT_SYMBOL_GPL(nlmclnt_rpc_clnt);
+
/*
* Queue up a lock for blocking so that the GRANTED request can see it
*/
diff --git a/fs/namei.c b/fs/namei.c
index 91171da719c5..e56ff39a79bc 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -4874,8 +4874,7 @@ int vfs_rename(struct renamedata *rd)
d_exchange(old_dentry, new_dentry);
}
out:
- if (source)
- inode_unlock(source);
+ inode_unlock(source);
if (target)
inode_unlock(target);
dput(new_dentry);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index f50e025ae406..e4c5f193ed5e 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -184,6 +184,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_net = get_net(cl_init->net);
clp->cl_principal = "*";
+ clp->cl_xprtsec = cl_init->xprtsec;
return clp;
error_cleanup:
@@ -326,6 +327,10 @@ again:
sap))
continue;
+ /* Match the xprt security policy */
+ if (clp->cl_xprtsec.policy != data->xprtsec.policy)
+ continue;
+
refcount_inc(&clp->cl_count);
return clp;
}
@@ -458,6 +463,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
switch (proto) {
case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_TCP_TLS:
case XPRT_TRANSPORT_RDMA:
if (retrans == NFS_UNSPEC_RETRANS)
to->to_retries = NFS_DEF_TCP_RETRANS;
@@ -510,6 +516,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
.version = clp->rpc_ops->version,
.authflavor = flavor,
.cred = cl_init->cred,
+ .xprtsec = cl_init->xprtsec,
};
if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
@@ -592,6 +599,7 @@ static int nfs_start_lockd(struct nfs_server *server)
server->nlm_host = host;
server->destroy = nfs_destroy_server;
+ nfs_sysfs_link_rpc_client(server, nlmclnt_rpc_clnt(host), NULL);
return 0;
}
@@ -621,6 +629,7 @@ int nfs_init_server_rpcclient(struct nfs_server *server,
if (server->flags & NFS_MOUNT_SOFT)
server->client->cl_softrtry = 1;
+ nfs_sysfs_link_rpc_client(server, server->client, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(nfs_init_server_rpcclient);
@@ -675,6 +684,7 @@ static int nfs_init_server(struct nfs_server *server,
.cred = server->cred,
.nconnect = ctx->nfs_server.nconnect,
.init_flags = (1UL << NFS_CS_REUSEPORT),
+ .xprtsec = ctx->xprtsec,
};
struct nfs_client *clp;
int error;
@@ -690,6 +700,8 @@ static int nfs_init_server(struct nfs_server *server,
return PTR_ERR(clp);
server->nfs_client = clp;
+ nfs_sysfs_add_server(server);
+ nfs_sysfs_link_rpc_client(server, clp->cl_rpcclient, "_state");
/* Initialise the client representation from the mount data */
server->flags = ctx->flags;
@@ -944,6 +956,8 @@ void nfs_server_remove_lists(struct nfs_server *server)
}
EXPORT_SYMBOL_GPL(nfs_server_remove_lists);
+static DEFINE_IDA(s_sysfs_ids);
+
/*
* Allocate and initialise a server record
*/
@@ -955,6 +969,12 @@ struct nfs_server *nfs_alloc_server(void)
if (!server)
return NULL;
+ server->s_sysfs_id = ida_alloc(&s_sysfs_ids, GFP_KERNEL);
+ if (server->s_sysfs_id < 0) {
+ kfree(server);
+ return NULL;
+ }
+
server->client = server->client_acl = ERR_PTR(-EINVAL);
/* Zero out the NFS state stuff */
@@ -1001,6 +1021,12 @@ void nfs_free_server(struct nfs_server *server)
nfs_put_client(server->nfs_client);
+ if (server->kobj.state_initialized) {
+ nfs_sysfs_remove_server(server);
+ kobject_put(&server->kobj);
+ }
+ ida_free(&s_sysfs_ids, server->s_sysfs_id);
+
ida_destroy(&server->lockowner_id);
ida_destroy(&server->openowner_id);
nfs_free_iostats(server->io_stats);
@@ -1102,6 +1128,11 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
server->fsid = fattr->fsid;
+ nfs_sysfs_add_server(server);
+
+ nfs_sysfs_link_rpc_client(server,
+ server->nfs_client->cl_rpcclient, "_state");
+
error = nfs_init_server_rpcclient(server,
source->client->cl_timeout,
flavor);
@@ -1385,6 +1416,7 @@ error_0:
void nfs_fs_proc_exit(void)
{
remove_proc_subtree("fs/nfsfs", NULL);
+ ida_destroy(&s_sysfs_ids);
}
#endif /* CONFIG_PROC_FS */
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index 9bcd53d5c7d4..853e8d609bb3 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -18,6 +18,9 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
+
+#include <net/handshake.h>
+
#include "nfs.h"
#include "internal.h"
@@ -88,6 +91,7 @@ enum nfs_param {
Opt_vers,
Opt_wsize,
Opt_write,
+ Opt_xprtsec,
};
enum {
@@ -194,6 +198,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_string("vers", Opt_vers),
fsparam_enum ("write", Opt_write, nfs_param_enums_write),
fsparam_u32 ("wsize", Opt_wsize),
+ fsparam_string("xprtsec", Opt_xprtsec),
{}
};
@@ -267,6 +272,20 @@ static const struct constant_table nfs_secflavor_tokens[] = {
{}
};
+enum {
+ Opt_xprtsec_none,
+ Opt_xprtsec_tls,
+ Opt_xprtsec_mtls,
+ nr__Opt_xprtsec
+};
+
+static const struct constant_table nfs_xprtsec_policies[] = {
+ { "none", Opt_xprtsec_none },
+ { "tls", Opt_xprtsec_tls },
+ { "mtls", Opt_xprtsec_mtls },
+ {}
+};
+
/*
* Sanity-check a server address provided by the mount command.
*
@@ -320,9 +339,21 @@ static int nfs_validate_transport_protocol(struct fs_context *fc,
default:
ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
}
+
+ if (ctx->xprtsec.policy != RPC_XPRTSEC_NONE)
+ switch (ctx->nfs_server.protocol) {
+ case XPRT_TRANSPORT_TCP:
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP_TLS;
+ break;
+ default:
+ goto out_invalid_xprtsec_policy;
+ }
+
return 0;
out_invalid_transport_udp:
return nfs_invalf(fc, "NFS: Unsupported transport protocol udp");
+out_invalid_xprtsec_policy:
+ return nfs_invalf(fc, "NFS: Transport does not support xprtsec");
}
/*
@@ -430,6 +461,29 @@ static int nfs_parse_security_flavors(struct fs_context *fc,
return 0;
}
+static int nfs_parse_xprtsec_policy(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+
+ trace_nfs_mount_assign(param->key, param->string);
+
+ switch (lookup_constant(nfs_xprtsec_policies, param->string, -1)) {
+ case Opt_xprtsec_none:
+ ctx->xprtsec.policy = RPC_XPRTSEC_NONE;
+ break;
+ case Opt_xprtsec_tls:
+ ctx->xprtsec.policy = RPC_XPRTSEC_TLS_ANON;
+ break;
+ case Opt_xprtsec_mtls:
+ ctx->xprtsec.policy = RPC_XPRTSEC_TLS_X509;
+ break;
+ default:
+ return nfs_invalf(fc, "NFS: Unrecognized transport security policy");
+ }
+ return 0;
+}
+
static int nfs_parse_version_string(struct fs_context *fc,
const char *string)
{
@@ -696,6 +750,11 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
if (ret < 0)
return ret;
break;
+ case Opt_xprtsec:
+ ret = nfs_parse_xprtsec_policy(fc, param);
+ if (ret < 0)
+ return ret;
+ break;
case Opt_proto:
if (!param->string)
@@ -791,16 +850,19 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->mount_server.addrlen = len;
break;
case Opt_nconnect:
+ trace_nfs_mount_assign(param->key, param->string);
if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_CONNECTIONS)
goto out_of_bounds;
ctx->nfs_server.nconnect = result.uint_32;
break;
case Opt_max_connect:
+ trace_nfs_mount_assign(param->key, param->string);
if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_TRANSPORTS)
goto out_of_bounds;
ctx->nfs_server.max_connect = result.uint_32;
break;
case Opt_lookupcache:
+ trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
case Opt_lookupcache_all:
ctx->flags &= ~(NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE);
@@ -817,6 +879,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
}
break;
case Opt_local_lock:
+ trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
case Opt_local_lock_all:
ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK |
@@ -837,6 +900,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
}
break;
case Opt_write:
+ trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
case Opt_write_lazy:
ctx->flags &=
@@ -1569,6 +1633,9 @@ static int nfs_init_fs_context(struct fs_context *fc)
ctx->selected_flavor = RPC_AUTH_MAXFLAVOR;
ctx->minorversion = 0;
ctx->need_mount = true;
+ ctx->xprtsec.policy = RPC_XPRTSEC_NONE;
+ ctx->xprtsec.cert_serial = TLS_NO_CERT;
+ ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY;
fc->s_iflags |= SB_I_STABLE_WRITES;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index a910b9a638c5..8172dd4135a1 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -845,7 +845,7 @@ int nfs_getattr(struct mnt_idmap *idmap, const struct path *path,
request_mask &= STATX_TYPE | STATX_MODE | STATX_NLINK | STATX_UID |
STATX_GID | STATX_ATIME | STATX_MTIME | STATX_CTIME |
- STATX_INO | STATX_SIZE | STATX_BLOCKS | STATX_BTIME |
+ STATX_INO | STATX_SIZE | STATX_BLOCKS |
STATX_CHANGE_COOKIE;
if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) {
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b5f21d35d30e..913c09806c7f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -81,6 +81,7 @@ struct nfs_client_initdata {
struct net *net;
const struct rpc_timeout *timeparms;
const struct cred *cred;
+ struct xprtsec_parms xprtsec;
};
/*
@@ -101,6 +102,7 @@ struct nfs_fs_context {
unsigned int bsize;
struct nfs_auth_info auth_info;
rpc_authflavor_t selected_flavor;
+ struct xprtsec_parms xprtsec;
char *client_address;
unsigned int version;
unsigned int minorversion;
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index 669cda757a5c..eff3802c5e03 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -4,6 +4,8 @@
#include <linux/sunrpc/addr.h>
#include "internal.h"
#include "nfs3_fs.h"
+#include "netns.h"
+#include "sysfs.h"
#ifdef CONFIG_NFS_V3_ACL
static struct rpc_stat nfsacl_rpcstat = { &nfsacl_program };
@@ -31,6 +33,8 @@ static void nfs_init_server_aclclient(struct nfs_server *server)
if (IS_ERR(server->client_acl))
goto out_noacl;
+ nfs_sysfs_link_rpc_client(server, server->client_acl, NULL);
+
/* No errors! Assume that Sun nfsacls are supported */
server->caps |= NFS_CAP_ACLS;
return;
@@ -93,6 +97,7 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
.net = mds_clp->cl_net,
.timeparms = &ds_timeout,
.cred = mds_srv->cred,
+ .xprtsec = mds_clp->cl_xprtsec,
};
struct nfs_client *clp;
char buf[INET6_ADDRSTRLEN + 1];
@@ -102,8 +107,12 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
return ERR_PTR(-EINVAL);
cl_init.hostname = buf;
- if (mds_clp->cl_nconnect > 1 && ds_proto == XPRT_TRANSPORT_TCP)
- cl_init.nconnect = mds_clp->cl_nconnect;
+ switch (ds_proto) {
+ case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_TCP_TLS:
+ if (mds_clp->cl_nconnect > 1)
+ cl_init.nconnect = mds_clp->cl_nconnect;
+ }
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 93e306bf4430..63802d195556 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -1190,15 +1190,19 @@ static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
const void *buf, size_t buflen, int flags)
{
struct nfs_server *server = NFS_SERVER(inode);
+ __u32 bitmask[NFS_BITMASK_SZ];
struct page *pages[NFS4XATTR_MAXPAGES];
struct nfs42_setxattrargs arg = {
.fh = NFS_FH(inode),
+ .bitmask = bitmask,
.xattr_pages = pages,
.xattr_len = buflen,
.xattr_name = name,
.xattr_flags = flags,
};
- struct nfs42_setxattrres res;
+ struct nfs42_setxattrres res = {
+ .server = server,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
.rpc_argp = &arg,
@@ -1210,13 +1214,22 @@ static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
if (buflen > server->sxasize)
return -ERANGE;
+ res.fattr = nfs_alloc_fattr();
+ if (!res.fattr)
+ return -ENOMEM;
+
if (buflen > 0) {
np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
- if (np < 0)
- return np;
+ if (np < 0) {
+ ret = np;
+ goto out;
+ }
} else
np = 0;
+ nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask,
+ inode, NFS_INO_INVALID_CHANGE);
+
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
&res.seq_res, 1);
trace_nfs4_setxattr(inode, name, ret);
@@ -1224,9 +1237,13 @@ static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
for (; np > 0; np--)
put_page(pages[np - 1]);
- if (!ret)
+ if (!ret) {
nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
+ ret = nfs_post_op_update_inode(inode, res.fattr);
+ }
+out:
+ kfree(res.fattr);
return ret;
}
diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
index 76ae11834206..911f634ba3da 100644
--- a/fs/nfs/nfs42xattr.c
+++ b/fs/nfs/nfs42xattr.c
@@ -991,6 +991,29 @@ static void nfs4_xattr_cache_init_once(void *p)
INIT_LIST_HEAD(&cache->dispose);
}
+static int nfs4_xattr_shrinker_init(struct shrinker *shrinker,
+ struct list_lru *lru, const char *name)
+{
+ int ret = 0;
+
+ ret = register_shrinker(shrinker, name);
+ if (ret)
+ return ret;
+
+ ret = list_lru_init_memcg(lru, shrinker);
+ if (ret)
+ unregister_shrinker(shrinker);
+
+ return ret;
+}
+
+static void nfs4_xattr_shrinker_destroy(struct shrinker *shrinker,
+ struct list_lru *lru)
+{
+ unregister_shrinker(shrinker);
+ list_lru_destroy(lru);
+}
+
int __init nfs4_xattr_cache_init(void)
{
int ret = 0;
@@ -1002,44 +1025,30 @@ int __init nfs4_xattr_cache_init(void)
if (nfs4_xattr_cache_cachep == NULL)
return -ENOMEM;
- ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
- &nfs4_xattr_large_entry_shrinker);
- if (ret)
- goto out4;
-
- ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
- &nfs4_xattr_entry_shrinker);
- if (ret)
- goto out3;
-
- ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
- &nfs4_xattr_cache_shrinker);
- if (ret)
- goto out2;
-
- ret = register_shrinker(&nfs4_xattr_cache_shrinker, "nfs-xattr_cache");
+ ret = nfs4_xattr_shrinker_init(&nfs4_xattr_cache_shrinker,
+ &nfs4_xattr_cache_lru,
+ "nfs-xattr_cache");
if (ret)
goto out1;
- ret = register_shrinker(&nfs4_xattr_entry_shrinker, "nfs-xattr_entry");
+ ret = nfs4_xattr_shrinker_init(&nfs4_xattr_entry_shrinker,
+ &nfs4_xattr_entry_lru,
+ "nfs-xattr_entry");
if (ret)
- goto out;
+ goto out2;
- ret = register_shrinker(&nfs4_xattr_large_entry_shrinker,
- "nfs-xattr_large_entry");
+ ret = nfs4_xattr_shrinker_init(&nfs4_xattr_large_entry_shrinker,
+ &nfs4_xattr_large_entry_lru,
+ "nfs-xattr_large_entry");
if (!ret)
return 0;
- unregister_shrinker(&nfs4_xattr_entry_shrinker);
-out:
- unregister_shrinker(&nfs4_xattr_cache_shrinker);
-out1:
- list_lru_destroy(&nfs4_xattr_cache_lru);
+ nfs4_xattr_shrinker_destroy(&nfs4_xattr_entry_shrinker,
+ &nfs4_xattr_entry_lru);
out2:
- list_lru_destroy(&nfs4_xattr_entry_lru);
-out3:
- list_lru_destroy(&nfs4_xattr_large_entry_lru);
-out4:
+ nfs4_xattr_shrinker_destroy(&nfs4_xattr_cache_shrinker,
+ &nfs4_xattr_cache_lru);
+out1:
kmem_cache_destroy(nfs4_xattr_cache_cachep);
return ret;
@@ -1047,11 +1056,11 @@ out4:
void nfs4_xattr_cache_exit(void)
{
- unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
- unregister_shrinker(&nfs4_xattr_entry_shrinker);
- unregister_shrinker(&nfs4_xattr_cache_shrinker);
- list_lru_destroy(&nfs4_xattr_large_entry_lru);
- list_lru_destroy(&nfs4_xattr_entry_lru);
- list_lru_destroy(&nfs4_xattr_cache_lru);
+ nfs4_xattr_shrinker_destroy(&nfs4_xattr_large_entry_shrinker,
+ &nfs4_xattr_large_entry_lru);
+ nfs4_xattr_shrinker_destroy(&nfs4_xattr_entry_shrinker,
+ &nfs4_xattr_entry_lru);
+ nfs4_xattr_shrinker_destroy(&nfs4_xattr_cache_shrinker,
+ &nfs4_xattr_cache_lru);
kmem_cache_destroy(nfs4_xattr_cache_cachep);
}
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index a6df815a140c..95234208dc9e 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -7,6 +7,9 @@
#include "nfs42.h"
+/* Not limited by NFS itself, limited by the generic xattr code */
+#define nfs4_xattr_name_maxsz XDR_QUADLEN(XATTR_NAME_MAX)
+
#define encode_fallocate_maxsz (encode_stateid_maxsz + \
2 /* offset */ + \
2 /* length */)
@@ -89,6 +92,18 @@
2 /* dst offset */ + \
2 /* count */)
#define decode_clone_maxsz (op_decode_hdr_maxsz)
+#define encode_getxattr_maxsz (op_encode_hdr_maxsz + 1 + \
+ nfs4_xattr_name_maxsz)
+#define decode_getxattr_maxsz (op_decode_hdr_maxsz + 1 + pagepad_maxsz)
+#define encode_setxattr_maxsz (op_encode_hdr_maxsz + \
+ 1 + nfs4_xattr_name_maxsz + 1)
+#define decode_setxattr_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
+#define encode_listxattrs_maxsz (op_encode_hdr_maxsz + 2 + 1)
+#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1 + 1)
+#define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \
+ nfs4_xattr_name_maxsz)
+#define decode_removexattr_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz)
#define NFS4_enc_allocate_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
@@ -186,55 +201,40 @@
decode_putfh_maxsz + \
decode_clone_maxsz + \
decode_getattr_maxsz)
-
-/* Not limited by NFS itself, limited by the generic xattr code */
-#define nfs4_xattr_name_maxsz XDR_QUADLEN(XATTR_NAME_MAX)
-
-#define encode_getxattr_maxsz (op_encode_hdr_maxsz + 1 + \
- nfs4_xattr_name_maxsz)
-#define decode_getxattr_maxsz (op_decode_hdr_maxsz + 1 + pagepad_maxsz)
-#define encode_setxattr_maxsz (op_encode_hdr_maxsz + \
- 1 + nfs4_xattr_name_maxsz + 1)
-#define decode_setxattr_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
-#define encode_listxattrs_maxsz (op_encode_hdr_maxsz + 2 + 1)
-#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1 + 1)
-#define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \
- nfs4_xattr_name_maxsz)
-#define decode_removexattr_maxsz (op_decode_hdr_maxsz + \
- decode_change_info_maxsz)
-
-#define NFS4_enc_getxattr_sz (compound_encode_hdr_maxsz + \
- encode_sequence_maxsz + \
- encode_putfh_maxsz + \
- encode_getxattr_maxsz)
-#define NFS4_dec_getxattr_sz (compound_decode_hdr_maxsz + \
- decode_sequence_maxsz + \
- decode_putfh_maxsz + \
- decode_getxattr_maxsz)
-#define NFS4_enc_setxattr_sz (compound_encode_hdr_maxsz + \
- encode_sequence_maxsz + \
- encode_putfh_maxsz + \
- encode_setxattr_maxsz)
-#define NFS4_dec_setxattr_sz (compound_decode_hdr_maxsz + \
- decode_sequence_maxsz + \
- decode_putfh_maxsz + \
- decode_setxattr_maxsz)
-#define NFS4_enc_listxattrs_sz (compound_encode_hdr_maxsz + \
- encode_sequence_maxsz + \
- encode_putfh_maxsz + \
- encode_listxattrs_maxsz)
-#define NFS4_dec_listxattrs_sz (compound_decode_hdr_maxsz + \
- decode_sequence_maxsz + \
- decode_putfh_maxsz + \
- decode_listxattrs_maxsz)
-#define NFS4_enc_removexattr_sz (compound_encode_hdr_maxsz + \
- encode_sequence_maxsz + \
- encode_putfh_maxsz + \
- encode_removexattr_maxsz)
-#define NFS4_dec_removexattr_sz (compound_decode_hdr_maxsz + \
- decode_sequence_maxsz + \
- decode_putfh_maxsz + \
- decode_removexattr_maxsz)
+#define NFS4_enc_getxattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_getxattr_maxsz)
+#define NFS4_dec_getxattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_getxattr_maxsz)
+#define NFS4_enc_setxattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_setxattr_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_setxattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_setxattr_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_listxattrs_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_listxattrs_maxsz)
+#define NFS4_dec_listxattrs_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_listxattrs_maxsz)
+#define NFS4_enc_removexattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_removexattr_maxsz)
+#define NFS4_dec_removexattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_removexattr_maxsz)
/*
* These values specify the maximum amount of data that is not
@@ -317,6 +317,18 @@ static void encode_copy(struct xdr_stream *xdr,
encode_nl4_server(xdr, args->cp_src);
}
+static void encode_copy_commit(struct xdr_stream *xdr,
+ const struct nfs42_copy_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_COMMIT, decode_commit_maxsz, hdr);
+ p = reserve_space(xdr, 12);
+ p = xdr_encode_hyper(p, args->dst_pos);
+ *p = cpu_to_be32(args->count);
+}
+
static void encode_offload_cancel(struct xdr_stream *xdr,
const struct nfs42_offload_status_args *args,
struct compound_hdr *hdr)
@@ -452,20 +464,6 @@ static void encode_setxattr(struct xdr_stream *xdr,
xdr_write_pages(xdr, arg->xattr_pages, 0, arg->xattr_len);
}
-static int decode_setxattr(struct xdr_stream *xdr,
- struct nfs4_change_info *cinfo)
-{
- int status;
-
- status = decode_op_hdr(xdr, OP_SETXATTR);
- if (status)
- goto out;
- status = decode_change_info(xdr, cinfo);
-out:
- return status;
-}
-
-
static void encode_getxattr(struct xdr_stream *xdr, const char *name,
struct compound_hdr *hdr)
{
@@ -473,43 +471,6 @@ static void encode_getxattr(struct xdr_stream *xdr, const char *name,
encode_string(xdr, strlen(name), name);
}
-static int decode_getxattr(struct xdr_stream *xdr,
- struct nfs42_getxattrres *res,
- struct rpc_rqst *req)
-{
- int status;
- __be32 *p;
- u32 len, rdlen;
-
- status = decode_op_hdr(xdr, OP_GETXATTR);
- if (status)
- return status;
-
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- return -EIO;
-
- len = be32_to_cpup(p);
-
- /*
- * Only check against the page length here. The actual
- * requested length may be smaller, but that is only
- * checked against after possibly caching a valid reply.
- */
- if (len > req->rq_rcv_buf.page_len)
- return -ERANGE;
-
- res->xattr_len = len;
-
- if (len > 0) {
- rdlen = xdr_read_pages(xdr, len);
- if (rdlen < len)
- return -EIO;
- }
-
- return 0;
-}
-
static void encode_removexattr(struct xdr_stream *xdr, const char *name,
struct compound_hdr *hdr)
{
@@ -517,21 +478,6 @@ static void encode_removexattr(struct xdr_stream *xdr, const char *name,
encode_string(xdr, strlen(name), name);
}
-
-static int decode_removexattr(struct xdr_stream *xdr,
- struct nfs4_change_info *cinfo)
-{
- int status;
-
- status = decode_op_hdr(xdr, OP_REMOVEXATTR);
- if (status)
- goto out;
-
- status = decode_change_info(xdr, cinfo);
-out:
- return status;
-}
-
static void encode_listxattrs(struct xdr_stream *xdr,
const struct nfs42_listxattrsargs *arg,
struct compound_hdr *hdr)
@@ -553,104 +499,6 @@ static void encode_listxattrs(struct xdr_stream *xdr,
*p = cpu_to_be32(arg->count + 8 + 4);
}
-static int decode_listxattrs(struct xdr_stream *xdr,
- struct nfs42_listxattrsres *res)
-{
- int status;
- __be32 *p;
- u32 count, len, ulen;
- size_t left, copied;
- char *buf;
-
- status = decode_op_hdr(xdr, OP_LISTXATTRS);
- if (status) {
- /*
- * Special case: for LISTXATTRS, NFS4ERR_TOOSMALL
- * should be translated to ERANGE.
- */
- if (status == -ETOOSMALL)
- status = -ERANGE;
- /*
- * Special case: for LISTXATTRS, NFS4ERR_NOXATTR
- * should be translated to success with zero-length reply.
- */
- if (status == -ENODATA) {
- res->eof = true;
- status = 0;
- }
- goto out;
- }
-
- p = xdr_inline_decode(xdr, 8);
- if (unlikely(!p))
- return -EIO;
-
- xdr_decode_hyper(p, &res->cookie);
-
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- return -EIO;
-
- left = res->xattr_len;
- buf = res->xattr_buf;
-
- count = be32_to_cpup(p);
- copied = 0;
-
- /*
- * We have asked for enough room to encode the maximum number
- * of possible attribute names, so everything should fit.
- *
- * But, don't rely on that assumption. Just decode entries
- * until they don't fit anymore, just in case the server did
- * something odd.
- */
- while (count--) {
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- return -EIO;
-
- len = be32_to_cpup(p);
- if (len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
- status = -ERANGE;
- goto out;
- }
-
- p = xdr_inline_decode(xdr, len);
- if (unlikely(!p))
- return -EIO;
-
- ulen = len + XATTR_USER_PREFIX_LEN + 1;
- if (buf) {
- if (ulen > left) {
- status = -ERANGE;
- goto out;
- }
-
- memcpy(buf, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
- memcpy(buf + XATTR_USER_PREFIX_LEN, p, len);
-
- buf[ulen - 1] = 0;
- buf += ulen;
- left -= ulen;
- }
- copied += ulen;
- }
-
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- return -EIO;
-
- res->eof = be32_to_cpup(p);
- res->copied = copied;
-
-out:
- if (status == -ERANGE && res->xattr_len == XATTR_LIST_MAX)
- status = -E2BIG;
-
- return status;
-}
-
/*
* Encode ALLOCATE request
*/
@@ -671,18 +519,6 @@ static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
encode_nops(&hdr);
}
-static void encode_copy_commit(struct xdr_stream *xdr,
- const struct nfs42_copy_args *args,
- struct compound_hdr *hdr)
-{
- __be32 *p;
-
- encode_op_hdr(xdr, OP_COMMIT, decode_commit_maxsz, hdr);
- p = reserve_space(xdr, 12);
- p = xdr_encode_hyper(p, args->dst_pos);
- *p = cpu_to_be32(args->count);
-}
-
/*
* Encode COPY request
*/
@@ -871,6 +707,90 @@ static void nfs4_xdr_enc_layouterror(struct rpc_rqst *req,
encode_nops(&hdr);
}
+/*
+ * Encode SETXATTR request
+ */
+static void nfs4_xdr_enc_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_setxattrargs *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_setxattr(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode GETXATTR request
+ */
+static void nfs4_xdr_enc_getxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_getxattrargs *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+ uint32_t replen;
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ replen = hdr.replen + op_decode_hdr_maxsz + 1;
+ encode_getxattr(xdr, args->xattr_name, &hdr);
+
+ rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->xattr_len,
+ replen);
+
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode LISTXATTR request
+ */
+static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req,
+ struct xdr_stream *xdr, const void *data)
+{
+ const struct nfs42_listxattrsargs *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+ uint32_t replen;
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ replen = hdr.replen + op_decode_hdr_maxsz + 2 + 1;
+ encode_listxattrs(xdr, args, &hdr);
+
+ rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count, replen);
+
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode REMOVEXATTR request
+ */
+static void nfs4_xdr_enc_removexattr(struct rpc_rqst *req,
+ struct xdr_stream *xdr, const void *data)
+{
+ const struct nfs42_removexattrargs *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_removexattr(xdr, args->xattr_name, &hdr);
+ encode_nops(&hdr);
+}
+
static int decode_allocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
{
return decode_op_hdr(xdr, OP_ALLOCATE);
@@ -1192,6 +1112,168 @@ static int decode_layouterror(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_LAYOUTERROR);
}
+static int decode_setxattr(struct xdr_stream *xdr,
+ struct nfs4_change_info *cinfo)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_SETXATTR);
+ if (status)
+ goto out;
+ status = decode_change_info(xdr, cinfo);
+out:
+ return status;
+}
+
+static int decode_getxattr(struct xdr_stream *xdr,
+ struct nfs42_getxattrres *res,
+ struct rpc_rqst *req)
+{
+ int status;
+ __be32 *p;
+ u32 len, rdlen;
+
+ status = decode_op_hdr(xdr, OP_GETXATTR);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+
+ len = be32_to_cpup(p);
+
+ /*
+ * Only check against the page length here. The actual
+ * requested length may be smaller, but that is only
+ * checked against after possibly caching a valid reply.
+ */
+ if (len > req->rq_rcv_buf.page_len)
+ return -ERANGE;
+
+ res->xattr_len = len;
+
+ if (len > 0) {
+ rdlen = xdr_read_pages(xdr, len);
+ if (rdlen < len)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int decode_removexattr(struct xdr_stream *xdr,
+ struct nfs4_change_info *cinfo)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_REMOVEXATTR);
+ if (status)
+ goto out;
+
+ status = decode_change_info(xdr, cinfo);
+out:
+ return status;
+}
+
+static int decode_listxattrs(struct xdr_stream *xdr,
+ struct nfs42_listxattrsres *res)
+{
+ int status;
+ __be32 *p;
+ u32 count, len, ulen;
+ size_t left, copied;
+ char *buf;
+
+ status = decode_op_hdr(xdr, OP_LISTXATTRS);
+ if (status) {
+ /*
+ * Special case: for LISTXATTRS, NFS4ERR_TOOSMALL
+ * should be translated to ERANGE.
+ */
+ if (status == -ETOOSMALL)
+ status = -ERANGE;
+ /*
+ * Special case: for LISTXATTRS, NFS4ERR_NOXATTR
+ * should be translated to success with zero-length reply.
+ */
+ if (status == -ENODATA) {
+ res->eof = true;
+ status = 0;
+ }
+ goto out;
+ }
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ return -EIO;
+
+ xdr_decode_hyper(p, &res->cookie);
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+
+ left = res->xattr_len;
+ buf = res->xattr_buf;
+
+ count = be32_to_cpup(p);
+ copied = 0;
+
+ /*
+ * We have asked for enough room to encode the maximum number
+ * of possible attribute names, so everything should fit.
+ *
+ * But, don't rely on that assumption. Just decode entries
+ * until they don't fit anymore, just in case the server did
+ * something odd.
+ */
+ while (count--) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+
+ len = be32_to_cpup(p);
+ if (len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
+ status = -ERANGE;
+ goto out;
+ }
+
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return -EIO;
+
+ ulen = len + XATTR_USER_PREFIX_LEN + 1;
+ if (buf) {
+ if (ulen > left) {
+ status = -ERANGE;
+ goto out;
+ }
+
+ memcpy(buf, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+ memcpy(buf + XATTR_USER_PREFIX_LEN, p, len);
+
+ buf[ulen - 1] = 0;
+ buf += ulen;
+ left -= ulen;
+ }
+ copied += ulen;
+ }
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+
+ res->eof = be32_to_cpup(p);
+ res->copied = copied;
+
+out:
+ if (status == -ERANGE && res->xattr_len == XATTR_LIST_MAX)
+ status = -E2BIG;
+
+ return status;
+}
+
/*
* Decode ALLOCATE request
*/
@@ -1481,22 +1563,9 @@ out:
return status;
}
-#ifdef CONFIG_NFS_V4_2
-static void nfs4_xdr_enc_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
- const void *data)
-{
- const struct nfs42_setxattrargs *args = data;
- struct compound_hdr hdr = {
- .minorversion = nfs4_xdr_minorversion(&args->seq_args),
- };
-
- encode_compound_hdr(xdr, req, &hdr);
- encode_sequence(xdr, &args->seq_args, &hdr);
- encode_putfh(xdr, args->fh, &hdr);
- encode_setxattr(xdr, args, &hdr);
- encode_nops(&hdr);
-}
-
+/*
+ * Decode SETXATTR request
+ */
static int nfs4_xdr_dec_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
void *data)
{
@@ -1513,33 +1582,17 @@ static int nfs4_xdr_dec_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
-
status = decode_setxattr(xdr, &res->cinfo);
+ if (status)
+ goto out;
+ status = decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
-static void nfs4_xdr_enc_getxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
- const void *data)
-{
- const struct nfs42_getxattrargs *args = data;
- struct compound_hdr hdr = {
- .minorversion = nfs4_xdr_minorversion(&args->seq_args),
- };
- uint32_t replen;
-
- encode_compound_hdr(xdr, req, &hdr);
- encode_sequence(xdr, &args->seq_args, &hdr);
- encode_putfh(xdr, args->fh, &hdr);
- replen = hdr.replen + op_decode_hdr_maxsz + 1;
- encode_getxattr(xdr, args->xattr_name, &hdr);
-
- rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->xattr_len,
- replen);
-
- encode_nops(&hdr);
-}
-
+/*
+ * Decode GETXATTR request
+ */
static int nfs4_xdr_dec_getxattr(struct rpc_rqst *rqstp,
struct xdr_stream *xdr, void *data)
{
@@ -1561,26 +1614,9 @@ out:
return status;
}
-static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req,
- struct xdr_stream *xdr, const void *data)
-{
- const struct nfs42_listxattrsargs *args = data;
- struct compound_hdr hdr = {
- .minorversion = nfs4_xdr_minorversion(&args->seq_args),
- };
- uint32_t replen;
-
- encode_compound_hdr(xdr, req, &hdr);
- encode_sequence(xdr, &args->seq_args, &hdr);
- encode_putfh(xdr, args->fh, &hdr);
- replen = hdr.replen + op_decode_hdr_maxsz + 2 + 1;
- encode_listxattrs(xdr, args, &hdr);
-
- rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count, replen);
-
- encode_nops(&hdr);
-}
-
+/*
+ * Decode LISTXATTR request
+ */
static int nfs4_xdr_dec_listxattrs(struct rpc_rqst *rqstp,
struct xdr_stream *xdr, void *data)
{
@@ -1604,21 +1640,9 @@ out:
return status;
}
-static void nfs4_xdr_enc_removexattr(struct rpc_rqst *req,
- struct xdr_stream *xdr, const void *data)
-{
- const struct nfs42_removexattrargs *args = data;
- struct compound_hdr hdr = {
- .minorversion = nfs4_xdr_minorversion(&args->seq_args),
- };
-
- encode_compound_hdr(xdr, req, &hdr);
- encode_sequence(xdr, &args->seq_args, &hdr);
- encode_putfh(xdr, args->fh, &hdr);
- encode_removexattr(xdr, args->xattr_name, &hdr);
- encode_nops(&hdr);
-}
-
+/*
+ * Decode REMOVEXATTR request
+ */
static int nfs4_xdr_dec_removexattr(struct rpc_rqst *req,
struct xdr_stream *xdr, void *data)
{
@@ -1640,5 +1664,4 @@ static int nfs4_xdr_dec_removexattr(struct rpc_rqst *req,
out:
return status;
}
-#endif
#endif /* __LINUX_FS_NFS_NFS4_2XDR_H */
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index d3051b051a56..d9114a754db7 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -18,6 +18,7 @@
#include "nfs4idmap.h"
#include "pnfs.h"
#include "netns.h"
+#include "sysfs.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
@@ -896,7 +897,8 @@ static int nfs4_set_client(struct nfs_server *server,
int proto, const struct rpc_timeout *timeparms,
u32 minorversion, unsigned int nconnect,
unsigned int max_connect,
- struct net *net)
+ struct net *net,
+ struct xprtsec_parms *xprtsec)
{
struct nfs_client_initdata cl_init = {
.hostname = hostname,
@@ -909,6 +911,7 @@ static int nfs4_set_client(struct nfs_server *server,
.net = net,
.timeparms = timeparms,
.cred = server->cred,
+ .xprtsec = *xprtsec,
};
struct nfs_client *clp;
@@ -916,8 +919,11 @@ static int nfs4_set_client(struct nfs_server *server,
__set_bit(NFS_CS_REUSEPORT, &cl_init.init_flags);
else
cl_init.max_connect = max_connect;
- if (proto == XPRT_TRANSPORT_TCP)
+ switch (proto) {
+ case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_TCP_TLS:
cl_init.nconnect = nconnect;
+ }
if (server->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
@@ -947,6 +953,9 @@ static int nfs4_set_client(struct nfs_server *server,
set_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state);
server->nfs_client = clp;
+ nfs_sysfs_add_server(server);
+ nfs_sysfs_link_rpc_client(server, clp->cl_rpcclient, "_state");
+
return 0;
}
@@ -978,6 +987,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
.net = mds_clp->cl_net,
.timeparms = &ds_timeout,
.cred = mds_srv->cred,
+ .xprtsec = mds_srv->nfs_client->cl_xprtsec,
};
char buf[INET6_ADDRSTRLEN + 1];
@@ -985,9 +995,13 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
return ERR_PTR(-EINVAL);
cl_init.hostname = buf;
- if (mds_clp->cl_nconnect > 1 && ds_proto == XPRT_TRANSPORT_TCP) {
- cl_init.nconnect = mds_clp->cl_nconnect;
- cl_init.max_connect = NFS_MAX_TRANSPORTS;
+ switch (ds_proto) {
+ case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_TCP_TLS:
+ if (mds_clp->cl_nconnect > 1) {
+ cl_init.nconnect = mds_clp->cl_nconnect;
+ cl_init.max_connect = NFS_MAX_TRANSPORTS;
+ }
}
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
@@ -1157,7 +1171,8 @@ static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc)
ctx->minorversion,
ctx->nfs_server.nconnect,
ctx->nfs_server.max_connect,
- fc->net_ns);
+ fc->net_ns,
+ &ctx->xprtsec);
if (error < 0)
return error;
@@ -1219,8 +1234,8 @@ struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct nfs_client *parent_client;
struct nfs_server *server, *parent_server;
+ int proto, error;
bool auth_probe;
- int error;
server = nfs_alloc_server();
if (!server)
@@ -1247,23 +1262,28 @@ struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
parent_client->cl_mvops->minor_version,
parent_client->cl_nconnect,
parent_client->cl_max_connect,
- parent_client->cl_net);
+ parent_client->cl_net,
+ &parent_client->cl_xprtsec);
if (!error)
goto init_server;
#endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */
+ proto = XPRT_TRANSPORT_TCP;
+ if (parent_client->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
+ proto = XPRT_TRANSPORT_TCP_TLS;
rpc_set_port(&ctx->nfs_server.address, NFS_PORT);
error = nfs4_set_client(server,
ctx->nfs_server.hostname,
&ctx->nfs_server._address,
ctx->nfs_server.addrlen,
parent_client->cl_ipaddr,
- XPRT_TRANSPORT_TCP,
+ proto,
parent_server->client->cl_timeout,
parent_client->cl_mvops->minor_version,
parent_client->cl_nconnect,
parent_client->cl_max_connect,
- parent_client->cl_net);
+ parent_client->cl_net,
+ &parent_client->cl_xprtsec);
if (error < 0)
goto error;
@@ -1314,6 +1334,7 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
.dstaddr = (struct sockaddr *)sap,
.addrlen = salen,
.servername = hostname,
+ /* cel: bleh. We might need to pass TLS parameters here */
};
char buf[INET6_ADDRSTRLEN + 1];
struct sockaddr_storage address;
@@ -1336,7 +1357,8 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
error = nfs4_set_client(server, hostname, sap, salen, buf,
clp->cl_proto, clnt->cl_timeout,
clp->cl_minorversion,
- clp->cl_nconnect, clp->cl_max_connect, net);
+ clp->cl_nconnect, clp->cl_max_connect,
+ net, &clp->cl_xprtsec);
clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
if (error != 0) {
nfs_server_insert_lists(server);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d3665390c4cb..e1a886b58354 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -921,6 +921,7 @@ out:
out_noaction:
return ret;
session_recover:
+ set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
nfs4_schedule_session_recovery(session, status);
dprintk("%s ERROR: %d Reset session\n", __func__, status);
nfs41_sequence_free_slot(res);
@@ -7159,7 +7160,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
struct nfs4_lock_state *lsp = data->lsp;
- struct nfs_server *server = NFS_SERVER(d_inode(data->ctx->dentry));
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
@@ -7167,7 +7167,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
data->rpc_status = task->tk_status;
switch (task->tk_status) {
case 0:
- renew_lease(server, data->timestamp);
+ renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
+ data->timestamp);
if (data->arg.new_lock && !data->cancelled) {
data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
@@ -7188,8 +7189,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
if (!nfs4_stateid_match(&data->arg.open_stateid,
&lsp->ls_state->open_stateid))
goto out_restart;
- else if (nfs4_async_handle_error(task, server, lsp->ls_state, NULL) == -EAGAIN)
- goto out_restart;
} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
&lsp->ls_stateid))
goto out_restart;
@@ -9371,7 +9370,7 @@ static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
return;
trace_nfs4_sequence(clp, task->tk_status);
- if (task->tk_status < 0) {
+ if (task->tk_status < 0 && !task->tk_client->cl_shutdown) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (refcount_read(&clp->cl_count) == 1)
return;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index bbe49315d99e..e079987af4a3 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1210,6 +1210,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
struct task_struct *task;
char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
+ if (clp->cl_rpcclient->cl_shutdown)
+ return;
+
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) {
wake_up_var(&clp->cl_state);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 30e53e93049e..2284f749d892 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -59,6 +59,8 @@
#include <linux/uaccess.h>
#include <linux/nfs_ssc.h>
+#include <uapi/linux/tls.h>
+
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
@@ -68,6 +70,8 @@
#include "nfs4session.h"
#include "pnfs.h"
#include "nfs.h"
+#include "netns.h"
+#include "sysfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -491,6 +495,16 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
seq_printf(m, ",timeo=%lu", 10U * nfss->client->cl_timeout->to_initval / HZ);
seq_printf(m, ",retrans=%u", nfss->client->cl_timeout->to_retries);
seq_printf(m, ",sec=%s", nfs_pseudoflavour_to_name(nfss->client->cl_auth->au_flavor));
+ switch (clp->cl_xprtsec.policy) {
+ case RPC_XPRTSEC_TLS_ANON:
+ seq_puts(m, ",xprtsec=tls");
+ break;
+ case RPC_XPRTSEC_TLS_X509:
+ seq_puts(m, ",xprtsec=mtls");
+ break;
+ default:
+ break;
+ }
if (version != 4)
nfs_show_mountd_options(m, nfss, showdefaults);
@@ -1077,6 +1091,7 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
&sb->s_blocksize_bits);
nfs_super_set_maxbytes(sb, server->maxfilesize);
+ nfs_sysfs_move_server_to_sb(sb);
server->has_sec_mnt_opts = ctx->has_sec_mnt_opts;
}
@@ -1319,13 +1334,14 @@ error_splat_super:
}
/*
- * Destroy an NFS2/3 superblock
+ * Destroy an NFS superblock
*/
void nfs_kill_super(struct super_block *s)
{
struct nfs_server *server = NFS_SB(s);
dev_t dev = s->s_dev;
+ nfs_sysfs_move_sb_to_server(server);
generic_shutdown_super(s);
nfs_fscache_release_super_cookie(s);
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
index 0cbcd2dfa732..acda8f033d30 100644
--- a/fs/nfs/sysfs.c
+++ b/fs/nfs/sysfs.c
@@ -12,17 +12,18 @@
#include <linux/string.h>
#include <linux/nfs_fs.h>
#include <linux/rcupdate.h>
+#include <linux/lockd/lockd.h>
#include "nfs4_fs.h"
#include "netns.h"
#include "sysfs.h"
-struct kobject *nfs_client_kobj;
-static struct kset *nfs_client_kset;
+static struct kset *nfs_kset;
-static void nfs_netns_object_release(struct kobject *kobj)
+static void nfs_kset_release(struct kobject *kobj)
{
- kfree(kobj);
+ struct kset *kset = container_of(kobj, struct kset, kobj);
+ kfree(kset);
}
static const struct kobj_ns_type_operations *nfs_netns_object_child_ns_type(
@@ -31,46 +32,42 @@ static const struct kobj_ns_type_operations *nfs_netns_object_child_ns_type(
return &net_ns_type_operations;
}
-static struct kobj_type nfs_netns_object_type = {
- .release = nfs_netns_object_release,
+static struct kobj_type nfs_kset_type = {
+ .release = nfs_kset_release,
.sysfs_ops = &kobj_sysfs_ops,
.child_ns_type = nfs_netns_object_child_ns_type,
};
-static struct kobject *nfs_netns_object_alloc(const char *name,
- struct kset *kset, struct kobject *parent)
+int nfs_sysfs_init(void)
{
- struct kobject *kobj;
+ int ret;
+
+ nfs_kset = kzalloc(sizeof(*nfs_kset), GFP_KERNEL);
+ if (!nfs_kset)
+ return -ENOMEM;
- kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
- if (kobj) {
- kobj->kset = kset;
- if (kobject_init_and_add(kobj, &nfs_netns_object_type,
- parent, "%s", name) == 0)
- return kobj;
- kobject_put(kobj);
+ ret = kobject_set_name(&nfs_kset->kobj, "nfs");
+ if (ret) {
+ kfree(nfs_kset);
+ return ret;
}
- return NULL;
-}
-int nfs_sysfs_init(void)
-{
- nfs_client_kset = kset_create_and_add("nfs", NULL, fs_kobj);
- if (!nfs_client_kset)
- return -ENOMEM;
- nfs_client_kobj = nfs_netns_object_alloc("net", nfs_client_kset, NULL);
- if (!nfs_client_kobj) {
- kset_unregister(nfs_client_kset);
- nfs_client_kset = NULL;
- return -ENOMEM;
+ nfs_kset->kobj.parent = fs_kobj;
+ nfs_kset->kobj.ktype = &nfs_kset_type;
+ nfs_kset->kobj.kset = NULL;
+
+ ret = kset_register(nfs_kset);
+ if (ret) {
+ kfree(nfs_kset);
+ return ret;
}
+
return 0;
}
void nfs_sysfs_exit(void)
{
- kobject_put(nfs_client_kobj);
- kset_unregister(nfs_client_kset);
+ kset_unregister(nfs_kset);
}
static ssize_t nfs_netns_identifier_show(struct kobject *kobj,
@@ -127,7 +124,6 @@ static void nfs_netns_client_release(struct kobject *kobj)
kobject);
kfree(rcu_dereference_raw(c->identifier));
- kfree(c);
}
static const void *nfs_netns_client_namespace(const struct kobject *kobj)
@@ -151,6 +147,25 @@ static struct kobj_type nfs_netns_client_type = {
.namespace = nfs_netns_client_namespace,
};
+static void nfs_netns_object_release(struct kobject *kobj)
+{
+ struct nfs_netns_client *c = container_of(kobj,
+ struct nfs_netns_client,
+ nfs_net_kobj);
+ kfree(c);
+}
+
+static const void *nfs_netns_namespace(const struct kobject *kobj)
+{
+ return container_of(kobj, struct nfs_netns_client, nfs_net_kobj)->net;
+}
+
+static struct kobj_type nfs_netns_object_type = {
+ .release = nfs_netns_object_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .namespace = nfs_netns_namespace,
+};
+
static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
struct net *net)
{
@@ -159,10 +174,19 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p) {
p->net = net;
- p->kobject.kset = nfs_client_kset;
+ p->kobject.kset = nfs_kset;
+ p->nfs_net_kobj.kset = nfs_kset;
+
+ if (kobject_init_and_add(&p->nfs_net_kobj, &nfs_netns_object_type,
+ parent, "net") != 0) {
+ kobject_put(&p->nfs_net_kobj);
+ return NULL;
+ }
+
if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type,
- parent, "nfs_client") == 0)
+ &p->nfs_net_kobj, "nfs_client") == 0)
return p;
+
kobject_put(&p->kobject);
}
return NULL;
@@ -172,7 +196,7 @@ void nfs_netns_sysfs_setup(struct nfs_net *netns, struct net *net)
{
struct nfs_netns_client *clp;
- clp = nfs_netns_client_alloc(nfs_client_kobj, net);
+ clp = nfs_netns_client_alloc(&nfs_kset->kobj, net);
if (clp) {
netns->nfs_client = clp;
kobject_uevent(&clp->kobject, KOBJ_ADD);
@@ -187,6 +211,149 @@ void nfs_netns_sysfs_destroy(struct nfs_net *netns)
kobject_uevent(&clp->kobject, KOBJ_REMOVE);
kobject_del(&clp->kobject);
kobject_put(&clp->kobject);
+ kobject_del(&clp->nfs_net_kobj);
+ kobject_put(&clp->nfs_net_kobj);
netns->nfs_client = NULL;
}
}
+
+static bool shutdown_match_client(const struct rpc_task *task, const void *data)
+{
+ return true;
+}
+
+static void shutdown_client(struct rpc_clnt *clnt)
+{
+ clnt->cl_shutdown = 1;
+ rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL);
+}
+
+static ssize_t
+shutdown_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ bool shutdown = server->flags & NFS_MOUNT_SHUTDOWN;
+ return sysfs_emit(buf, "%d\n", shutdown);
+}
+
+static ssize_t
+shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nfs_server *server;
+ int ret, val;
+
+ server = container_of(kobj, struct nfs_server, kobj);
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val != 1)
+ return -EINVAL;
+
+ /* already shut down? */
+ if (server->flags & NFS_MOUNT_SHUTDOWN)
+ goto out;
+
+ server->flags |= NFS_MOUNT_SHUTDOWN;
+ shutdown_client(server->client);
+ shutdown_client(server->nfs_client->cl_rpcclient);
+
+ if (!IS_ERR(server->client_acl))
+ shutdown_client(server->client_acl);
+
+ if (server->nlm_host)
+ shutdown_client(server->nlm_host->h_rpcclnt);
+out:
+ return count;
+}
+
+static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown);
+
+#define RPC_CLIENT_NAME_SIZE 64
+
+void nfs_sysfs_link_rpc_client(struct nfs_server *server,
+ struct rpc_clnt *clnt, const char *uniq)
+{
+ char name[RPC_CLIENT_NAME_SIZE];
+ int ret;
+
+ strcpy(name, clnt->cl_program->name);
+ strcat(name, uniq ? uniq : "");
+ strcat(name, "_client");
+
+ ret = sysfs_create_link_nowarn(&server->kobj,
+ &clnt->cl_sysfs->kobject, name);
+ if (ret < 0)
+ pr_warn("NFS: can't create link to %s in sysfs (%d)\n",
+ name, ret);
+}
+EXPORT_SYMBOL_GPL(nfs_sysfs_link_rpc_client);
+
+static void nfs_sysfs_sb_release(struct kobject *kobj)
+{
+ /* no-op: why? see lib/kobject.c kobject_cleanup() */
+}
+
+static const void *nfs_netns_server_namespace(const struct kobject *kobj)
+{
+ return container_of(kobj, struct nfs_server, kobj)->nfs_client->cl_net;
+}
+
+static struct kobj_type nfs_sb_ktype = {
+ .release = nfs_sysfs_sb_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .namespace = nfs_netns_server_namespace,
+ .child_ns_type = nfs_netns_object_child_ns_type,
+};
+
+void nfs_sysfs_add_server(struct nfs_server *server)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&server->kobj, &nfs_sb_ktype,
+ &nfs_kset->kobj, "server-%d", server->s_sysfs_id);
+ if (ret < 0) {
+ pr_warn("NFS: nfs sysfs add server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+ return;
+ }
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_shutdown.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+}
+EXPORT_SYMBOL_GPL(nfs_sysfs_add_server);
+
+void nfs_sysfs_move_server_to_sb(struct super_block *s)
+{
+ struct nfs_server *server = s->s_fs_info;
+ int ret;
+
+ ret = kobject_rename(&server->kobj, s->s_id);
+ if (ret < 0)
+ pr_warn("NFS: rename sysfs %s failed (%d)\n",
+ server->kobj.name, ret);
+}
+
+void nfs_sysfs_move_sb_to_server(struct nfs_server *server)
+{
+ const char *s;
+ int ret = -ENOMEM;
+
+ s = kasprintf(GFP_KERNEL, "server-%d", server->s_sysfs_id);
+ if (s)
+ ret = kobject_rename(&server->kobj, s);
+ if (ret < 0)
+ pr_warn("NFS: rename sysfs %s failed (%d)\n",
+ server->kobj.name, ret);
+}
+
+/* unlink, not dec-ref */
+void nfs_sysfs_remove_server(struct nfs_server *server)
+{
+ kobject_del(&server->kobj);
+}
diff --git a/fs/nfs/sysfs.h b/fs/nfs/sysfs.h
index 5501ef573c32..c5d1990cade5 100644
--- a/fs/nfs/sysfs.h
+++ b/fs/nfs/sysfs.h
@@ -10,11 +10,12 @@
struct nfs_netns_client {
struct kobject kobject;
+ struct kobject nfs_net_kobj;
struct net *net;
const char __rcu *identifier;
};
-extern struct kobject *nfs_client_kobj;
+extern struct kobject *nfs_net_kobj;
extern int nfs_sysfs_init(void);
extern void nfs_sysfs_exit(void);
@@ -22,4 +23,11 @@ extern void nfs_sysfs_exit(void);
void nfs_netns_sysfs_setup(struct nfs_net *netns, struct net *net);
void nfs_netns_sysfs_destroy(struct nfs_net *netns);
+void nfs_sysfs_link_rpc_client(struct nfs_server *server,
+ struct rpc_clnt *clnt, const char *sysfs_prefix);
+void nfs_sysfs_add_server(struct nfs_server *s);
+void nfs_sysfs_move_server_to_sb(struct super_block *s);
+void nfs_sysfs_move_sb_to_server(struct nfs_server *s);
+void nfs_sysfs_remove_server(struct nfs_server *s);
+
#endif
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 95d7d8790bc3..f69c451018e3 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1623,6 +1623,20 @@ static int fanotify_events_supported(struct fsnotify_group *group,
return -EINVAL;
/*
+ * mount and sb marks are not allowed on kernel internal pseudo fs,
+ * like pipe_mnt, because that would subscribe to events on all the
+ * anonymous pipes in the system.
+ *
+ * SB_NOUSER covers all of the internal pseudo fs whose objects are not
+ * exposed to user's mount namespace, but there are other SB_KERNMOUNT
+ * fs, like nsfs, debugfs, for which the value of allowing sb and mount
+ * mark is questionable. For now we leave them alone.
+ */
+ if (mark_type != FAN_MARK_INODE &&
+ path->mnt->mnt_sb->s_flags & SB_NOUSER)
+ return -EINVAL;
+
+ /*
* We shouldn't have allowed setting dirent events and the directory
* flags FAN_ONDIR and FAN_EVENT_ON_CHILD in mask of non-dir inode,
* but because we always allowed it, error only when using new APIs.
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index 0b8bc66377db..a9d82bbb4729 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -573,7 +573,7 @@ add_alloc_in_same_attr_seg:
sbi, run, vcn, lcn, to_allocate, &pre_alloc,
is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
is_mft ? 0 :
- (sbi->record_size -
+ (sbi->record_size -
le32_to_cpu(rec->used) + 8) /
3 +
1,
diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
index c0c6bcbc8c05..42631b31adf1 100644
--- a/fs/ntfs3/attrlist.c
+++ b/fs/ntfs3/attrlist.c
@@ -52,7 +52,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (!attr->non_res) {
lsize = le32_to_cpu(attr->res.data_size);
- le = kmalloc(al_aligned(lsize), GFP_NOFS);
+ le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
if (!le) {
err = -ENOMEM;
goto out;
@@ -80,7 +80,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (err < 0)
goto out;
- le = kmalloc(al_aligned(lsize), GFP_NOFS);
+ le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
if (!le) {
err = -ENOMEM;
goto out;
@@ -375,8 +375,7 @@ bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le)
* al_delete_le - Delete first le from the list which matches its parameters.
*/
bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
- const __le16 *name, size_t name_len,
- const struct MFT_REF *ref)
+ const __le16 *name, u8 name_len, const struct MFT_REF *ref)
{
u16 size;
struct ATTR_LIST_ENTRY *le;
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index 9a6c6a09d70c..107e808e06ea 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -287,8 +287,8 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
/* Check bits before 'bit'. */
ib = wnd->zone_bit == wnd->zone_end ||
bit < wnd->zone_end ?
- 0 :
- wnd->zone_end;
+ 0 :
+ wnd->zone_end;
while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
bit -= 1;
@@ -298,8 +298,8 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
/* Check bits after 'end_in'. */
ib = wnd->zone_bit == wnd->zone_end ||
end_in > wnd->zone_bit ?
- wnd->nbits :
- wnd->zone_bit;
+ wnd->nbits :
+ wnd->zone_bit;
while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
end_in += 1;
@@ -418,7 +418,7 @@ static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
n3 = rb_first(&wnd->count_tree);
wnd->extent_max =
n3 ? rb_entry(n3, struct e_node, count.node)->count.key :
- 0;
+ 0;
return;
}
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 9be3e8edf4f3..1d6c824246c4 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -179,7 +179,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
int err = 0;
struct address_space *mapping = inode->i_mapping;
- u32 blocksize = 1 << inode->i_blkbits;
+ u32 blocksize = i_blocksize(inode);
pgoff_t idx = vbo >> PAGE_SHIFT;
u32 from = vbo & (PAGE_SIZE - 1);
pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -192,7 +192,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
for (; idx < idx_end; idx += 1, from = 0) {
page_off = (loff_t)idx << PAGE_SHIFT;
to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
- PAGE_SIZE;
+ PAGE_SIZE;
iblock = page_off >> inode->i_blkbits;
page = find_or_create_page(mapping, idx,
@@ -1078,7 +1078,7 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
- __generic_file_write_iter(iocb, from);
+ __generic_file_write_iter(iocb, from);
out:
inode_unlock(inode);
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 2bfcf1a989c9..16bd9faa2d28 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -77,7 +77,7 @@ struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) :
- NULL;
+ NULL;
}
/*
@@ -92,7 +92,7 @@ struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) :
- NULL;
+ NULL;
}
/*
@@ -236,6 +236,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
return attr;
out:
+ ntfs_inode_err(&ni->vfs_inode, "failed to parse mft record");
ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
return NULL;
}
@@ -384,7 +385,7 @@ bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
* ni_remove_attr - Remove all attributes for the given type/name/id.
*/
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, size_t name_len, bool base_only,
+ const __le16 *name, u8 name_len, bool base_only,
const __le16 *id)
{
int err;
@@ -517,6 +518,9 @@ out:
*/
static int ni_repack(struct ntfs_inode *ni)
{
+#if 1
+ return 0;
+#else
int err = 0;
struct ntfs_sb_info *sbi = ni->mi.sbi;
struct mft_inode *mi, *mi_p = NULL;
@@ -639,6 +643,7 @@ static int ni_repack(struct ntfs_inode *ni)
run_close(&run);
return err;
+#endif
}
/*
@@ -813,10 +818,8 @@ int ni_create_attr_list(struct ntfs_inode *ni)
* Looks like one record_size is always enough.
*/
le = kmalloc(al_aligned(rs), GFP_NOFS);
- if (!le) {
- err = -ENOMEM;
- goto out;
- }
+ if (!le)
+ return -ENOMEM;
mi_get_ref(&ni->mi, &le->ref);
ni->attr_list.le = le;
@@ -865,15 +868,16 @@ int ni_create_attr_list(struct ntfs_inode *ni)
if (to_free > free_b) {
err = -EINVAL;
- goto out1;
+ goto out;
}
}
/* Allocate child MFT. */
err = ntfs_look_free_mft(sbi, &rno, is_mft, ni, &mi);
if (err)
- goto out1;
+ goto out;
+ err = -EINVAL;
/* Call mi_remove_attr() in reverse order to keep pointers 'arr_move' valid. */
while (to_free > 0) {
struct ATTRIB *b = arr_move[--nb];
@@ -882,7 +886,8 @@ int ni_create_attr_list(struct ntfs_inode *ni)
attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
b->name_len, asize, name_off);
- WARN_ON(!attr);
+ if (!attr)
+ goto out;
mi_get_ref(mi, &le_b[nb]->ref);
le_b[nb]->id = attr->id;
@@ -892,17 +897,20 @@ int ni_create_attr_list(struct ntfs_inode *ni)
attr->id = le_b[nb]->id;
/* Remove from primary record. */
- WARN_ON(!mi_remove_attr(NULL, &ni->mi, b));
+ if (!mi_remove_attr(NULL, &ni->mi, b))
+ goto out;
if (to_free <= asize)
break;
to_free -= asize;
- WARN_ON(!nb);
+ if (!nb)
+ goto out;
}
attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
- WARN_ON(!attr);
+ if (!attr)
+ goto out;
attr->non_res = 0;
attr->flags = 0;
@@ -916,14 +924,12 @@ int ni_create_attr_list(struct ntfs_inode *ni)
ni->attr_list.dirty = false;
mark_inode_dirty(&ni->vfs_inode);
- goto out;
+ return 0;
-out1:
+out:
kfree(ni->attr_list.le);
ni->attr_list.le = NULL;
ni->attr_list.size = 0;
-
-out:
return err;
}
@@ -1638,14 +1644,13 @@ int ni_delete_all(struct ntfs_inode *ni)
* Return: File name attribute by its value.
*/
struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
- const struct cpu_str *uni,
+ const struct le_str *uni,
const struct MFT_REF *home_dir,
struct mft_inode **mi,
struct ATTR_LIST_ENTRY **le)
{
struct ATTRIB *attr = NULL;
struct ATTR_FILE_NAME *fname;
- struct le_str *fns;
if (le)
*le = NULL;
@@ -1669,10 +1674,9 @@ next:
if (uni->len != fname->name_len)
goto next;
- fns = (struct le_str *)&fname->name_len;
- if (ntfs_cmp_names_cpu(uni, fns, NULL, false))
+ if (ntfs_cmp_names(uni->name, uni->len, fname->name, uni->len, NULL,
+ false))
goto next;
-
return fname;
}
@@ -1757,8 +1761,8 @@ int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa)
/* Resize nonresident empty attribute in-place only. */
new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ?
- (SIZEOF_NONRESIDENT_EX + 8) :
- (SIZEOF_NONRESIDENT + 8);
+ (SIZEOF_NONRESIDENT_EX + 8) :
+ (SIZEOF_NONRESIDENT + 8);
if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size)))
return -EOPNOTSUPP;
@@ -2910,7 +2914,7 @@ int ni_remove_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
/* Find name in record. */
mi_get_ref(&dir_ni->mi, &de_name->home);
- fname = ni_fname_name(ni, (struct cpu_str *)&de_name->name_len,
+ fname = ni_fname_name(ni, (struct le_str *)&de_name->name_len,
&de_name->home, &mi, &le);
if (!fname)
return -ENOENT;
@@ -3160,8 +3164,8 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
__le64 valid_le;
dup->alloc_size = is_attr_ext(attr) ?
- attr->nres.total_size :
- attr->nres.alloc_size;
+ attr->nres.total_size :
+ attr->nres.alloc_size;
dup->data_size = attr->nres.data_size;
if (new_valid > data_size)
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 57762c5fe68b..12f28cdf5c83 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -828,8 +828,8 @@ static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
memcpy(rt + 1, tbl + 1, esize * used);
rt->free_goal = free_goal == ~0u ?
- cpu_to_le32(~0u) :
- cpu_to_le32(sizeof(struct RESTART_TABLE) +
+ cpu_to_le32(~0u) :
+ cpu_to_le32(sizeof(struct RESTART_TABLE) +
free_goal * esize);
if (tbl->first_free) {
@@ -1090,8 +1090,8 @@ static inline u64 base_lsn(struct ntfs_log *log,
<< log->file_data_bits) +
((((is_log_record_end(hdr) &&
h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn)) ?
- le16_to_cpu(hdr->record_hdr.next_record_off) :
- log->page_size) +
+ le16_to_cpu(hdr->record_hdr.next_record_off) :
+ log->page_size) +
lsn) >>
3);
@@ -1299,8 +1299,8 @@ static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
log->clst_per_page = 1;
log->first_page = major_ver >= 2 ?
- 0x22 * page_size :
- ((sys_page_size << 1) + (page_size << 1));
+ 0x22 * page_size :
+ ((sys_page_size << 1) + (page_size << 1));
log->major_ver = major_ver;
log->minor_ver = minor_ver;
}
@@ -1513,8 +1513,8 @@ static u32 current_log_avail(struct ntfs_log *log)
* If there is no oldest lsn then start at the first page of the file.
*/
oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN) ?
- log->first_page :
- (log->oldest_lsn_off & ~log->sys_page_mask);
+ log->first_page :
+ (log->oldest_lsn_off & ~log->sys_page_mask);
/*
* We will use the next log page offset to compute the next free page.
@@ -1522,9 +1522,9 @@ static u32 current_log_avail(struct ntfs_log *log)
* If we are at the first page then use the end of the file.
*/
next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL) ?
- log->next_page + log->page_size :
+ log->next_page + log->page_size :
log->next_page == log->first_page ? log->l_size :
- log->next_page;
+ log->next_page;
/* If the two offsets are the same then there is no available space. */
if (oldest_off == next_free_off)
@@ -1535,8 +1535,8 @@ static u32 current_log_avail(struct ntfs_log *log)
*/
free_bytes =
oldest_off < next_free_off ?
- log->total_avail_pages - (next_free_off - oldest_off) :
- oldest_off - next_free_off;
+ log->total_avail_pages - (next_free_off - oldest_off) :
+ oldest_off - next_free_off;
free_bytes >>= log->page_bits;
return free_bytes * log->reserved;
@@ -1671,7 +1671,7 @@ next_tail:
best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
best_lsn2 = second_tail ? base_lsn(log, second_tail, second_file_off) :
- 0;
+ 0;
if (first_tail && second_tail) {
if (best_lsn1 > best_lsn2) {
@@ -1767,7 +1767,7 @@ tail_read:
page_cnt = page_pos = 1;
curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off) :
- log->next_page;
+ log->next_page;
wrapped_file =
curpage_off == log->first_page &&
@@ -1826,8 +1826,8 @@ use_cur_page:
((lsn_cur >> log->file_data_bits) +
((curpage_off <
(lsn_to_vbo(log, lsn_cur) & ~log->page_mask)) ?
- 1 :
- 0)) != expected_seq) {
+ 1 :
+ 0)) != expected_seq) {
goto check_tail;
}
@@ -2643,8 +2643,8 @@ static inline bool check_index_root(const struct ATTRIB *attr,
const struct INDEX_ROOT *root = resident_data(attr);
u8 index_bits = le32_to_cpu(root->index_block_size) >=
sbi->cluster_size ?
- sbi->cluster_bits :
- SECTOR_SHIFT;
+ sbi->cluster_bits :
+ SECTOR_SHIFT;
u8 block_clst = root->index_block_clst;
if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
@@ -3861,9 +3861,9 @@ check_restart_area:
/* If we have a valid page then grab a pointer to the restart area. */
ra2 = rst_info.valid_page ?
- Add2Ptr(rst_info.r_page,
+ Add2Ptr(rst_info.r_page,
le16_to_cpu(rst_info.r_page->ra_off)) :
- NULL;
+ NULL;
if (rst_info.chkdsk_was_run ||
(ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 28cc421102e5..33afee0f5559 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -9,6 +9,7 @@
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>
+#include <linux/nls.h>
#include "debug.h"
#include "ntfs.h"
@@ -173,12 +174,12 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
fo = le16_to_cpu(rhdr->fix_off);
fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
- le16_to_cpu(rhdr->fix_num);
+ le16_to_cpu(rhdr->fix_num);
/* Check errors. */
if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
fn * SECTOR_SIZE > bytes) {
- return -EINVAL; /* Native chkntfs returns ok! */
+ return -E_NTFS_CORRUPT;
}
/* Get fixup pointer. */
@@ -1661,7 +1662,8 @@ int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
return 0;
}
-struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
+struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
+ enum RECORD_FLAG flag)
{
int err = 0;
struct super_block *sb = sbi->sb;
@@ -1673,8 +1675,7 @@ struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
ni = ntfs_i(inode);
- err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
- false);
+ err = mi_format_new(&ni->mi, sbi, rno, flag, false);
if (err)
goto out;
@@ -1937,7 +1938,7 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
break;
sii_e = (struct NTFS_DE_SII *)ne;
- if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
+ if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
continue;
next_id = le32_to_cpu(sii_e->sec_id) + 1;
@@ -1998,18 +1999,18 @@ int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
goto out;
t32 = le32_to_cpu(sii_e->sec_hdr.size);
- if (t32 < SIZEOF_SECURITY_HDR) {
+ if (t32 < sizeof(struct SECURITY_HDR)) {
err = -EINVAL;
goto out;
}
- if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
+ if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
/* Looks like too big security. 0x10000 - is arbitrary big number. */
err = -EFBIG;
goto out;
}
- *size = t32 - SIZEOF_SECURITY_HDR;
+ *size = t32 - sizeof(struct SECURITY_HDR);
p = kmalloc(*size, GFP_NOFS);
if (!p) {
@@ -2023,14 +2024,14 @@ int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
if (err)
goto out;
- if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
+ if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
err = -EINVAL;
goto out;
}
err = ntfs_read_run_nb(sbi, &ni->file.run,
le64_to_cpu(sii_e->sec_hdr.off) +
- SIZEOF_SECURITY_HDR,
+ sizeof(struct SECURITY_HDR),
p, *size, NULL);
if (err)
goto out;
@@ -2069,7 +2070,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
struct NTFS_DE_SDH sdh_e;
struct NTFS_DE_SII sii_e;
struct SECURITY_HDR *d_security;
- u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
+ u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
u32 aligned_sec_size = ALIGN(new_sec_size, 16);
struct SECURITY_KEY hash_key;
struct ntfs_fnd *fnd_sdh = NULL;
@@ -2207,14 +2208,14 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
/* Fill SII entry. */
sii_e.de.view.data_off =
cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
- sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
+ sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
sii_e.de.view.res = 0;
- sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
+ sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
sii_e.de.flags = 0;
sii_e.de.res = 0;
sii_e.sec_id = d_security->key.sec_id;
- memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
+ memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
if (err)
@@ -2223,7 +2224,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
/* Fill SDH entry. */
sdh_e.de.view.data_off =
cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
- sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
+ sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
sdh_e.de.view.res = 0;
sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
@@ -2231,7 +2232,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
sdh_e.de.res = 0;
sdh_e.key.hash = d_security->key.hash;
sdh_e.key.sec_id = d_security->key.sec_id;
- memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
+ memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
sdh_e.magic[0] = cpu_to_le16('I');
sdh_e.magic[1] = cpu_to_le16('I');
@@ -2522,7 +2523,8 @@ out:
/*
* run_deallocate - Deallocate clusters.
*/
-int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
+int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
+ bool trim)
{
CLST lcn, len;
size_t idx = 0;
@@ -2578,13 +2580,13 @@ static inline bool name_has_forbidden_chars(const struct le_str *fname)
return false;
}
-static inline bool is_reserved_name(struct ntfs_sb_info *sbi,
+static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
const struct le_str *fname)
{
int port_digit;
const __le16 *name = fname->name;
int len = fname->len;
- u16 *upcase = sbi->upcase;
+ const u16 *upcase = sbi->upcase;
/* check for 3 chars reserved names (device names) */
/* name by itself or with any extension is forbidden */
@@ -2618,3 +2620,60 @@ bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
return !name_has_forbidden_chars(fname) &&
!is_reserved_name(sbi, fname);
}
+
+/*
+ * ntfs_set_label - updates current ntfs label.
+ */
+int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
+{
+ int err;
+ struct ATTRIB *attr;
+ struct ntfs_inode *ni = sbi->volume.ni;
+ const u8 max_ulen = 0x80; /* TODO: use attrdef to get maximum length */
+ /* Allocate PATH_MAX bytes. */
+ struct cpu_str *uni = __getname();
+
+ if (!uni)
+ return -ENOMEM;
+
+ err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
+ UTF16_LITTLE_ENDIAN);
+ if (err < 0)
+ goto out;
+
+ if (uni->len > max_ulen) {
+ ntfs_warn(sbi->sb, "new label is too long");
+ err = -EFBIG;
+ goto out;
+ }
+
+ ni_lock(ni);
+
+ /* Ignore any errors. */
+ ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
+
+ err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
+ 0, &attr, NULL, NULL);
+ if (err < 0)
+ goto unlock_out;
+
+ /* write new label in on-disk struct. */
+ memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
+
+ /* update cached value of current label. */
+ if (len >= ARRAY_SIZE(sbi->volume.label))
+ len = ARRAY_SIZE(sbi->volume.label) - 1;
+ memcpy(sbi->volume.label, label, len);
+ sbi->volume.label[len] = 0;
+ mark_inode_dirty_sync(&ni->vfs_inode);
+
+unlock_out:
+ ni_unlock(ni);
+
+ if (!err)
+ err = _ni_write_inode(&ni->vfs_inode, 0);
+
+out:
+ __putname(uni);
+ return err;
+}
\ No newline at end of file
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 0a48d2d67219..124c6e822623 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -432,8 +432,8 @@ next_run:
nbits = 8 * (data_size - vbo);
ok = nbits > from ?
- (*fn)((ulong *)bh->b_data, from, nbits, ret) :
- false;
+ (*fn)((ulong *)bh->b_data, from, nbits, ret) :
+ false;
put_bh(bh);
if (ok) {
@@ -1113,6 +1113,12 @@ ok:
*node = in;
out:
+ if (err == -E_NTFS_CORRUPT) {
+ ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
+ ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
+ err = -EINVAL;
+ }
+
if (ib != in->index)
kfree(ib);
@@ -1676,8 +1682,8 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
/* Create alloc and bitmap attributes (if not). */
err = run_is_empty(&indx->alloc_run) ?
- indx_create_allocate(indx, ni, &new_vbn) :
- indx_add_allocate(indx, ni, &new_vbn);
+ indx_create_allocate(indx, ni, &new_vbn) :
+ indx_add_allocate(indx, ni, &new_vbn);
/* Layout of record may be changed, so rescan root. */
root = indx_get_root(indx, ni, &attr, &mi);
@@ -1868,8 +1874,8 @@ indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
(*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size),
up_e + 1, le16_to_cpu(up_e->key_size),
ctx) < 0 ?
- hdr2 :
- hdr1,
+ hdr2 :
+ hdr1,
new_de, NULL, ctx);
indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
@@ -2340,7 +2346,7 @@ int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
re, ctx,
fnd->level - 1,
fnd) :
- indx_insert_into_root(indx, ni, re, e,
+ indx_insert_into_root(indx, ni, re, e,
ctx, fnd, 0);
kfree(re);
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 6c560245eef4..dc7e7ab701c6 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -263,7 +263,7 @@ next_attr:
goto next_attr;
run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run :
- &ni->file.run;
+ &ni->file.run;
break;
case ATTR_ROOT:
@@ -291,8 +291,8 @@ next_attr:
goto out;
mode = sb->s_root ?
- (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) :
- (S_IFDIR | 0777);
+ (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) :
+ (S_IFDIR | 0777);
goto next_attr;
case ATTR_ALLOC:
@@ -450,7 +450,7 @@ end_enum:
inode->i_op = &ntfs_file_inode_operations;
inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
- &ntfs_aops;
+ &ntfs_aops;
if (ino != MFT_REC_MFT)
init_rwsem(&ni->file.run_lock);
} else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
@@ -787,7 +787,7 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ret = blockdev_direct_IO(iocb, inode, iter,
wr ? ntfs_get_block_direct_IO_W :
- ntfs_get_block_direct_IO_R);
+ ntfs_get_block_direct_IO_R);
if (ret > 0)
end = vbo + ret;
@@ -1191,11 +1191,11 @@ out:
* - ntfs_symlink
* - ntfs_mkdir
* - ntfs_atomic_open
- *
+ *
* NOTE: if fnd != NULL (ntfs_atomic_open) then @dir is locked
*/
-struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
+struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry,
const struct cpu_str *uni, umode_t mode,
dev_t dev, const char *symname, u32 size,
struct ntfs_fnd *fnd)
@@ -1309,7 +1309,7 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
if (err)
goto out2;
- ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
+ ni = ntfs_new_inode(sbi, ino, S_ISDIR(mode) ? RECORD_FLAG_DIR : 0);
if (IS_ERR(ni)) {
err = PTR_ERR(ni);
ni = NULL;
@@ -1437,8 +1437,7 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
- root->ihdr.de_off =
- cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
+ root->ihdr.de_off = cpu_to_le32(sizeof(struct INDEX_HDR));
root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
sizeof(struct NTFS_DE));
root->ihdr.total = root->ihdr.used;
@@ -1605,7 +1604,7 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
inode->i_op = &ntfs_file_inode_operations;
inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
- &ntfs_aops;
+ &ntfs_aops;
init_rwsem(&ni->file.run_lock);
} else {
inode->i_op = &ntfs_special_inode_operations;
diff --git a/fs/ntfs3/lznt.c b/fs/ntfs3/lznt.c
index 61e161c7c567..4aae598d6d88 100644
--- a/fs/ntfs3/lznt.c
+++ b/fs/ntfs3/lznt.c
@@ -297,7 +297,7 @@ next:
struct lznt *get_lznt_ctx(int level)
{
struct lznt *r = kzalloc(level ? offsetof(struct lznt, hash) :
- sizeof(struct lznt),
+ sizeof(struct lznt),
GFP_NOFS);
if (r)
@@ -393,8 +393,8 @@ ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
} else {
/* This chunk does not contain compressed data. */
unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end ?
- unc_end - unc_chunk :
- LZNT_CHUNK_SIZE;
+ unc_end - unc_chunk :
+ LZNT_CHUNK_SIZE;
if (cmpr_chunk + sizeof(chunk_hdr) + unc_use >
cmpr_end) {
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index 9736b1e4a0f6..70f8c859e0ad 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -109,8 +109,8 @@ static int ntfs_create(struct mnt_idmap *idmap, struct inode *dir,
{
struct inode *inode;
- inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFREG | mode,
- 0, NULL, 0, NULL);
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFREG | mode, 0,
+ NULL, 0, NULL);
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -125,8 +125,8 @@ static int ntfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
{
struct inode *inode;
- inode = ntfs_create_inode(idmap, dir, dentry, NULL, mode, rdev,
- NULL, 0, NULL);
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, mode, rdev, NULL, 0,
+ NULL);
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -199,8 +199,8 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
u32 size = strlen(symname);
struct inode *inode;
- inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFLNK | 0777,
- 0, symname, size, NULL);
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFLNK | 0777, 0,
+ symname, size, NULL);
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -213,8 +213,8 @@ static int ntfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
{
struct inode *inode;
- inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFDIR | mode,
- 0, NULL, 0, NULL);
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFDIR | mode, 0,
+ NULL, 0, NULL);
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -422,19 +422,10 @@ static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
* fnd contains tree's path to insert to.
* If fnd is not NULL then dir is locked.
*/
-
- /*
- * Unfortunately I don't know how to get here correct 'struct nameidata *nd'
- * or 'struct mnt_idmap *idmap'.
- * See atomic_open in fs/namei.c.
- * This is why xfstest/633 failed.
- * Looks like ntfs_atomic_open must accept 'struct mnt_idmap *idmap' as argument.
- */
-
- inode = ntfs_create_inode(&nop_mnt_idmap, dir, dentry, uni, mode, 0,
- NULL, 0, fnd);
+ inode = ntfs_create_inode(mnt_idmap(file->f_path.mnt), dir, dentry, uni,
+ mode, 0, NULL, 0, fnd);
err = IS_ERR(inode) ? PTR_ERR(inode) :
- finish_open(file, dentry, ntfs_file_open);
+ finish_open(file, dentry, ntfs_file_open);
dput(d);
out2:
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 90151e56c122..98b76d1b09e7 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -95,11 +95,10 @@ enum RECORD_NUM {
MFT_REC_BITMAP = 6,
MFT_REC_BOOT = 7,
MFT_REC_BADCLUST = 8,
- //MFT_REC_QUOTA = 9,
- MFT_REC_SECURE = 9, // NTFS 3.0
+ MFT_REC_SECURE = 9,
MFT_REC_UPCASE = 10,
- MFT_REC_EXTEND = 11, // NTFS 3.0
- MFT_REC_RESERVED = 11,
+ MFT_REC_EXTEND = 11,
+ MFT_REC_RESERVED = 12,
MFT_REC_FREE = 16,
MFT_REC_USER = 24,
};
@@ -109,7 +108,6 @@ enum ATTR_TYPE {
ATTR_STD = cpu_to_le32(0x10),
ATTR_LIST = cpu_to_le32(0x20),
ATTR_NAME = cpu_to_le32(0x30),
- // ATTR_VOLUME_VERSION on Nt4
ATTR_ID = cpu_to_le32(0x40),
ATTR_SECURE = cpu_to_le32(0x50),
ATTR_LABEL = cpu_to_le32(0x60),
@@ -118,7 +116,6 @@ enum ATTR_TYPE {
ATTR_ROOT = cpu_to_le32(0x90),
ATTR_ALLOC = cpu_to_le32(0xA0),
ATTR_BITMAP = cpu_to_le32(0xB0),
- // ATTR_SYMLINK on Nt4
ATTR_REPARSE = cpu_to_le32(0xC0),
ATTR_EA_INFO = cpu_to_le32(0xD0),
ATTR_EA = cpu_to_le32(0xE0),
@@ -144,6 +141,7 @@ enum FILE_ATTRIBUTE {
FILE_ATTRIBUTE_ENCRYPTED = cpu_to_le32(0x00004000),
FILE_ATTRIBUTE_VALID_FLAGS = cpu_to_le32(0x00007fb7),
FILE_ATTRIBUTE_DIRECTORY = cpu_to_le32(0x10000000),
+ FILE_ATTRIBUTE_INDEX = cpu_to_le32(0x20000000)
};
static_assert(sizeof(enum FILE_ATTRIBUTE) == 4);
@@ -266,7 +264,7 @@ enum RECORD_FLAG {
RECORD_FLAG_IN_USE = cpu_to_le16(0x0001),
RECORD_FLAG_DIR = cpu_to_le16(0x0002),
RECORD_FLAG_SYSTEM = cpu_to_le16(0x0004),
- RECORD_FLAG_UNKNOWN = cpu_to_le16(0x0008),
+ RECORD_FLAG_INDEX = cpu_to_le16(0x0008),
};
/* MFT Record structure. */
@@ -290,6 +288,15 @@ struct MFT_REC {
#define MFTRECORD_FIXUP_OFFSET_1 offsetof(struct MFT_REC, res)
#define MFTRECORD_FIXUP_OFFSET_3 offsetof(struct MFT_REC, fixups)
+/*
+ * define MFTRECORD_FIXUP_OFFSET as MFTRECORD_FIXUP_OFFSET_3 (0x30)
+ * to format new mft records with bigger header (as current ntfs.sys does)
+ *
+ * define MFTRECORD_FIXUP_OFFSET as MFTRECORD_FIXUP_OFFSET_1 (0x2A)
+ * to format new mft records with smaller header (as old ntfs.sys did)
+ * Both variants are valid.
+ */
+#define MFTRECORD_FIXUP_OFFSET MFTRECORD_FIXUP_OFFSET_1
static_assert(MFTRECORD_FIXUP_OFFSET_1 == 0x2A);
static_assert(MFTRECORD_FIXUP_OFFSET_3 == 0x30);
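A minimal worked example (a sketch, not part of the patch, assuming a 1 KiB record size) of what the two permitted fixup offsets imply for a freshly formatted record, following the arithmetic that ntfs_init_from_boot() and mi_format_new() apply below:

/* Sketch: header layout of a new MFT record for each permitted fixup offset. */
#include <stdio.h>

#define SECTOR_SHIFT	9
#define ALIGN8(x)	(((x) + 7u) & ~7u)

static void layout(unsigned int fixup_off, unsigned int record_size)
{
	/* One fixup word per sector plus the update sequence number. */
	unsigned int fn = (record_size >> SECTOR_SHIFT) + 1;
	/* First attribute starts after the fixup array, 8-byte aligned. */
	unsigned int ao = ALIGN8(fixup_off + (unsigned int)sizeof(short) * fn);

	printf("fix_off=0x%02x fix_num=%u attr_off=0x%02x\n", fixup_off, fn, ao);
}

int main(void)
{
	layout(0x2A, 1024);	/* MFTRECORD_FIXUP_OFFSET_1: attr_off ends up 0x30 */
	layout(0x30, 1024);	/* MFTRECORD_FIXUP_OFFSET_3: attr_off ends up 0x38 */
	return 0;
}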
@@ -331,18 +338,18 @@ struct ATTR_NONRESIDENT {
__le64 svcn; // 0x10: Starting VCN of this segment.
__le64 evcn; // 0x18: End VCN of this segment.
__le16 run_off; // 0x20: Offset to packed runs.
- // Unit of Compression size for this stream, expressed
- // as a log of the cluster size.
+ // Unit of Compression size for this stream, expressed
+ // as a log of the cluster size.
//
- // 0 means file is not compressed
- // 1, 2, 3, and 4 are potentially legal values if the
- // stream is compressed, however the implementation
- // may only choose to use 4, or possibly 3. Note
- // that 4 means cluster size time 16. If convenient
- // the implementation may wish to accept a
- // reasonable range of legal values here (1-5?),
- // even if the implementation only generates
- // a smaller set of values itself.
+ // 0 means file is not compressed
+ // 1, 2, 3, and 4 are potentially legal values if the
+ // stream is compressed, however the implementation
+ // may only choose to use 4, or possibly 3.
+ // Note that 4 means cluster size times 16.
+ // If convenient the implementation may wish to accept a
+ // reasonable range of legal values here (1-5?),
+ // even if the implementation only generates
+ // a smaller set of values itself.
u8 c_unit; // 0x22:
u8 res1[5]; // 0x23:
__le64 alloc_size; // 0x28: The allocated size of attribute in bytes.
@@ -836,16 +843,22 @@ static_assert(sizeof(struct ATTR_DEF_ENTRY) == 0xa0);
/* Object ID (0x40) */
struct OBJECT_ID {
struct GUID ObjId; // 0x00: Unique Id assigned to file.
- struct GUID BirthVolumeId; // 0x10: Birth Volume Id is the Object Id of the Volume on.
- // which the Object Id was allocated. It never changes.
- struct GUID BirthObjectId; // 0x20: Birth Object Id is the first Object Id that was
- // ever assigned to this MFT Record. I.e. If the Object Id
- // is changed for some reason, this field will reflect the
- // original value of the Object Id.
- struct GUID DomainId; // 0x30: Domain Id is currently unused but it is intended to be
- // used in a network environment where the local machine is
- // part of a Windows 2000 Domain. This may be used in a Windows
- // 2000 Advanced Server managed domain.
+
+ // Birth Volume Id is the Object Id of the Volume on
+ // which the Object Id was allocated. It never changes.
+ struct GUID BirthVolumeId; //0x10:
+
+ // Birth Object Id is the first Object Id that was
+ // ever assigned to this MFT Record. I.e. If the Object Id
+ // is changed for some reason, this field will reflect the
+ // original value of the Object Id.
+ struct GUID BirthObjectId; // 0x20:
+
+ // Domain Id is currently unused but it is intended to be
+ // used in a network environment where the local machine is
+ // part of a Windows 2000 Domain. This may be used in a Windows
+ // 2000 Advanced Server managed domain.
+ struct GUID DomainId; // 0x30:
};
static_assert(sizeof(struct OBJECT_ID) == 0x40);
@@ -855,32 +868,35 @@ struct NTFS_DE_O {
struct NTFS_DE de;
struct GUID ObjId; // 0x10: Unique Id assigned to file.
struct MFT_REF ref; // 0x20: MFT record number with this file.
- struct GUID BirthVolumeId; // 0x28: Birth Volume Id is the Object Id of the Volume on
- // which the Object Id was allocated. It never changes.
- struct GUID BirthObjectId; // 0x38: Birth Object Id is the first Object Id that was
- // ever assigned to this MFT Record. I.e. If the Object Id
- // is changed for some reason, this field will reflect the
- // original value of the Object Id.
- // This field is valid if data_size == 0x48.
- struct GUID BirthDomainId; // 0x48: Domain Id is currently unused but it is intended
- // to be used in a network environment where the local
- // machine is part of a Windows 2000 Domain. This may be
- // used in a Windows 2000 Advanced Server managed domain.
+
+ // Birth Volume Id is the Object Id of the Volume on
+ // which the Object Id was allocated. It never changes.
+ struct GUID BirthVolumeId; // 0x28:
+
+ // Birth Object Id is the first Object Id that was
+ // ever assigned to this MFT Record. I.e. If the Object Id
+ // is changed for some reason, this field will reflect the
+ // original value of the Object Id.
+ // This field is valid if data_size == 0x48.
+ struct GUID BirthObjectId; // 0x38:
+
+ // Domain Id is currently unused but it is intended
+ // to be used in a network environment where the local
+ // machine is part of a Windows 2000 Domain. This may be
+ // used in a Windows 2000 Advanced Server managed domain.
+ struct GUID BirthDomainId; // 0x48:
};
static_assert(sizeof(struct NTFS_DE_O) == 0x58);
-#define NTFS_OBJECT_ENTRY_DATA_SIZE1 \
- 0x38 // struct NTFS_DE_O.BirthDomainId is not used
-#define NTFS_OBJECT_ENTRY_DATA_SIZE2 \
- 0x48 // struct NTFS_DE_O.BirthDomainId is used
-
/* Q Directory entry structure ( rule = 0x11 ) */
struct NTFS_DE_Q {
struct NTFS_DE de;
__le32 owner_id; // 0x10: Unique Id assigned to file
+
+ /* Here are 0x30 bytes of user quota. NOTE: 4-byte aligned! */
__le32 Version; // 0x14: 0x02
- __le32 flags2; // 0x18: Quota flags, see above
+ __le32 Flags; // 0x18: Quota flags, see above
__le64 BytesUsed; // 0x1C:
__le64 ChangeTime; // 0x24:
__le64 WarningLimit; // 0x28:
@@ -888,9 +904,9 @@ struct NTFS_DE_Q {
__le64 ExceededTime; // 0x3C:
// SID is placed here
-}; // sizeof() = 0x44
+}__packed; // sizeof() = 0x44
-#define SIZEOF_NTFS_DE_Q 0x44
+static_assert(sizeof(struct NTFS_DE_Q) == 0x44);
#define SecurityDescriptorsBlockSize 0x40000 // 256K
#define SecurityDescriptorMaxSize 0x20000 // 128K
@@ -912,7 +928,7 @@ struct SECURITY_HDR {
*/
} __packed;
-#define SIZEOF_SECURITY_HDR 0x14
+static_assert(sizeof(struct SECURITY_HDR) == 0x14);
/* SII Directory entry structure */
struct NTFS_DE_SII {
@@ -921,7 +937,8 @@ struct NTFS_DE_SII {
struct SECURITY_HDR sec_hdr; // 0x14:
} __packed;
-#define SIZEOF_SII_DIRENTRY 0x28
+static_assert(offsetof(struct NTFS_DE_SII, sec_hdr) == 0x14);
+static_assert(sizeof(struct NTFS_DE_SII) == 0x28);
/* SDH Directory entry structure */
struct NTFS_DE_SDH {
@@ -1155,7 +1172,7 @@ struct REPARSE_DATA_BUFFER {
#define FILE_NEED_EA 0x80 // See ntifs.h
/*
- *FILE_NEED_EA, indicates that the file to which the EA belongs cannot be
+ * FILE_NEED_EA, indicates that the file to which the EA belongs cannot be
* interpreted without understanding the associated extended attributes.
*/
struct EA_INFO {
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index eb01f7e76479..629403ede6e5 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -53,6 +53,8 @@ enum utf16_endian;
#define E_NTFS_NONRESIDENT 556
/* NTFS specific error code about punch hole. */
#define E_NTFS_NOTALIGNED 557
+/* NTFS specific error code when on-disk struct is corrupted. */
+#define E_NTFS_CORRUPT 558
/* sbi->flags */
@@ -274,7 +276,7 @@ struct ntfs_sb_info {
__le16 flags; // Cached current VOLUME_INFO::flags, VOLUME_FLAG_DIRTY.
u8 major_ver;
u8 minor_ver;
- char label[65];
+ char label[256];
bool real_dirty; // Real fs state.
} volume;
@@ -284,7 +286,6 @@ struct ntfs_sb_info {
struct ntfs_inode *ni;
u32 next_id;
u64 next_off;
-
__le32 def_security_id;
} security;
@@ -312,6 +313,7 @@ struct ntfs_sb_info {
struct ntfs_mount_options *options;
struct ratelimit_state msg_ratelimit;
+ struct proc_dir_entry *procdir;
};
/* One MFT record(usually 1024 bytes), consists of attributes. */
@@ -465,8 +467,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
struct ATTR_LIST_ENTRY **new_le);
bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le);
bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
- const __le16 *name, size_t name_len,
- const struct MFT_REF *ref);
+ const __le16 *name, u8 name_len, const struct MFT_REF *ref);
int al_update(struct ntfs_inode *ni, int sync);
static inline size_t al_aligned(size_t size)
{
@@ -525,7 +526,7 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
int ni_load_all_mi(struct ntfs_inode *ni);
bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, size_t name_len, bool base_only,
+ const __le16 *name, u8 name_len, bool base_only,
const __le16 *id);
int ni_create_attr_list(struct ntfs_inode *ni);
int ni_expand_list(struct ntfs_inode *ni);
@@ -542,7 +543,7 @@ void ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
struct mft_inode *mi, struct ATTR_LIST_ENTRY *le);
int ni_delete_all(struct ntfs_inode *ni);
struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
- const struct cpu_str *uni,
+ const struct le_str *uni,
const struct MFT_REF *home,
struct mft_inode **mi,
struct ATTR_LIST_ENTRY **entry);
@@ -629,7 +630,7 @@ int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
u64 vbo, u64 *lbo, u64 *bytes);
struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST nRec,
- bool dir);
+ enum RECORD_FLAG flag);
extern const u8 s_default_security[0x50];
bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len);
int ntfs_security_init(struct ntfs_sb_info *sbi);
@@ -647,8 +648,10 @@ int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
const struct MFT_REF *ref);
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim);
-int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim);
+int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
+ bool trim);
bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *name);
+int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len);
/* Globals from index.c */
int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit);
@@ -706,8 +709,8 @@ int ntfs_sync_inode(struct inode *inode);
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
struct inode *i2);
int inode_write_data(struct inode *inode, const void *data, size_t bytes);
-struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
+struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry,
const struct cpu_str *uni, umode_t mode,
dev_t dev, const char *symname, u32 size,
struct ntfs_fnd *fnd);
@@ -736,7 +739,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr);
// TODO: id?
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
enum ATTR_TYPE type, const __le16 *name,
- size_t name_len, const __le16 *id);
+ u8 name_len, const __le16 *id);
static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
struct ATTR_LIST_ENTRY *le)
{
@@ -856,12 +859,12 @@ unsigned long ntfs_names_hash(const u16 *name, size_t len, const u16 *upcase,
/* globals from xattr.c */
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
-struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap,
- struct dentry *dentry, int type);
+struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ int type);
int ntfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
int ntfs_init_acl(struct mnt_idmap *idmap, struct inode *inode,
- struct inode *dir);
+ struct inode *dir);
#else
#define ntfs_get_acl NULL
#define ntfs_set_acl NULL
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 2a281cead2bc..c12ebffc94da 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -124,7 +124,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
struct rw_semaphore *rw_lock = NULL;
if (is_mounted(sbi)) {
- if (!is_mft) {
+ if (!is_mft && mft_ni) {
rw_lock = &mft_ni->file.run_lock;
down_read(rw_lock);
}
@@ -148,7 +148,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
ni_lock(mft_ni);
down_write(rw_lock);
}
- err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
+ err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
vbo >> sbi->cluster_bits);
if (rw_lock) {
up_write(rw_lock);
@@ -180,6 +180,12 @@ ok:
return 0;
out:
+ if (err == -E_NTFS_CORRUPT) {
+ ntfs_err(sbi->sb, "mft corrupted");
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ err = -EINVAL;
+ }
+
return err;
}
@@ -296,7 +302,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
*/
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
enum ATTR_TYPE type, const __le16 *name,
- size_t name_len, const __le16 *id)
+ u8 name_len, const __le16 *id)
{
u32 type_in = le32_to_cpu(type);
u32 atype;
@@ -382,6 +388,8 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
rec->seq = cpu_to_le16(seq);
rec->flags = RECORD_FLAG_IN_USE | flags;
+ if (MFTRECORD_FIXUP_OFFSET == MFTRECORD_FIXUP_OFFSET_3)
+ rec->mft_record = cpu_to_le32(rno);
mi->dirty = true;
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index 47612d16c027..cb8cf0161177 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -434,8 +434,8 @@ requires_new_range:
if (should_add_tail) {
tail_lcn = r->lcn == SPARSE_LCN ?
- SPARSE_LCN :
- (r->lcn + Tovcn);
+ SPARSE_LCN :
+ (r->lcn + Tovcn);
tail_vcn = r->vcn + Tovcn;
tail_len = r->len - Tovcn;
}
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 5158dd31fd97..1a02072b6b0e 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -57,6 +57,7 @@
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/nls.h>
+#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/statfs.h>
@@ -116,8 +117,8 @@ void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
/* Use static allocated buffer, if possible. */
name = atomic_dec_and_test(&s_name_buf_cnt) ?
- s_name_buf :
- kmalloc(sizeof(s_name_buf), GFP_NOFS);
+ s_name_buf :
+ kmalloc(sizeof(s_name_buf), GFP_NOFS);
if (name) {
struct dentry *de = d_find_alias(inode);
@@ -257,6 +258,7 @@ enum Opt {
Opt_err,
};
+// clang-format off
static const struct fs_parameter_spec ntfs_fs_parameters[] = {
fsparam_u32("uid", Opt_uid),
fsparam_u32("gid", Opt_gid),
@@ -277,9 +279,13 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = {
fsparam_flag_no("nocase", Opt_nocase),
{}
};
+// clang-format on
/*
* Load nls table or if @nls is utf8 then return NULL.
+ *
+ * It would be better to take "const char *nls" here,
+ * but load_nls() accepts "char *".
*/
static struct nls_table *ntfs_load_nls(char *nls)
{
@@ -436,6 +442,103 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
return 0;
}
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *proc_info_root;
+
+/*
+ * ntfs3_volinfo:
+ *
+ * The content of /proc/fs/ntfs3/<dev>/volinfo
+ *
+ * ntfs3.1
+ * cluster size
+ * number of clusters
+*/
+static int ntfs3_volinfo(struct seq_file *m, void *o)
+{
+ struct super_block *sb = m->private;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ seq_printf(m, "ntfs%d.%d\n%u\n%zu\n", sbi->volume.major_ver,
+ sbi->volume.minor_ver, sbi->cluster_size,
+ sbi->used.bitmap.nbits);
+
+ return 0;
+}
+
+static int ntfs3_volinfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ntfs3_volinfo, pde_data(inode));
+}
+
+/* read /proc/fs/ntfs3/<dev>/label */
+static int ntfs3_label_show(struct seq_file *m, void *o)
+{
+ struct super_block *sb = m->private;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ seq_printf(m, "%s\n", sbi->volume.label);
+
+ return 0;
+}
+
+/* write /proc/fs/ntfs3/<dev>/label */
+static ssize_t ntfs3_label_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int err;
+ struct super_block *sb = pde_data(file_inode(file));
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+ ssize_t ret = count;
+ u8 *label = kmalloc(count, GFP_NOFS);
+
+ if (!label)
+ return -ENOMEM;
+
+ if (copy_from_user(label, buffer, ret)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ while (ret > 0 && label[ret - 1] == '\n')
+ ret -= 1;
+
+ err = ntfs_set_label(sbi, label, ret);
+
+ if (err < 0) {
+ ntfs_err(sb, "failed (%d) to write label", err);
+ ret = err;
+ goto out;
+ }
+
+ *ppos += count;
+ ret = count;
+out:
+ kfree(label);
+ return ret;
+}
+
+static int ntfs3_label_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ntfs3_label_show, pde_data(inode));
+}
+
+static const struct proc_ops ntfs3_volinfo_fops = {
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_open = ntfs3_volinfo_open,
+};
+
+static const struct proc_ops ntfs3_label_fops = {
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_open = ntfs3_label_open,
+ .proc_write = ntfs3_label_write,
+};
+
+#endif
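A hedged userspace sketch (not part of the patch) of how the new proc entries would typically be exercised; the device name "sda1" and the label value are assumptions for illustration:

/* Read /proc/fs/ntfs3/<dev>/volinfo and set /proc/fs/ntfs3/<dev>/label. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/proc/fs/ntfs3/sda1/volinfo", O_RDONLY);

	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);	/* version, cluster size, number of clusters */
		}
		close(fd);
	}

	fd = open("/proc/fs/ntfs3/sda1/label", O_WRONLY);
	if (fd >= 0) {
		/* ntfs3_label_write() strips the trailing newline before ntfs_set_label(). */
		write(fd, "backup_disk\n", 12);
		close(fd);
	}
	return 0;
}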
+
static struct kmem_cache *ntfs_inode_cachep;
static struct inode *ntfs_alloc_inode(struct super_block *sb)
@@ -510,6 +613,16 @@ static void ntfs_put_super(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
+#ifdef CONFIG_PROC_FS
+ // Remove /proc/fs/ntfs3/..
+ if (sbi->procdir) {
+ remove_proc_entry("label", sbi->procdir);
+ remove_proc_entry("volinfo", sbi->procdir);
+ remove_proc_entry(sb->s_id, proc_info_root);
+ sbi->procdir = NULL;
+ }
+#endif
+
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
@@ -711,9 +824,16 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
/*
* ntfs_init_from_boot - Init internal info from on-disk boot sector.
+ *
+ * NTFS mount begins from the boot sector - a specially formatted 512 bytes.
+ * There are two boot sectors: the first and the last 512 bytes of the volume.
+ * The boot sector content does not change during the life of the filesystem.
+ *
+ * NOTE: ntfs.sys checks only the first (primary) boot sector;
+ * chkdsk checks both.
*/
static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
- u64 dev_size)
+ u64 dev_size, struct NTFS_BOOT **boot2)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
int err;
@@ -724,6 +844,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
struct MFT_REC *rec;
u16 fn, ao;
u8 cluster_bits;
+ u32 boot_off = 0;
+ const char *hint = "Primary boot";
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
@@ -731,11 +853,12 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
if (!bh)
return -EIO;
+check_boot:
err = -EINVAL;
- boot = (struct NTFS_BOOT *)bh->b_data;
+ boot = (struct NTFS_BOOT *)Add2Ptr(bh->b_data, boot_off);
if (memcmp(boot->system_id, "NTFS ", sizeof("NTFS ") - 1)) {
- ntfs_err(sb, "Boot's signature is not NTFS.");
+ ntfs_err(sb, "%s signature is not NTFS.", hint);
goto out;
}
@@ -748,14 +871,16 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
boot->bytes_per_sector[0];
if (boot_sector_size < SECTOR_SIZE ||
!is_power_of_2(boot_sector_size)) {
- ntfs_err(sb, "Invalid bytes per sector %u.", boot_sector_size);
+ ntfs_err(sb, "%s: invalid bytes per sector %u.", hint,
+ boot_sector_size);
goto out;
}
/* cluster size: 512, 1K, 2K, 4K, ... 2M */
sct_per_clst = true_sectors_per_clst(boot);
if ((int)sct_per_clst < 0 || !is_power_of_2(sct_per_clst)) {
- ntfs_err(sb, "Invalid sectors per cluster %u.", sct_per_clst);
+ ntfs_err(sb, "%s: invalid sectors per cluster %u.", hint,
+ sct_per_clst);
goto out;
}
@@ -771,20 +896,20 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
if (mlcn * sct_per_clst >= sectors || mlcn2 * sct_per_clst >= sectors) {
ntfs_err(
sb,
- "Start of MFT 0x%llx (0x%llx) is out of volume 0x%llx.",
- mlcn, mlcn2, sectors);
+ "%s: start of MFT 0x%llx (0x%llx) is out of volume 0x%llx.",
+ hint, mlcn, mlcn2, sectors);
goto out;
}
sbi->record_size = record_size =
boot->record_size < 0 ? 1 << (-boot->record_size) :
- (u32)boot->record_size << cluster_bits;
+ (u32)boot->record_size << cluster_bits;
sbi->record_bits = blksize_bits(record_size);
sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
/* Check MFT record size. */
if (record_size < SECTOR_SIZE || !is_power_of_2(record_size)) {
- ntfs_err(sb, "Invalid bytes per MFT record %u (%d).",
+ ntfs_err(sb, "%s: invalid bytes per MFT record %u (%d).", hint,
record_size, boot->record_size);
goto out;
}
@@ -796,18 +921,18 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
}
sbi->index_size = boot->index_size < 0 ?
- 1u << (-boot->index_size) :
- (u32)boot->index_size << cluster_bits;
+ 1u << (-boot->index_size) :
+ (u32)boot->index_size << cluster_bits;
/* Check index record size. */
if (sbi->index_size < SECTOR_SIZE || !is_power_of_2(sbi->index_size)) {
- ntfs_err(sb, "Invalid bytes per index %u(%d).", sbi->index_size,
- boot->index_size);
+ ntfs_err(sb, "%s: invalid bytes per index %u(%d).", hint,
+ sbi->index_size, boot->index_size);
goto out;
}
if (sbi->index_size > MAXIMUM_BYTES_PER_INDEX) {
- ntfs_err(sb, "Unsupported bytes per index %u.",
+ ntfs_err(sb, "%s: unsupported bytes per index %u.", hint,
sbi->index_size);
goto out;
}
@@ -834,7 +959,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
/* Compare boot's cluster and sector. */
if (sbi->cluster_size < boot_sector_size) {
- ntfs_err(sb, "Invalid bytes per cluster (%u).",
+ ntfs_err(sb, "%s: invalid bytes per cluster (%u).", hint,
sbi->cluster_size);
goto out;
}
@@ -850,7 +975,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
}
sbi->max_bytes_per_attr =
- record_size - ALIGN(MFTRECORD_FIXUP_OFFSET_1, 8) -
+ record_size - ALIGN(MFTRECORD_FIXUP_OFFSET, 8) -
ALIGN(((record_size >> SECTOR_SHIFT) * sizeof(short)), 8) -
ALIGN(sizeof(enum ATTR_TYPE), 8);
@@ -892,10 +1017,10 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->new_rec = rec;
rec->rhdr.sign = NTFS_FILE_SIGNATURE;
- rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET_1);
+ rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET);
fn = (sbi->record_size >> SECTOR_SHIFT) + 1;
rec->rhdr.fix_num = cpu_to_le16(fn);
- ao = ALIGN(MFTRECORD_FIXUP_OFFSET_1 + sizeof(short) * fn, 8);
+ ao = ALIGN(MFTRECORD_FIXUP_OFFSET + sizeof(short) * fn, 8);
rec->attr_off = cpu_to_le16(ao);
rec->used = cpu_to_le32(ao + ALIGN(sizeof(enum ATTR_TYPE), 8));
rec->total = cpu_to_le32(sbi->record_size);
@@ -930,7 +1055,34 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
err = 0;
+ if (bh->b_blocknr && !sb_rdonly(sb)) {
+ /*
+ * The alternative boot is ok but the primary one is not.
+ * Do not update the primary boot here because it may be a faked boot.
+ * Let ntfs be mounted and update the boot later.
+ */
+ *boot2 = kmemdup(boot, sizeof(*boot), GFP_NOFS | __GFP_NOWARN);
+ }
+
out:
+ if (err == -EINVAL && !bh->b_blocknr && dev_size > PAGE_SHIFT) {
+ u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
+ u64 lbo = dev_size - sizeof(*boot);
+
+ /*
+ * Try alternative boot (last sector)
+ */
+ brelse(bh);
+
+ sb_set_blocksize(sb, block_size);
+ bh = ntfs_bread(sb, lbo >> blksize_bits(block_size));
+ if (!bh)
+ return -EINVAL;
+
+ boot_off = lbo & (block_size - 1);
+ hint = "Alternative boot";
+ goto check_boot;
+ }
brelse(bh);
return err;
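A small worked example (a sketch under assumed values, not part of the patch) of where the fallback above looks for the alternative boot sector, e.g. on a hypothetical 1 GiB device with a 4 KiB block size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dev_size = 1024ull * 1024 * 1024;	/* device size in bytes (assumed) */
	uint32_t boot_size = 512;			/* sizeof(struct NTFS_BOOT) */
	uint32_t block_size = 4096;			/* min(sector_size, PAGE_SIZE) */
	uint64_t lbo = dev_size - boot_size;		/* byte offset of the last 512 bytes */

	/* Mirrors: ntfs_bread(sb, lbo >> blksize_bits(block_size)); boot_off = lbo & (block_size - 1); */
	printf("alternative boot: block %llu, offset 0x%llx within block\n",
	       (unsigned long long)(lbo / block_size),
	       (unsigned long long)(lbo & (block_size - 1)));
	return 0;
}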
@@ -955,6 +1107,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
struct ATTR_DEF_ENTRY *t;
u16 *shared;
struct MFT_REF ref;
+ bool ro = sb_rdonly(sb);
+ struct NTFS_BOOT *boot2 = NULL;
ref.high = 0;
@@ -985,7 +1139,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
/* Parse boot. */
err = ntfs_init_from_boot(sb, bdev_logical_block_size(bdev),
- bdev_nr_bytes(bdev));
+ bdev_nr_bytes(bdev), &boot2);
if (err)
goto out;
@@ -1035,6 +1189,10 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
sbi->volume.minor_ver = info->minor_ver;
sbi->volume.flags = info->flags;
sbi->volume.ni = ni;
+ if (info->flags & VOLUME_FLAG_DIRTY) {
+ sbi->volume.real_dirty = true;
+ ntfs_info(sb, "It is recommended to use chkdsk.");
+ }
/* Load $MFTMirr to estimate recs_mirr. */
ref.low = cpu_to_le32(MFT_REC_MIRR);
@@ -1069,21 +1227,16 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
iput(inode);
- if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
- if (!sb_rdonly(sb)) {
- ntfs_warn(sb,
- "failed to replay log file. Can't mount rw!");
- err = -EINVAL;
- goto out;
- }
- } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
- if (!sb_rdonly(sb) && !options->force) {
- ntfs_warn(
- sb,
- "volume is dirty and \"force\" flag is not set!");
- err = -EINVAL;
- goto out;
- }
+ if ((sbi->flags & NTFS_FLAGS_NEED_REPLAY) && !ro) {
+ ntfs_warn(sb, "failed to replay log file. Can't mount rw!");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if ((sbi->volume.flags & VOLUME_FLAG_DIRTY) && !ro && !options->force) {
+ ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
+ err = -EINVAL;
+ goto out;
}
/* Load $MFT. */
@@ -1173,7 +1326,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
bad_len += len;
bad_frags += 1;
- if (sb_rdonly(sb))
+ if (ro)
continue;
if (wnd_set_used_safe(&sbi->used.bitmap, lcn, len, &tt) || tt) {
@@ -1368,6 +1521,44 @@ load_root:
goto put_inode_out;
}
+ if (boot2) {
+ /*
+ * The alternative boot is ok but the primary one is not.
+ * Volume is recognized as NTFS. Update primary boot.
+ */
+ struct buffer_head *bh0 = sb_getblk(sb, 0);
+ if (bh0) {
+ if (buffer_locked(bh0))
+ __wait_on_buffer(bh0);
+
+ lock_buffer(bh0);
+ memcpy(bh0->b_data, boot2, sizeof(*boot2));
+ set_buffer_uptodate(bh0);
+ mark_buffer_dirty(bh0);
+ unlock_buffer(bh0);
+ if (!sync_dirty_buffer(bh0))
+ ntfs_warn(sb, "primary boot is updated");
+ put_bh(bh0);
+ }
+
+ kfree(boot2);
+ }
+
+#ifdef CONFIG_PROC_FS
+ /* Create /proc/fs/ntfs3/.. */
+ if (proc_info_root) {
+ struct proc_dir_entry *e = proc_mkdir(sb->s_id, proc_info_root);
+ static_assert((S_IRUGO | S_IWUSR) == 0644);
+ if (e) {
+ proc_create_data("volinfo", S_IRUGO, e,
+ &ntfs3_volinfo_fops, sb);
+ proc_create_data("label", S_IRUGO | S_IWUSR, e,
+ &ntfs3_label_fops, sb);
+ sbi->procdir = e;
+ }
+ }
+#endif
+
return 0;
put_inode_out:
@@ -1380,6 +1571,7 @@ out:
put_mount_options(sbi->options);
put_ntfs(sbi);
sb->s_fs_info = NULL;
+ kfree(boot2);
return err;
}
@@ -1473,12 +1665,14 @@ static void ntfs_fs_free(struct fs_context *fc)
put_mount_options(opts);
}
+// clang-format off
static const struct fs_context_operations ntfs_context_ops = {
.parse_param = ntfs_fs_parse_param,
.get_tree = ntfs_fs_get_tree,
.reconfigure = ntfs_fs_reconfigure,
.free = ntfs_fs_free,
};
+// clang-format on
/*
* ntfs_init_fs_context - Initialize sbi and opts
@@ -1559,6 +1753,12 @@ static int __init init_ntfs_fs(void)
if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
+
+#ifdef CONFIG_PROC_FS
+ /* Create "/proc/fs/ntfs3" */
+ proc_info_root = proc_mkdir("fs/ntfs3", NULL);
+#endif
+
err = ntfs3_init_bitmap();
if (err)
return err;
@@ -1590,6 +1790,12 @@ static void __exit exit_ntfs_fs(void)
kmem_cache_destroy(ntfs_inode_cachep);
unregister_filesystem(&ntfs_fs_type);
ntfs3_exit_bitmap();
+
+#ifdef CONFIG_PROC_FS
+ if (proc_info_root)
+ remove_proc_entry("fs/ntfs3", NULL);
+#endif
+
}
MODULE_LICENSE("GPL");
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index c3de60a4543f..023f314e8950 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -24,7 +24,7 @@
static inline size_t unpacked_ea_size(const struct EA_FULL *ea)
{
return ea->size ? le32_to_cpu(ea->size) :
- ALIGN(struct_size(ea, name,
+ ALIGN(struct_size(ea, name,
1 + ea->name_len +
le16_to_cpu(ea->elength)),
4);
@@ -141,6 +141,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
memset(Add2Ptr(ea_p, size), 0, add_bytes);
+ err = -EINVAL;
/* Check all attributes for consistency. */
for (off = 0; off < size; off += ea_size) {
const struct EA_FULL *ef = Add2Ptr(ea_p, off);
@@ -214,6 +215,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
ea = Add2Ptr(ea_all, off);
ea_size = unpacked_ea_size(ea);
+ if (!ea->name_len)
+ break;
+
if (buffer) {
if (ret + ea->name_len + 1 > bytes_per_buffer) {
err = -ERANGE;
@@ -524,8 +528,8 @@ out:
/*
* ntfs_get_acl - inode_operations::get_acl
*/
-struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap,
- struct dentry *dentry, int type)
+struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ int type)
{
struct inode *inode = d_inode(dentry);
struct ntfs_inode *ni = ntfs_i(inode);
@@ -592,8 +596,7 @@ static noinline int ntfs_set_acl_ex(struct mnt_idmap *idmap,
case ACL_TYPE_ACCESS:
/* Do not change i_mode if we are in init_acl */
if (acl && !init_acl) {
- err = posix_acl_update_mode(idmap, inode, &mode,
- &acl);
+ err = posix_acl_update_mode(idmap, inode, &mode, &acl);
if (err)
return err;
}
@@ -816,10 +819,9 @@ out:
* ntfs_setxattr - inode_operations::setxattr
*/
static noinline int ntfs_setxattr(const struct xattr_handler *handler,
- struct mnt_idmap *idmap,
- struct dentry *de, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
+ struct mnt_idmap *idmap, struct dentry *de,
+ struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
{
int err = -EINVAL;
struct ntfs_inode *ni = ntfs_i(inode);
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
index 304d12186ccd..3123da7cfb30 100644
--- a/fs/ocfs2/Kconfig
+++ b/fs/ocfs2/Kconfig
@@ -17,9 +17,9 @@ config OCFS2_FS
You'll want to install the ocfs2-tools package in order to at least
get "mount.ocfs2".
- Project web page: https://oss.oracle.com/projects/ocfs2
- Tools web page: https://oss.oracle.com/projects/ocfs2-tools
- OCFS2 mailing lists: https://oss.oracle.com/projects/ocfs2/mailman/
+ Project web page: https://ocfs2.wiki.kernel.org/
+ Tools web page: https://github.com/markfasheh/ocfs2-tools
+ OCFS2 mailing lists: https://subspace.kernel.org/lists.linux.dev.html
For more information on OCFS2, see the file
<file:Documentation/filesystems/ocfs2.rst>.
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 4142d1a457ff..9402591f12aa 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -70,14 +70,6 @@ enum {
OVL_XINO_ON,
};
-/* The set of options that user requested explicitly via mount options */
-struct ovl_opt_set {
- bool metacopy;
- bool redirect;
- bool nfs_export;
- bool index;
-};
-
/*
* The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
* where:
@@ -368,30 +360,6 @@ static inline bool ovl_open_flags_need_copy_up(int flags)
return ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC));
}
-
-/* params.c */
-#define OVL_MAX_STACK 500
-
-struct ovl_fs_context_layer {
- char *name;
- struct path path;
-};
-
-struct ovl_fs_context {
- struct path upper;
- struct path work;
- size_t capacity;
- size_t nr; /* includes nr_data */
- size_t nr_data;
- struct ovl_opt_set set;
- struct ovl_fs_context_layer *lower;
-};
-
-int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
- bool workdir);
-int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc);
-void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx);
-
/* util.c */
int ovl_want_write(struct dentry *dentry);
void ovl_drop_write(struct dentry *dentry);
@@ -791,3 +759,12 @@ int ovl_set_origin(struct ovl_fs *ofs, struct dentry *lower,
/* export.c */
extern const struct export_operations ovl_export_operations;
+
+/* super.c */
+int ovl_fill_super(struct super_block *sb, struct fs_context *fc);
+
+/* Will this overlay be forced to mount/remount ro? */
+static inline bool ovl_force_readonly(struct ovl_fs *ofs)
+{
+ return (!ovl_upper_mnt(ofs) || !ofs->workdir);
+}
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index d17d6b483dd0..a63160dbb0f9 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -1,12 +1,124 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/fs.h>
+#include <linux/module.h>
#include <linux/namei.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/posix_acl_xattr.h>
+#include <linux/seq_file.h>
#include <linux/xattr.h>
#include "overlayfs.h"
+#include "params.h"
+
+static bool ovl_redirect_dir_def = IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_DIR);
+module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644);
+MODULE_PARM_DESC(redirect_dir,
+ "Default to on or off for the redirect_dir feature");
+
+static bool ovl_redirect_always_follow =
+ IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW);
+module_param_named(redirect_always_follow, ovl_redirect_always_follow,
+ bool, 0644);
+MODULE_PARM_DESC(redirect_always_follow,
+ "Follow redirects even if redirect_dir feature is turned off");
+
+static bool ovl_xino_auto_def = IS_ENABLED(CONFIG_OVERLAY_FS_XINO_AUTO);
+module_param_named(xino_auto, ovl_xino_auto_def, bool, 0644);
+MODULE_PARM_DESC(xino_auto,
+ "Auto enable xino feature");
+
+static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX);
+module_param_named(index, ovl_index_def, bool, 0644);
+MODULE_PARM_DESC(index,
+ "Default to on or off for the inodes index feature");
+
+static bool ovl_nfs_export_def = IS_ENABLED(CONFIG_OVERLAY_FS_NFS_EXPORT);
+module_param_named(nfs_export, ovl_nfs_export_def, bool, 0644);
+MODULE_PARM_DESC(nfs_export,
+ "Default to on or off for the NFS export feature");
+
+static bool ovl_metacopy_def = IS_ENABLED(CONFIG_OVERLAY_FS_METACOPY);
+module_param_named(metacopy, ovl_metacopy_def, bool, 0644);
+MODULE_PARM_DESC(metacopy,
+ "Default to on or off for the metadata only copy up feature");
+
+enum {
+ Opt_lowerdir,
+ Opt_upperdir,
+ Opt_workdir,
+ Opt_default_permissions,
+ Opt_redirect_dir,
+ Opt_index,
+ Opt_uuid,
+ Opt_nfs_export,
+ Opt_userxattr,
+ Opt_xino,
+ Opt_metacopy,
+ Opt_volatile,
+};
+
+static const struct constant_table ovl_parameter_bool[] = {
+ { "on", true },
+ { "off", false },
+ {}
+};
+
+static const struct constant_table ovl_parameter_xino[] = {
+ { "off", OVL_XINO_OFF },
+ { "auto", OVL_XINO_AUTO },
+ { "on", OVL_XINO_ON },
+ {}
+};
+
+const char *ovl_xino_mode(struct ovl_config *config)
+{
+ return ovl_parameter_xino[config->xino].name;
+}
+
+static int ovl_xino_def(void)
+{
+ return ovl_xino_auto_def ? OVL_XINO_AUTO : OVL_XINO_OFF;
+}
+
+const struct constant_table ovl_parameter_redirect_dir[] = {
+ { "off", OVL_REDIRECT_OFF },
+ { "follow", OVL_REDIRECT_FOLLOW },
+ { "nofollow", OVL_REDIRECT_NOFOLLOW },
+ { "on", OVL_REDIRECT_ON },
+ {}
+};
+
+static const char *ovl_redirect_mode(struct ovl_config *config)
+{
+ return ovl_parameter_redirect_dir[config->redirect_mode].name;
+}
+
+static int ovl_redirect_mode_def(void)
+{
+ return ovl_redirect_dir_def ? OVL_REDIRECT_ON :
+ ovl_redirect_always_follow ? OVL_REDIRECT_FOLLOW :
+ OVL_REDIRECT_NOFOLLOW;
+}
+
+#define fsparam_string_empty(NAME, OPT) \
+ __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
+
+const struct fs_parameter_spec ovl_parameter_spec[] = {
+ fsparam_string_empty("lowerdir", Opt_lowerdir),
+ fsparam_string("upperdir", Opt_upperdir),
+ fsparam_string("workdir", Opt_workdir),
+ fsparam_flag("default_permissions", Opt_default_permissions),
+ fsparam_enum("redirect_dir", Opt_redirect_dir, ovl_parameter_redirect_dir),
+ fsparam_enum("index", Opt_index, ovl_parameter_bool),
+ fsparam_enum("uuid", Opt_uuid, ovl_parameter_bool),
+ fsparam_enum("nfs_export", Opt_nfs_export, ovl_parameter_bool),
+ fsparam_flag("userxattr", Opt_userxattr),
+ fsparam_enum("xino", Opt_xino, ovl_parameter_xino),
+ fsparam_enum("metacopy", Opt_metacopy, ovl_parameter_bool),
+ fsparam_flag("volatile", Opt_volatile),
+ {}
+};
static ssize_t ovl_parse_param_split_lowerdirs(char *str)
{
@@ -110,8 +222,8 @@ static int ovl_mount_dir(const char *name, struct path *path)
return err;
}
-int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
- bool workdir)
+static int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
+ bool workdir)
{
int err;
struct ovl_fs *ofs = fc->s_fs_info;
@@ -154,7 +266,7 @@ int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
return 0;
}
-void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
+static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
{
for (size_t nr = 0; nr < ctx->nr; nr++) {
path_put(&ctx->lower[nr].path);
@@ -179,7 +291,7 @@ void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
* Append data "/lower5" as data lower layer. This requires that
* there's at least one regular lower layer present.
*/
-int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
+static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
{
int err;
struct ovl_fs_context *ctx = fc->fs_private;
@@ -387,3 +499,415 @@ out_err:
/* Intentionally don't realloc to a smaller size. */
return err;
}
+
+static int ovl_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ int err = 0;
+ struct fs_parse_result result;
+ struct ovl_fs *ofs = fc->s_fs_info;
+ struct ovl_config *config = &ofs->config;
+ struct ovl_fs_context *ctx = fc->fs_private;
+ int opt;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ /*
+ * On remount overlayfs has always ignored all mount
+ * options no matter if malformed or not so for
+ * backwards compatibility we do the same here.
+ */
+ if (fc->oldapi)
+ return 0;
+
+ /*
+ * Give us the freedom to allow changing mount options
+ * with the new mount api in the future. So instead of
+ * silently ignoring everything we report a proper
+ * error. This is only visible for users of the new
+ * mount api.
+ */
+ return invalfc(fc, "No changes allowed in reconfigure");
+ }
+
+ opt = fs_parse(fc, ovl_parameter_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_lowerdir:
+ err = ovl_parse_param_lowerdir(param->string, fc);
+ break;
+ case Opt_upperdir:
+ fallthrough;
+ case Opt_workdir:
+ err = ovl_parse_param_upperdir(param->string, fc,
+ (Opt_workdir == opt));
+ break;
+ case Opt_default_permissions:
+ config->default_permissions = true;
+ break;
+ case Opt_redirect_dir:
+ config->redirect_mode = result.uint_32;
+ if (config->redirect_mode == OVL_REDIRECT_OFF) {
+ config->redirect_mode = ovl_redirect_always_follow ?
+ OVL_REDIRECT_FOLLOW :
+ OVL_REDIRECT_NOFOLLOW;
+ }
+ ctx->set.redirect = true;
+ break;
+ case Opt_index:
+ config->index = result.uint_32;
+ ctx->set.index = true;
+ break;
+ case Opt_uuid:
+ config->uuid = result.uint_32;
+ break;
+ case Opt_nfs_export:
+ config->nfs_export = result.uint_32;
+ ctx->set.nfs_export = true;
+ break;
+ case Opt_xino:
+ config->xino = result.uint_32;
+ break;
+ case Opt_metacopy:
+ config->metacopy = result.uint_32;
+ ctx->set.metacopy = true;
+ break;
+ case Opt_volatile:
+ config->ovl_volatile = true;
+ break;
+ case Opt_userxattr:
+ config->userxattr = true;
+ break;
+ default:
+ pr_err("unrecognized mount option \"%s\" or missing value\n",
+ param->key);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int ovl_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, ovl_fill_super);
+}
+
+static inline void ovl_fs_context_free(struct ovl_fs_context *ctx)
+{
+ ovl_parse_param_drop_lowerdir(ctx);
+ path_put(&ctx->upper);
+ path_put(&ctx->work);
+ kfree(ctx->lower);
+ kfree(ctx);
+}
+
+static void ovl_free(struct fs_context *fc)
+{
+ struct ovl_fs *ofs = fc->s_fs_info;
+ struct ovl_fs_context *ctx = fc->fs_private;
+
+ /*
+ * ofs is stored in the fs_context when it is initialized.
+ * ofs is transferred to the superblock on a successful mount,
+ * but if an error occurs before the transfer we have to free
+ * it here.
+ */
+ if (ofs)
+ ovl_free_fs(ofs);
+
+ if (ctx)
+ ovl_fs_context_free(ctx);
+}
+
+static int ovl_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ struct ovl_fs *ofs = sb->s_fs_info;
+ struct super_block *upper_sb;
+ int ret = 0;
+
+ if (!(fc->sb_flags & SB_RDONLY) && ovl_force_readonly(ofs))
+ return -EROFS;
+
+ if (fc->sb_flags & SB_RDONLY && !sb_rdonly(sb)) {
+ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
+ if (ovl_should_sync(ofs)) {
+ down_read(&upper_sb->s_umount);
+ ret = sync_filesystem(upper_sb);
+ up_read(&upper_sb->s_umount);
+ }
+ }
+
+ return ret;
+}
+
+static const struct fs_context_operations ovl_context_ops = {
+ .parse_param = ovl_parse_param,
+ .get_tree = ovl_get_tree,
+ .reconfigure = ovl_reconfigure,
+ .free = ovl_free,
+};
+
+/*
+ * This is called during fsopen() and will record the user namespace of
+ * the caller in fc->user_ns since we've raised FS_USERNS_MOUNT. We'll
+ * need it when we actually create the superblock to verify that the
+ * process creating the superblock is in the same user namespace as
+ * process that called fsopen().
+ */
+int ovl_init_fs_context(struct fs_context *fc)
+{
+ struct ovl_fs_context *ctx;
+ struct ovl_fs *ofs;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
+ if (!ctx)
+ return -ENOMEM;
+
+ /*
+ * By default we allocate for three lower layers. It's likely
+ * that it'll cover most users.
+ */
+ ctx->lower = kmalloc_array(3, sizeof(*ctx->lower), GFP_KERNEL_ACCOUNT);
+ if (!ctx->lower)
+ goto out_err;
+ ctx->capacity = 3;
+
+ ofs = kzalloc(sizeof(struct ovl_fs), GFP_KERNEL);
+ if (!ofs)
+ goto out_err;
+
+ ofs->config.redirect_mode = ovl_redirect_mode_def();
+ ofs->config.index = ovl_index_def;
+ ofs->config.uuid = true;
+ ofs->config.nfs_export = ovl_nfs_export_def;
+ ofs->config.xino = ovl_xino_def();
+ ofs->config.metacopy = ovl_metacopy_def;
+
+ fc->s_fs_info = ofs;
+ fc->fs_private = ctx;
+ fc->ops = &ovl_context_ops;
+ return 0;
+
+out_err:
+ ovl_fs_context_free(ctx);
+ return -ENOMEM;
+
+}
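For context, a hedged userspace sketch (not part of the patch) of the new mount API flow that ovl_init_fs_context() and ovl_parse_param() serve; the lower/upper/work paths and the target mount point are illustrative, and the example assumes fsopen()/fsconfig()/fsmount()/move_mount() are reachable through syscall():

#include <fcntl.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "overlay", 0);
	if (fsfd < 0)
		return 1;

	/* Each string below is routed through ovl_parse_param(). */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "lowerdir", "/lower", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "upperdir", "/upper", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "workdir", "/work", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "redirect_dir", "on", 0);
	/* Triggers ovl_get_tree() -> ovl_fill_super(). */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	if (mntfd < 0)
		return 1;
	syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);

	close(mntfd);
	close(fsfd);
	return 0;
}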
+
+void ovl_free_fs(struct ovl_fs *ofs)
+{
+ struct vfsmount **mounts;
+ unsigned i;
+
+ iput(ofs->workbasedir_trap);
+ iput(ofs->indexdir_trap);
+ iput(ofs->workdir_trap);
+ dput(ofs->whiteout);
+ dput(ofs->indexdir);
+ dput(ofs->workdir);
+ if (ofs->workdir_locked)
+ ovl_inuse_unlock(ofs->workbasedir);
+ dput(ofs->workbasedir);
+ if (ofs->upperdir_locked)
+ ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root);
+
+ /* Hack! Reuse ofs->layers as a vfsmount array before freeing it */
+ mounts = (struct vfsmount **) ofs->layers;
+ for (i = 0; i < ofs->numlayer; i++) {
+ iput(ofs->layers[i].trap);
+ mounts[i] = ofs->layers[i].mnt;
+ kfree(ofs->layers[i].name);
+ }
+ kern_unmount_array(mounts, ofs->numlayer);
+ kfree(ofs->layers);
+ for (i = 0; i < ofs->numfs; i++)
+ free_anon_bdev(ofs->fs[i].pseudo_dev);
+ kfree(ofs->fs);
+
+ kfree(ofs->config.upperdir);
+ kfree(ofs->config.workdir);
+ if (ofs->creator_cred)
+ put_cred(ofs->creator_cred);
+ kfree(ofs);
+}
+
+int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
+ struct ovl_config *config)
+{
+ struct ovl_opt_set set = ctx->set;
+
+ if (ctx->nr_data > 0 && !config->metacopy) {
+ pr_err("lower data-only dirs require metacopy support.\n");
+ return -EINVAL;
+ }
+
+ /* Workdir/index are useless in non-upper mount */
+ if (!config->upperdir) {
+ if (config->workdir) {
+ pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
+ config->workdir);
+ kfree(config->workdir);
+ config->workdir = NULL;
+ }
+ if (config->index && set.index) {
+ pr_info("option \"index=on\" is useless in a non-upper mount, ignore\n");
+ set.index = false;
+ }
+ config->index = false;
+ }
+
+ if (!config->upperdir && config->ovl_volatile) {
+ pr_info("option \"volatile\" is meaningless in a non-upper mount, ignoring it.\n");
+ config->ovl_volatile = false;
+ }
+
+ /*
+ * This is to make the logic below simpler. It doesn't make any other
+ * difference, since redirect_dir=on is only used for upper.
+ */
+ if (!config->upperdir && config->redirect_mode == OVL_REDIRECT_FOLLOW)
+ config->redirect_mode = OVL_REDIRECT_ON;
+
+ /* Resolve metacopy -> redirect_dir dependency */
+ if (config->metacopy && config->redirect_mode != OVL_REDIRECT_ON) {
+ if (set.metacopy && set.redirect) {
+ pr_err("conflicting options: metacopy=on,redirect_dir=%s\n",
+ ovl_redirect_mode(config));
+ return -EINVAL;
+ }
+ if (set.redirect) {
+ /*
+ * There was an explicit redirect_dir=... that resulted
+ * in this conflict.
+ */
+ pr_info("disabling metacopy due to redirect_dir=%s\n",
+ ovl_redirect_mode(config));
+ config->metacopy = false;
+ } else {
+ /* Automatically enable redirect otherwise. */
+ config->redirect_mode = OVL_REDIRECT_ON;
+ }
+ }
+
+ /* Resolve nfs_export -> index dependency */
+ if (config->nfs_export && !config->index) {
+ if (!config->upperdir &&
+ config->redirect_mode != OVL_REDIRECT_NOFOLLOW) {
+ pr_info("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
+ config->nfs_export = false;
+ } else if (set.nfs_export && set.index) {
+ pr_err("conflicting options: nfs_export=on,index=off\n");
+ return -EINVAL;
+ } else if (set.index) {
+ /*
+ * There was an explicit index=off that resulted
+ * in this conflict.
+ */
+ pr_info("disabling nfs_export due to index=off\n");
+ config->nfs_export = false;
+ } else {
+ /* Automatically enable index otherwise. */
+ config->index = true;
+ }
+ }
+
+ /* Resolve nfs_export -> !metacopy dependency */
+ if (config->nfs_export && config->metacopy) {
+ if (set.nfs_export && set.metacopy) {
+ pr_err("conflicting options: nfs_export=on,metacopy=on\n");
+ return -EINVAL;
+ }
+ if (set.metacopy) {
+ /*
+ * There was an explicit metacopy=on that resulted
+ * in this conflict.
+ */
+ pr_info("disabling nfs_export due to metacopy=on\n");
+ config->nfs_export = false;
+ } else {
+ /*
+ * There was an explicit nfs_export=on that resulted
+ * in this conflict.
+ */
+ pr_info("disabling metacopy due to nfs_export=on\n");
+ config->metacopy = false;
+ }
+ }
+
+
+ /* Resolve userxattr -> !redirect && !metacopy dependency */
+ if (config->userxattr) {
+ if (set.redirect &&
+ config->redirect_mode != OVL_REDIRECT_NOFOLLOW) {
+ pr_err("conflicting options: userxattr,redirect_dir=%s\n",
+ ovl_redirect_mode(config));
+ return -EINVAL;
+ }
+ if (config->metacopy && set.metacopy) {
+ pr_err("conflicting options: userxattr,metacopy=on\n");
+ return -EINVAL;
+ }
+ /*
+ * Silently disable default setting of redirect and metacopy.
+ * This shall be the default in the future as well: these
+ * options must be explicitly enabled if used together with
+ * userxattr.
+ */
+ config->redirect_mode = OVL_REDIRECT_NOFOLLOW;
+ config->metacopy = false;
+ }
+
+ return 0;
+}
+
+/**
+ * ovl_show_options
+ * @m: the seq_file handle
+ * @dentry: The dentry to query
+ *
+ * Prints the mount options for a given superblock.
+ * Returns zero; does not fail.
+ */
+int ovl_show_options(struct seq_file *m, struct dentry *dentry)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct ovl_fs *ofs = sb->s_fs_info;
+ size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
+ const struct ovl_layer *data_layers = &ofs->layers[nr_merged_lower];
+
+ /* ofs->layers[0] is the upper layer */
+ seq_printf(m, ",lowerdir=%s", ofs->layers[1].name);
+ /* dump regular lower layers */
+ for (nr = 2; nr < nr_merged_lower; nr++)
+ seq_printf(m, ":%s", ofs->layers[nr].name);
+ /* dump data lower layers */
+ for (nr = 0; nr < ofs->numdatalayer; nr++)
+ seq_printf(m, "::%s", data_layers[nr].name);
+ if (ofs->config.upperdir) {
+ seq_show_option(m, "upperdir", ofs->config.upperdir);
+ seq_show_option(m, "workdir", ofs->config.workdir);
+ }
+ if (ofs->config.default_permissions)
+ seq_puts(m, ",default_permissions");
+ if (ofs->config.redirect_mode != ovl_redirect_mode_def())
+ seq_printf(m, ",redirect_dir=%s",
+ ovl_redirect_mode(&ofs->config));
+ if (ofs->config.index != ovl_index_def)
+ seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off");
+ if (!ofs->config.uuid)
+ seq_puts(m, ",uuid=off");
+ if (ofs->config.nfs_export != ovl_nfs_export_def)
+ seq_printf(m, ",nfs_export=%s", ofs->config.nfs_export ?
+ "on" : "off");
+ if (ofs->config.xino != ovl_xino_def() && !ovl_same_fs(ofs))
+ seq_printf(m, ",xino=%s", ovl_xino_mode(&ofs->config));
+ if (ofs->config.metacopy != ovl_metacopy_def)
+ seq_printf(m, ",metacopy=%s",
+ ofs->config.metacopy ? "on" : "off");
+ if (ofs->config.ovl_volatile)
+ seq_puts(m, ",volatile");
+ if (ofs->config.userxattr)
+ seq_puts(m, ",userxattr");
+ return 0;
+}
diff --git a/fs/overlayfs/params.h b/fs/overlayfs/params.h
new file mode 100644
index 000000000000..8750da68ab2a
--- /dev/null
+++ b/fs/overlayfs/params.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+
+struct ovl_fs;
+struct ovl_config;
+
+extern const struct fs_parameter_spec ovl_parameter_spec[];
+extern const struct constant_table ovl_parameter_redirect_dir[];
+
+/* The set of options that user requested explicitly via mount options */
+struct ovl_opt_set {
+ bool metacopy;
+ bool redirect;
+ bool nfs_export;
+ bool index;
+};
+
+#define OVL_MAX_STACK 500
+
+struct ovl_fs_context_layer {
+ char *name;
+ struct path path;
+};
+
+struct ovl_fs_context {
+ struct path upper;
+ struct path work;
+ size_t capacity;
+ size_t nr; /* includes nr_data */
+ size_t nr_data;
+ struct ovl_opt_set set;
+ struct ovl_fs_context_layer *lower;
+};
+
+int ovl_init_fs_context(struct fs_context *fc);
+void ovl_free_fs(struct ovl_fs *ofs);
+int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
+ struct ovl_config *config);
+int ovl_show_options(struct seq_file *m, struct dentry *dentry);
+const char *ovl_xino_mode(struct ovl_config *config);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index c14c52560fd6..5b069f1a1e44 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -19,6 +19,7 @@
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include "overlayfs.h"
+#include "params.h"
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Overlay filesystem");
@@ -27,38 +28,6 @@ MODULE_LICENSE("GPL");
struct ovl_dir_cache;
-static bool ovl_redirect_dir_def = IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_DIR);
-module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644);
-MODULE_PARM_DESC(redirect_dir,
- "Default to on or off for the redirect_dir feature");
-
-static bool ovl_redirect_always_follow =
- IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW);
-module_param_named(redirect_always_follow, ovl_redirect_always_follow,
- bool, 0644);
-MODULE_PARM_DESC(redirect_always_follow,
- "Follow redirects even if redirect_dir feature is turned off");
-
-static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX);
-module_param_named(index, ovl_index_def, bool, 0644);
-MODULE_PARM_DESC(index,
- "Default to on or off for the inodes index feature");
-
-static bool ovl_nfs_export_def = IS_ENABLED(CONFIG_OVERLAY_FS_NFS_EXPORT);
-module_param_named(nfs_export, ovl_nfs_export_def, bool, 0644);
-MODULE_PARM_DESC(nfs_export,
- "Default to on or off for the NFS export feature");
-
-static bool ovl_xino_auto_def = IS_ENABLED(CONFIG_OVERLAY_FS_XINO_AUTO);
-module_param_named(xino_auto, ovl_xino_auto_def, bool, 0644);
-MODULE_PARM_DESC(xino_auto,
- "Auto enable xino feature");
-
-static bool ovl_metacopy_def = IS_ENABLED(CONFIG_OVERLAY_FS_METACOPY);
-module_param_named(metacopy, ovl_metacopy_def, bool, 0644);
-MODULE_PARM_DESC(metacopy,
- "Default to on or off for the metadata only copy up feature");
-
static struct dentry *ovl_d_real(struct dentry *dentry,
const struct inode *inode)
{
@@ -211,43 +180,6 @@ static void ovl_destroy_inode(struct inode *inode)
kfree(oi->lowerdata_redirect);
}
-static void ovl_free_fs(struct ovl_fs *ofs)
-{
- struct vfsmount **mounts;
- unsigned i;
-
- iput(ofs->workbasedir_trap);
- iput(ofs->indexdir_trap);
- iput(ofs->workdir_trap);
- dput(ofs->whiteout);
- dput(ofs->indexdir);
- dput(ofs->workdir);
- if (ofs->workdir_locked)
- ovl_inuse_unlock(ofs->workbasedir);
- dput(ofs->workbasedir);
- if (ofs->upperdir_locked)
- ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root);
-
- /* Hack! Reuse ofs->layers as a vfsmount array before freeing it */
- mounts = (struct vfsmount **) ofs->layers;
- for (i = 0; i < ofs->numlayer; i++) {
- iput(ofs->layers[i].trap);
- mounts[i] = ofs->layers[i].mnt;
- kfree(ofs->layers[i].name);
- }
- kern_unmount_array(mounts, ofs->numlayer);
- kfree(ofs->layers);
- for (i = 0; i < ofs->numfs; i++)
- free_anon_bdev(ofs->fs[i].pseudo_dev);
- kfree(ofs->fs);
-
- kfree(ofs->config.upperdir);
- kfree(ofs->config.workdir);
- if (ofs->creator_cred)
- put_cred(ofs->creator_cred);
- kfree(ofs);
-}
-
static void ovl_put_super(struct super_block *sb)
{
struct ovl_fs *ofs = sb->s_fs_info;
@@ -323,122 +255,6 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
return err;
}
-/* Will this overlay be forced to mount/remount ro? */
-static bool ovl_force_readonly(struct ovl_fs *ofs)
-{
- return (!ovl_upper_mnt(ofs) || !ofs->workdir);
-}
-
-static const struct constant_table ovl_parameter_redirect_dir[] = {
- { "off", OVL_REDIRECT_OFF },
- { "follow", OVL_REDIRECT_FOLLOW },
- { "nofollow", OVL_REDIRECT_NOFOLLOW },
- { "on", OVL_REDIRECT_ON },
- {}
-};
-
-static const char *ovl_redirect_mode(struct ovl_config *config)
-{
- return ovl_parameter_redirect_dir[config->redirect_mode].name;
-}
-
-static int ovl_redirect_mode_def(void)
-{
- return ovl_redirect_dir_def ? OVL_REDIRECT_ON :
- ovl_redirect_always_follow ? OVL_REDIRECT_FOLLOW :
- OVL_REDIRECT_NOFOLLOW;
-}
-
-static const struct constant_table ovl_parameter_xino[] = {
- { "off", OVL_XINO_OFF },
- { "auto", OVL_XINO_AUTO },
- { "on", OVL_XINO_ON },
- {}
-};
-
-static const char *ovl_xino_mode(struct ovl_config *config)
-{
- return ovl_parameter_xino[config->xino].name;
-}
-
-static inline int ovl_xino_def(void)
-{
- return ovl_xino_auto_def ? OVL_XINO_AUTO : OVL_XINO_OFF;
-}
-
-/**
- * ovl_show_options
- * @m: the seq_file handle
- * @dentry: The dentry to query
- *
- * Prints the mount options for a given superblock.
- * Returns zero; does not fail.
- */
-static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
-{
- struct super_block *sb = dentry->d_sb;
- struct ovl_fs *ofs = sb->s_fs_info;
- size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
- const struct ovl_layer *data_layers = &ofs->layers[nr_merged_lower];
-
- /* ofs->layers[0] is the upper layer */
- seq_printf(m, ",lowerdir=%s", ofs->layers[1].name);
- /* dump regular lower layers */
- for (nr = 2; nr < nr_merged_lower; nr++)
- seq_printf(m, ":%s", ofs->layers[nr].name);
- /* dump data lower layers */
- for (nr = 0; nr < ofs->numdatalayer; nr++)
- seq_printf(m, "::%s", data_layers[nr].name);
- if (ofs->config.upperdir) {
- seq_show_option(m, "upperdir", ofs->config.upperdir);
- seq_show_option(m, "workdir", ofs->config.workdir);
- }
- if (ofs->config.default_permissions)
- seq_puts(m, ",default_permissions");
- if (ofs->config.redirect_mode != ovl_redirect_mode_def())
- seq_printf(m, ",redirect_dir=%s",
- ovl_redirect_mode(&ofs->config));
- if (ofs->config.index != ovl_index_def)
- seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off");
- if (!ofs->config.uuid)
- seq_puts(m, ",uuid=off");
- if (ofs->config.nfs_export != ovl_nfs_export_def)
- seq_printf(m, ",nfs_export=%s", ofs->config.nfs_export ?
- "on" : "off");
- if (ofs->config.xino != ovl_xino_def() && !ovl_same_fs(ofs))
- seq_printf(m, ",xino=%s", ovl_xino_mode(&ofs->config));
- if (ofs->config.metacopy != ovl_metacopy_def)
- seq_printf(m, ",metacopy=%s",
- ofs->config.metacopy ? "on" : "off");
- if (ofs->config.ovl_volatile)
- seq_puts(m, ",volatile");
- if (ofs->config.userxattr)
- seq_puts(m, ",userxattr");
- return 0;
-}
-
-static int ovl_reconfigure(struct fs_context *fc)
-{
- struct super_block *sb = fc->root->d_sb;
- struct ovl_fs *ofs = sb->s_fs_info;
- struct super_block *upper_sb;
- int ret = 0;
-
- if (!(fc->sb_flags & SB_RDONLY) && ovl_force_readonly(ofs))
- return -EROFS;
-
- if (fc->sb_flags & SB_RDONLY && !sb_rdonly(sb)) {
- upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
- if (ovl_should_sync(ofs)) {
- down_read(&upper_sb->s_umount);
- ret = sync_filesystem(upper_sb);
- up_read(&upper_sb->s_umount);
- }
- }
-
- return ret;
-}
-
static const struct super_operations ovl_super_operations = {
.alloc_inode = ovl_alloc_inode,
.free_inode = ovl_free_inode,
@@ -450,262 +266,6 @@ static const struct super_operations ovl_super_operations = {
.show_options = ovl_show_options,
};
-enum {
- Opt_lowerdir,
- Opt_upperdir,
- Opt_workdir,
- Opt_default_permissions,
- Opt_redirect_dir,
- Opt_index,
- Opt_uuid,
- Opt_nfs_export,
- Opt_userxattr,
- Opt_xino,
- Opt_metacopy,
- Opt_volatile,
-};
-
-static const struct constant_table ovl_parameter_bool[] = {
- { "on", true },
- { "off", false },
- {}
-};
-
-#define fsparam_string_empty(NAME, OPT) \
- __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
-
-static const struct fs_parameter_spec ovl_parameter_spec[] = {
- fsparam_string_empty("lowerdir", Opt_lowerdir),
- fsparam_string("upperdir", Opt_upperdir),
- fsparam_string("workdir", Opt_workdir),
- fsparam_flag("default_permissions", Opt_default_permissions),
- fsparam_enum("redirect_dir", Opt_redirect_dir, ovl_parameter_redirect_dir),
- fsparam_enum("index", Opt_index, ovl_parameter_bool),
- fsparam_enum("uuid", Opt_uuid, ovl_parameter_bool),
- fsparam_enum("nfs_export", Opt_nfs_export, ovl_parameter_bool),
- fsparam_flag("userxattr", Opt_userxattr),
- fsparam_enum("xino", Opt_xino, ovl_parameter_xino),
- fsparam_enum("metacopy", Opt_metacopy, ovl_parameter_bool),
- fsparam_flag("volatile", Opt_volatile),
- {}
-};
-
-static int ovl_parse_param(struct fs_context *fc, struct fs_parameter *param)
-{
- int err = 0;
- struct fs_parse_result result;
- struct ovl_fs *ofs = fc->s_fs_info;
- struct ovl_config *config = &ofs->config;
- struct ovl_fs_context *ctx = fc->fs_private;
- int opt;
-
- if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
- /*
- * On remount overlayfs has always ignored all mount
- * options no matter if malformed or not so for
- * backwards compatibility we do the same here.
- */
- if (fc->oldapi)
- return 0;
-
- /*
- * Give us the freedom to allow changing mount options
- * with the new mount api in the future. So instead of
- * silently ignoring everything we report a proper
- * error. This is only visible for users of the new
- * mount api.
- */
- return invalfc(fc, "No changes allowed in reconfigure");
- }
-
- opt = fs_parse(fc, ovl_parameter_spec, param, &result);
- if (opt < 0)
- return opt;
-
- switch (opt) {
- case Opt_lowerdir:
- err = ovl_parse_param_lowerdir(param->string, fc);
- break;
- case Opt_upperdir:
- fallthrough;
- case Opt_workdir:
- err = ovl_parse_param_upperdir(param->string, fc,
- (Opt_workdir == opt));
- break;
- case Opt_default_permissions:
- config->default_permissions = true;
- break;
- case Opt_redirect_dir:
- config->redirect_mode = result.uint_32;
- if (config->redirect_mode == OVL_REDIRECT_OFF) {
- config->redirect_mode = ovl_redirect_always_follow ?
- OVL_REDIRECT_FOLLOW :
- OVL_REDIRECT_NOFOLLOW;
- }
- ctx->set.redirect = true;
- break;
- case Opt_index:
- config->index = result.uint_32;
- ctx->set.index = true;
- break;
- case Opt_uuid:
- config->uuid = result.uint_32;
- break;
- case Opt_nfs_export:
- config->nfs_export = result.uint_32;
- ctx->set.nfs_export = true;
- break;
- case Opt_xino:
- config->xino = result.uint_32;
- break;
- case Opt_metacopy:
- config->metacopy = result.uint_32;
- ctx->set.metacopy = true;
- break;
- case Opt_volatile:
- config->ovl_volatile = true;
- break;
- case Opt_userxattr:
- config->userxattr = true;
- break;
- default:
- pr_err("unrecognized mount option \"%s\" or missing value\n",
- param->key);
- return -EINVAL;
- }
-
- return err;
-}
-
-static int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
- struct ovl_config *config)
-{
- struct ovl_opt_set set = ctx->set;
-
- if (ctx->nr_data > 0 && !config->metacopy) {
- pr_err("lower data-only dirs require metacopy support.\n");
- return -EINVAL;
- }
-
- /* Workdir/index are useless in non-upper mount */
- if (!config->upperdir) {
- if (config->workdir) {
- pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
- config->workdir);
- kfree(config->workdir);
- config->workdir = NULL;
- }
- if (config->index && set.index) {
- pr_info("option \"index=on\" is useless in a non-upper mount, ignore\n");
- set.index = false;
- }
- config->index = false;
- }
-
- if (!config->upperdir && config->ovl_volatile) {
- pr_info("option \"volatile\" is meaningless in a non-upper mount, ignoring it.\n");
- config->ovl_volatile = false;
- }
-
- /*
- * This is to make the logic below simpler. It doesn't make any other
- * difference, since redirect_dir=on is only used for upper.
- */
- if (!config->upperdir && config->redirect_mode == OVL_REDIRECT_FOLLOW)
- config->redirect_mode = OVL_REDIRECT_ON;
-
- /* Resolve metacopy -> redirect_dir dependency */
- if (config->metacopy && config->redirect_mode != OVL_REDIRECT_ON) {
- if (set.metacopy && set.redirect) {
- pr_err("conflicting options: metacopy=on,redirect_dir=%s\n",
- ovl_redirect_mode(config));
- return -EINVAL;
- }
- if (set.redirect) {
- /*
- * There was an explicit redirect_dir=... that resulted
- * in this conflict.
- */
- pr_info("disabling metacopy due to redirect_dir=%s\n",
- ovl_redirect_mode(config));
- config->metacopy = false;
- } else {
- /* Automatically enable redirect otherwise. */
- config->redirect_mode = OVL_REDIRECT_ON;
- }
- }
-
- /* Resolve nfs_export -> index dependency */
- if (config->nfs_export && !config->index) {
- if (!config->upperdir &&
- config->redirect_mode != OVL_REDIRECT_NOFOLLOW) {
- pr_info("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
- config->nfs_export = false;
- } else if (set.nfs_export && set.index) {
- pr_err("conflicting options: nfs_export=on,index=off\n");
- return -EINVAL;
- } else if (set.index) {
- /*
- * There was an explicit index=off that resulted
- * in this conflict.
- */
- pr_info("disabling nfs_export due to index=off\n");
- config->nfs_export = false;
- } else {
- /* Automatically enable index otherwise. */
- config->index = true;
- }
- }
-
- /* Resolve nfs_export -> !metacopy dependency */
- if (config->nfs_export && config->metacopy) {
- if (set.nfs_export && set.metacopy) {
- pr_err("conflicting options: nfs_export=on,metacopy=on\n");
- return -EINVAL;
- }
- if (set.metacopy) {
- /*
- * There was an explicit metacopy=on that resulted
- * in this conflict.
- */
- pr_info("disabling nfs_export due to metacopy=on\n");
- config->nfs_export = false;
- } else {
- /*
- * There was an explicit nfs_export=on that resulted
- * in this conflict.
- */
- pr_info("disabling metacopy due to nfs_export=on\n");
- config->metacopy = false;
- }
- }
-
-
- /* Resolve userxattr -> !redirect && !metacopy dependency */
- if (config->userxattr) {
- if (set.redirect &&
- config->redirect_mode != OVL_REDIRECT_NOFOLLOW) {
- pr_err("conflicting options: userxattr,redirect_dir=%s\n",
- ovl_redirect_mode(config));
- return -EINVAL;
- }
- if (config->metacopy && set.metacopy) {
- pr_err("conflicting options: userxattr,metacopy=on\n");
- return -EINVAL;
- }
- /*
- * Silently disable default setting of redirect and metacopy.
- * This shall be the default in the future as well: these
- * options must be explicitly enabled if used together with
- * userxattr.
- */
- config->redirect_mode = OVL_REDIRECT_NOFOLLOW;
- config->metacopy = false;
- }
-
- return 0;
-}
-
#define OVL_WORKDIR_NAME "work"
#define OVL_INDEXDIR_NAME "index"
@@ -1758,7 +1318,7 @@ static struct dentry *ovl_get_root(struct super_block *sb,
return root;
}
-static int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
+int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct ovl_fs *ofs = sb->s_fs_info;
struct ovl_fs_context *ctx = fc->fs_private;
@@ -1919,92 +1479,6 @@ out_err:
return err;
}
-static int ovl_get_tree(struct fs_context *fc)
-{
- return get_tree_nodev(fc, ovl_fill_super);
-}
-
-static inline void ovl_fs_context_free(struct ovl_fs_context *ctx)
-{
- ovl_parse_param_drop_lowerdir(ctx);
- path_put(&ctx->upper);
- path_put(&ctx->work);
- kfree(ctx->lower);
- kfree(ctx);
-}
-
-static void ovl_free(struct fs_context *fc)
-{
- struct ovl_fs *ofs = fc->s_fs_info;
- struct ovl_fs_context *ctx = fc->fs_private;
-
- /*
- * ofs is stored in the fs_context when it is initialized.
- * ofs is transferred to the superblock on a successful mount,
- * but if an error occurs before the transfer we have to free
- * it here.
- */
- if (ofs)
- ovl_free_fs(ofs);
-
- if (ctx)
- ovl_fs_context_free(ctx);
-}
-
-static const struct fs_context_operations ovl_context_ops = {
- .parse_param = ovl_parse_param,
- .get_tree = ovl_get_tree,
- .reconfigure = ovl_reconfigure,
- .free = ovl_free,
-};
-
-/*
- * This is called during fsopen() and will record the user namespace of
- * the caller in fc->user_ns since we've raised FS_USERNS_MOUNT. We'll
- * need it when we actually create the superblock to verify that the
- * process creating the superblock is in the same user namespace as
- * process that called fsopen().
- */
-static int ovl_init_fs_context(struct fs_context *fc)
-{
- struct ovl_fs_context *ctx;
- struct ovl_fs *ofs;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
- if (!ctx)
- return -ENOMEM;
-
- /*
- * By default we allocate for three lower layers. It's likely
- * that it'll cover most users.
- */
- ctx->lower = kmalloc_array(3, sizeof(*ctx->lower), GFP_KERNEL_ACCOUNT);
- if (!ctx->lower)
- goto out_err;
- ctx->capacity = 3;
-
- ofs = kzalloc(sizeof(struct ovl_fs), GFP_KERNEL);
- if (!ofs)
- goto out_err;
-
- ofs->config.redirect_mode = ovl_redirect_mode_def();
- ofs->config.index = ovl_index_def;
- ofs->config.uuid = true;
- ofs->config.nfs_export = ovl_nfs_export_def;
- ofs->config.xino = ovl_xino_def();
- ofs->config.metacopy = ovl_metacopy_def;
-
- fc->s_fs_info = ofs;
- fc->fs_private = ctx;
- fc->ops = &ovl_context_ops;
- return 0;
-
-out_err:
- ovl_fs_context_free(ctx);
- return -ENOMEM;
-
-}
-
static struct file_system_type ovl_fs_type = {
.owner = THIS_MODULE,
.name = "overlay",
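The parameter table and fs_context hooks removed above (and moved into params.c) are what the new mount API consumes. A rough userspace sketch follows, assuming a libc that defines the SYS_fsopen/SYS_fsconfig/SYS_fsmount/SYS_move_mount numbers (there are no dedicated glibc wrappers); the directories, the /mnt target and the option values are hypothetical and most error handling is abbreviated:

#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/mount.h>
#include <stdio.h>

int main(void)
{
	/* Open a filesystem context for "overlay". */
	int fsfd = syscall(SYS_fsopen, "overlay", FSOPEN_CLOEXEC);
	if (fsfd < 0) { perror("fsopen"); return 1; }

	/* Keys match ovl_parameter_spec; values are made up for the example. */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "lowerdir", "/lower", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "upperdir", "/upper", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "workdir", "/work", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "metacopy", "on", 0);

	/* Create the superblock; parsing/verification errors surface here. */
	if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
		perror("fsconfig(create)");
		return 1;
	}

	int mntfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
	if (mntfd < 0) { perror("fsmount"); return 1; }

	/* Attach the new mount at /mnt. */
	if (syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt",
		    MOVE_MOUNT_F_EMPTY_PATH) < 0) {
		perror("move_mount");
		return 1;
	}
	return 0;
}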
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 5d0cf59c4926..9cb32e1a78a0 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -199,7 +199,7 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
ent->addr = (unsigned long)page_to_virt(p);
ent->size = nr_pages << PAGE_SHIFT;
- if (!virt_addr_valid(ent->addr))
+ if (!virt_addr_valid((void *)ent->addr))
goto free_out;
/* cut not-mapped area. ....from ppc-32 code. */
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 4e5488975415..5ea42653126e 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -29,7 +29,7 @@ static const struct file_operations proc_sys_dir_file_operations;
static const struct inode_operations proc_sys_dir_operations;
/* Support for permanently empty directories */
-struct ctl_table sysctl_mount_point[] = {
+static struct ctl_table sysctl_mount_point[] = {
{.type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY }
};
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index ca2da713c5fe..b5808fe3469a 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -2179,7 +2179,7 @@ static inline void cifs_sg_set_buf(struct sg_table *sgtable,
} while (buflen);
} else {
sg_set_page(&sgtable->sgl[sgtable->nents++],
- virt_to_page(addr), buflen, off);
+ virt_to_page((void *)addr), buflen, off);
}
}
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 223e17c16b60..2a2aec8c6112 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -2500,7 +2500,7 @@ static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
if (is_vmalloc_or_module_addr((void *)kaddr))
page = vmalloc_to_page((void *)kaddr);
else
- page = virt_to_page(kaddr);
+ page = virt_to_page((void *)kaddr);
if (!smb_set_sge(rdma, page, off, seg))
return -EIO;
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 6aa9c2e1e8eb..581ce9519339 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -166,6 +166,26 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
return 0;
}
+static struct page *squashfs_get_cache_page(struct address_space *mapping,
+ pgoff_t index)
+{
+ struct page *page;
+
+ if (!mapping)
+ return NULL;
+
+ page = find_get_page(mapping, index);
+ if (!page)
+ return NULL;
+
+ if (!PageUptodate(page)) {
+ put_page(page);
+ return NULL;
+ }
+
+ return page;
+}
+
static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
struct bio **biop, int *block_offset)
{
@@ -190,11 +210,10 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
for (i = 0; i < page_count; ++i) {
unsigned int len =
min_t(unsigned int, PAGE_SIZE - offset, total_len);
- struct page *page = NULL;
+ pgoff_t index = (read_start >> PAGE_SHIFT) + i;
+ struct page *page;
- if (cache_mapping)
- page = find_get_page(cache_mapping,
- (read_start >> PAGE_SHIFT) + i);
+ page = squashfs_get_cache_page(cache_mapping, index);
if (!page)
page = alloc_page(GFP_NOIO);
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index eeb0e3099421..138676463336 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -118,11 +118,13 @@ static int internal_create_group(struct kobject *kobj, int update,
/* Updates may happen before the object has been instantiated */
if (unlikely(update && !kobj->sd))
return -EINVAL;
+
if (!grp->attrs && !grp->bin_attrs) {
- WARN(1, "sysfs: (bin_)attrs not set by subsystem for group: %s/%s\n",
- kobj->name, grp->name ?: "");
- return -EINVAL;
+ pr_debug("sysfs: (bin_)attrs not set by subsystem for group: %s/%s, skipping\n",
+ kobj->name, grp->name ?: "");
+ return 0;
}
+
kobject_get_ownership(kobj, &uid, &gid);
if (grp->name) {
if (update) {
@@ -142,8 +144,10 @@ static int internal_create_group(struct kobject *kobj, int update,
return PTR_ERR(kn);
}
}
- } else
+ } else {
kn = kobj->sd;
+ }
+
kernfs_get(kn);
error = create_files(kn, kobj, uid, gid, grp, update);
if (error) {
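With the hunk above, registering an attribute_group that populates neither .attrs nor .bin_attrs is no longer treated as a caller bug. A minimal sketch of a caller that now gets a quiet success instead of a WARN and -EINVAL (the group name and function are hypothetical):

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* A group that declares no attributes at all. */
static const struct attribute_group demo_group = {
	.name = "demo",		/* hypothetical subdirectory name */
	/* .attrs and .bin_attrs intentionally left NULL */
};

static int demo_register(struct kobject *kobj)
{
	/*
	 * Previously: WARN() and -EINVAL.
	 * Now: pr_debug() and 0; the empty group is simply skipped.
	 */
	return sysfs_create_group(kobj, &demo_group);
}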
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index ee84835ebc66..e9cc481b4ddf 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -985,7 +985,7 @@ xfs_ag_shrink_space(
goto resv_err;
err2 = __xfs_free_extent_later(*tpp, args.fsbno, delta, NULL,
- true);
+ XFS_AG_RESV_NONE, true);
if (err2)
goto resv_err;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index c20fe99405d8..3069194527dd 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1536,7 +1536,8 @@ xfs_alloc_ag_vextent_lastblock(
*/
STATIC int
xfs_alloc_ag_vextent_near(
- struct xfs_alloc_arg *args)
+ struct xfs_alloc_arg *args,
+ uint32_t alloc_flags)
{
struct xfs_alloc_cur acur = {};
int error; /* error code */
@@ -1555,6 +1556,8 @@ xfs_alloc_ag_vextent_near(
if (args->agbno > args->max_agbno)
args->agbno = args->max_agbno;
+ /* Retry once quickly if we find busy extents before blocking. */
+ alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
restart:
len = 0;
@@ -1610,9 +1613,20 @@ restart:
*/
if (!acur.len) {
if (acur.busy) {
+ /*
+ * Our only valid extents must have been busy. Flush and
+ * retry the allocation again. If we get an -EAGAIN
+ * error, we're being told that a deadlock was avoided
+ * and the current transaction needs committing before
+ * the allocation can be retried.
+ */
trace_xfs_alloc_near_busy(args);
- xfs_extent_busy_flush(args->mp, args->pag,
- acur.busy_gen);
+ error = xfs_extent_busy_flush(args->tp, args->pag,
+ acur.busy_gen, alloc_flags);
+ if (error)
+ goto out;
+
+ alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
goto restart;
}
trace_xfs_alloc_size_neither(args);
@@ -1635,22 +1649,25 @@ out:
* and of the form k * prod + mod unless there's nothing that large.
* Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
*/
-STATIC int /* error */
+static int
xfs_alloc_ag_vextent_size(
- xfs_alloc_arg_t *args) /* allocation argument structure */
+ struct xfs_alloc_arg *args,
+ uint32_t alloc_flags)
{
- struct xfs_agf *agf = args->agbp->b_addr;
- struct xfs_btree_cur *bno_cur; /* cursor for bno btree */
- struct xfs_btree_cur *cnt_cur; /* cursor for cnt btree */
- int error; /* error result */
- xfs_agblock_t fbno; /* start of found freespace */
- xfs_extlen_t flen; /* length of found freespace */
- int i; /* temp status variable */
- xfs_agblock_t rbno; /* returned block number */
- xfs_extlen_t rlen; /* length of returned extent */
- bool busy;
- unsigned busy_gen;
+ struct xfs_agf *agf = args->agbp->b_addr;
+ struct xfs_btree_cur *bno_cur;
+ struct xfs_btree_cur *cnt_cur;
+ xfs_agblock_t fbno; /* start of found freespace */
+ xfs_extlen_t flen; /* length of found freespace */
+ xfs_agblock_t rbno; /* returned block number */
+ xfs_extlen_t rlen; /* length of returned extent */
+ bool busy;
+ unsigned busy_gen;
+ int error;
+ int i;
+ /* Retry once quickly if we find busy extents before blocking. */
+ alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
restart:
/*
* Allocate and initialize a cursor for the by-size btree.
@@ -1708,19 +1725,25 @@ restart:
error = xfs_btree_increment(cnt_cur, 0, &i);
if (error)
goto error0;
- if (i == 0) {
- /*
- * Our only valid extents must have been busy.
- * Make it unbusy by forcing the log out and
- * retrying.
- */
- xfs_btree_del_cursor(cnt_cur,
- XFS_BTREE_NOERROR);
- trace_xfs_alloc_size_busy(args);
- xfs_extent_busy_flush(args->mp,
- args->pag, busy_gen);
- goto restart;
- }
+ if (i)
+ continue;
+
+ /*
+ * Our only valid extents must have been busy. Flush and
+ * retry the allocation again. If we get an -EAGAIN
+ * error, we're being told that a deadlock was avoided
+ * and the current transaction needs committing before
+ * the allocation can be retried.
+ */
+ trace_xfs_alloc_size_busy(args);
+ error = xfs_extent_busy_flush(args->tp, args->pag,
+ busy_gen, alloc_flags);
+ if (error)
+ goto error0;
+
+ alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
+ xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+ goto restart;
}
}
@@ -1800,9 +1823,21 @@ restart:
args->len = rlen;
if (rlen < args->minlen) {
if (busy) {
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+ /*
+ * Our only valid extents must have been busy. Flush and
+ * retry the allocation again. If we get an -EAGAIN
+ * error, we're being told that a deadlock was avoided
+ * and the current transaction needs committing before
+ * the allocation can be retried.
+ */
trace_xfs_alloc_size_busy(args);
- xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
+ error = xfs_extent_busy_flush(args->tp, args->pag,
+ busy_gen, alloc_flags);
+ if (error)
+ goto error0;
+
+ alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
+ xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
goto restart;
}
goto out_nominleft;
@@ -2435,23 +2470,25 @@ static int
xfs_defer_agfl_block(
struct xfs_trans *tp,
xfs_agnumber_t agno,
- xfs_fsblock_t agbno,
+ xfs_agblock_t agbno,
struct xfs_owner_info *oinfo)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_extent_free_item *xefi;
+ xfs_fsblock_t fsbno = XFS_AGB_TO_FSB(mp, agno, agbno);
ASSERT(xfs_extfree_item_cache != NULL);
ASSERT(oinfo != NULL);
+ if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, fsbno)))
+ return -EFSCORRUPTED;
+
xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
GFP_KERNEL | __GFP_NOFAIL);
- xefi->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
+ xefi->xefi_startblock = fsbno;
xefi->xefi_blockcount = 1;
xefi->xefi_owner = oinfo->oi_owner;
-
- if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, xefi->xefi_startblock)))
- return -EFSCORRUPTED;
+ xefi->xefi_agresv = XFS_AG_RESV_AGFL;
trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
@@ -2470,6 +2507,7 @@ __xfs_free_extent_later(
xfs_fsblock_t bno,
xfs_filblks_t len,
const struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type type,
bool skip_discard)
{
struct xfs_extent_free_item *xefi;
@@ -2490,6 +2528,7 @@ __xfs_free_extent_later(
ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
ASSERT(xfs_extfree_item_cache != NULL);
+ ASSERT(type != XFS_AG_RESV_AGFL);
if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
return -EFSCORRUPTED;
@@ -2498,6 +2537,7 @@ __xfs_free_extent_later(
GFP_KERNEL | __GFP_NOFAIL);
xefi->xefi_startblock = bno;
xefi->xefi_blockcount = (xfs_extlen_t)len;
+ xefi->xefi_agresv = type;
if (skip_discard)
xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD;
if (oinfo) {
@@ -2568,7 +2608,7 @@ out:
int /* error */
xfs_alloc_fix_freelist(
struct xfs_alloc_arg *args, /* allocation argument structure */
- int flags) /* XFS_ALLOC_FLAG_... */
+ uint32_t alloc_flags)
{
struct xfs_mount *mp = args->mp;
struct xfs_perag *pag = args->pag;
@@ -2584,7 +2624,7 @@ xfs_alloc_fix_freelist(
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
if (!xfs_perag_initialised_agf(pag)) {
- error = xfs_alloc_read_agf(pag, tp, flags, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
if (error) {
/* Couldn't lock the AGF so skip this AG. */
if (error == -EAGAIN)
@@ -2600,13 +2640,13 @@ xfs_alloc_fix_freelist(
*/
if (xfs_perag_prefers_metadata(pag) &&
(args->datatype & XFS_ALLOC_USERDATA) &&
- (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
- ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
+ (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) {
+ ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING));
goto out_agbp_relse;
}
need = xfs_alloc_min_freelist(mp, pag);
- if (!xfs_alloc_space_available(args, need, flags |
+ if (!xfs_alloc_space_available(args, need, alloc_flags |
XFS_ALLOC_FLAG_CHECK))
goto out_agbp_relse;
@@ -2615,7 +2655,7 @@ xfs_alloc_fix_freelist(
* Can fail if we're not blocking on locks, and it's held.
*/
if (!agbp) {
- error = xfs_alloc_read_agf(pag, tp, flags, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
if (error) {
/* Couldn't lock the AGF so skip this AG. */
if (error == -EAGAIN)
@@ -2630,7 +2670,7 @@ xfs_alloc_fix_freelist(
/* If there isn't enough total space or single-extent, reject it. */
need = xfs_alloc_min_freelist(mp, pag);
- if (!xfs_alloc_space_available(args, need, flags))
+ if (!xfs_alloc_space_available(args, need, alloc_flags))
goto out_agbp_relse;
#ifdef DEBUG
@@ -2668,11 +2708,12 @@ xfs_alloc_fix_freelist(
*/
memset(&targs, 0, sizeof(targs));
/* struct copy below */
- if (flags & XFS_ALLOC_FLAG_NORMAP)
+ if (alloc_flags & XFS_ALLOC_FLAG_NORMAP)
targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
else
targs.oinfo = XFS_RMAP_OINFO_AG;
- while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
+ while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) &&
+ pag->pagf_flcount > need) {
error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
if (error)
goto out_agbp_relse;
@@ -2700,7 +2741,7 @@ xfs_alloc_fix_freelist(
targs.resv = XFS_AG_RESV_AGFL;
/* Allocate as many blocks as possible at once. */
- error = xfs_alloc_ag_vextent_size(&targs);
+ error = xfs_alloc_ag_vextent_size(&targs, alloc_flags);
if (error)
goto out_agflbp_relse;
@@ -2710,7 +2751,7 @@ xfs_alloc_fix_freelist(
* on a completely full ag.
*/
if (targs.agbno == NULLAGBLOCK) {
- if (flags & XFS_ALLOC_FLAG_FREEING)
+ if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
break;
goto out_agflbp_relse;
}
@@ -2916,6 +2957,47 @@ xfs_alloc_put_freelist(
}
/*
+ * Check that this AGF/AGI header's sequence number and length matches the AG
+ * number and size in fsblocks.
+ */
+xfs_failaddr_t
+xfs_validate_ag_length(
+ struct xfs_buf *bp,
+ uint32_t seqno,
+ uint32_t length)
+{
+ struct xfs_mount *mp = bp->b_mount;
+ /*
+ * During growfs operations, the perag is not fully initialised,
+ * so we can't use it for any useful checking. growfs ensures we can't
+ * use it by using uncached buffers that don't have the perag attached
+ * so we can detect and avoid this problem.
+ */
+ if (bp->b_pag && seqno != bp->b_pag->pag_agno)
+ return __this_address;
+
+ /*
+ * Only the last AG in the filesystem is allowed to be shorter
+ * than the AG size recorded in the superblock.
+ */
+ if (length != mp->m_sb.sb_agblocks) {
+ /*
+ * During growfs, the new last AG can get here before we
+ * have updated the superblock. Give it a pass on the seqno
+ * check.
+ */
+ if (bp->b_pag && seqno != mp->m_sb.sb_agcount - 1)
+ return __this_address;
+ if (length < XFS_MIN_AG_BLOCKS)
+ return __this_address;
+ if (length > mp->m_sb.sb_agblocks)
+ return __this_address;
+ }
+
+ return NULL;
+}
+
+/*
* Verify the AGF is consistent.
*
* We do not verify the AGFL indexes in the AGF are fully consistent here
@@ -2934,6 +3016,9 @@ xfs_agf_verify(
{
struct xfs_mount *mp = bp->b_mount;
struct xfs_agf *agf = bp->b_addr;
+ xfs_failaddr_t fa;
+ uint32_t agf_seqno = be32_to_cpu(agf->agf_seqno);
+ uint32_t agf_length = be32_to_cpu(agf->agf_length);
if (xfs_has_crc(mp)) {
if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
@@ -2945,18 +3030,26 @@ xfs_agf_verify(
if (!xfs_verify_magic(bp, agf->agf_magicnum))
return __this_address;
- if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
- be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
- be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
- be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
- be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
+ if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)))
return __this_address;
- if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
+ /*
+ * Both agf_seqno and agf_length need to be validated before anything else
+ * block number related in the AGF or AGFL can be checked.
+ */
+ fa = xfs_validate_ag_length(bp, agf_seqno, agf_length);
+ if (fa)
+ return fa;
+
+ if (be32_to_cpu(agf->agf_flfirst) >= xfs_agfl_size(mp))
+ return __this_address;
+ if (be32_to_cpu(agf->agf_fllast) >= xfs_agfl_size(mp))
+ return __this_address;
+ if (be32_to_cpu(agf->agf_flcount) > xfs_agfl_size(mp))
return __this_address;
if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
- be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
+ be32_to_cpu(agf->agf_freeblks) > agf_length)
return __this_address;
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
@@ -2967,38 +3060,28 @@ xfs_agf_verify(
mp->m_alloc_maxlevels)
return __this_address;
- if (xfs_has_rmapbt(mp) &&
- (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
- be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
- mp->m_rmap_maxlevels))
- return __this_address;
-
- if (xfs_has_rmapbt(mp) &&
- be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
+ if (xfs_has_lazysbcount(mp) &&
+ be32_to_cpu(agf->agf_btreeblks) > agf_length)
return __this_address;
- /*
- * during growfs operations, the perag is not fully initialised,
- * so we can't use it for any useful checking. growfs ensures we can't
- * use it by using uncached buffers that don't have the perag attached
- * so we can detect and avoid this problem.
- */
- if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
- return __this_address;
+ if (xfs_has_rmapbt(mp)) {
+ if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length)
+ return __this_address;
- if (xfs_has_lazysbcount(mp) &&
- be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
- return __this_address;
+ if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
+ mp->m_rmap_maxlevels)
+ return __this_address;
+ }
- if (xfs_has_reflink(mp) &&
- be32_to_cpu(agf->agf_refcount_blocks) >
- be32_to_cpu(agf->agf_length))
- return __this_address;
+ if (xfs_has_reflink(mp)) {
+ if (be32_to_cpu(agf->agf_refcount_blocks) > agf_length)
+ return __this_address;
- if (xfs_has_reflink(mp) &&
- (be32_to_cpu(agf->agf_refcount_level) < 1 ||
- be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels))
- return __this_address;
+ if (be32_to_cpu(agf->agf_refcount_level) < 1 ||
+ be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels)
+ return __this_address;
+ }
return NULL;
}
@@ -3226,7 +3309,7 @@ xfs_alloc_vextent_check_args(
static int
xfs_alloc_vextent_prepare_ag(
struct xfs_alloc_arg *args,
- uint32_t flags)
+ uint32_t alloc_flags)
{
bool need_pag = !args->pag;
int error;
@@ -3235,7 +3318,7 @@ xfs_alloc_vextent_prepare_ag(
args->pag = xfs_perag_get(args->mp, args->agno);
args->agbp = NULL;
- error = xfs_alloc_fix_freelist(args, flags);
+ error = xfs_alloc_fix_freelist(args, alloc_flags);
if (error) {
trace_xfs_alloc_vextent_nofix(args);
if (need_pag)
@@ -3357,6 +3440,7 @@ xfs_alloc_vextent_this_ag(
{
struct xfs_mount *mp = args->mp;
xfs_agnumber_t minimum_agno;
+ uint32_t alloc_flags = 0;
int error;
ASSERT(args->pag != NULL);
@@ -3375,9 +3459,9 @@ xfs_alloc_vextent_this_ag(
return error;
}
- error = xfs_alloc_vextent_prepare_ag(args, 0);
+ error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
if (!error && args->agbp)
- error = xfs_alloc_ag_vextent_size(args);
+ error = xfs_alloc_ag_vextent_size(args, alloc_flags);
return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
}
@@ -3406,20 +3490,20 @@ xfs_alloc_vextent_iterate_ags(
xfs_agnumber_t minimum_agno,
xfs_agnumber_t start_agno,
xfs_agblock_t target_agbno,
- uint32_t flags)
+ uint32_t alloc_flags)
{
struct xfs_mount *mp = args->mp;
xfs_agnumber_t restart_agno = minimum_agno;
xfs_agnumber_t agno;
int error = 0;
- if (flags & XFS_ALLOC_FLAG_TRYLOCK)
+ if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
restart_agno = 0;
restart:
for_each_perag_wrap_range(mp, start_agno, restart_agno,
mp->m_sb.sb_agcount, agno, args->pag) {
args->agno = agno;
- error = xfs_alloc_vextent_prepare_ag(args, flags);
+ error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
if (error)
break;
if (!args->agbp) {
@@ -3433,10 +3517,10 @@ restart:
*/
if (args->agno == start_agno && target_agbno) {
args->agbno = target_agbno;
- error = xfs_alloc_ag_vextent_near(args);
+ error = xfs_alloc_ag_vextent_near(args, alloc_flags);
} else {
args->agbno = 0;
- error = xfs_alloc_ag_vextent_size(args);
+ error = xfs_alloc_ag_vextent_size(args, alloc_flags);
}
break;
}
@@ -3453,8 +3537,8 @@ restart:
* constraining flags by the caller, drop them and retry the allocation
* without any constraints being set.
*/
- if (flags) {
- flags = 0;
+ if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
+ alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
restart_agno = minimum_agno;
goto restart;
}
@@ -3482,6 +3566,7 @@ xfs_alloc_vextent_start_ag(
xfs_agnumber_t start_agno;
xfs_agnumber_t rotorstep = xfs_rotorstep;
bool bump_rotor = false;
+ uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
int error;
ASSERT(args->pag == NULL);
@@ -3508,7 +3593,7 @@ xfs_alloc_vextent_start_ag(
start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
- XFS_FSB_TO_AGBNO(mp, target), XFS_ALLOC_FLAG_TRYLOCK);
+ XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
if (bump_rotor) {
if (args->agno == start_agno)
@@ -3535,6 +3620,7 @@ xfs_alloc_vextent_first_ag(
struct xfs_mount *mp = args->mp;
xfs_agnumber_t minimum_agno;
xfs_agnumber_t start_agno;
+ uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
int error;
ASSERT(args->pag == NULL);
@@ -3553,7 +3639,7 @@ xfs_alloc_vextent_first_ag(
start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
- XFS_FSB_TO_AGBNO(mp, target), 0);
+ XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
}
@@ -3606,6 +3692,7 @@ xfs_alloc_vextent_near_bno(
struct xfs_mount *mp = args->mp;
xfs_agnumber_t minimum_agno;
bool needs_perag = args->pag == NULL;
+ uint32_t alloc_flags = 0;
int error;
if (!needs_perag)
@@ -3626,9 +3713,9 @@ xfs_alloc_vextent_near_bno(
if (needs_perag)
args->pag = xfs_perag_grab(mp, args->agno);
- error = xfs_alloc_vextent_prepare_ag(args, 0);
+ error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
if (!error && args->agbp)
- error = xfs_alloc_ag_vextent_near(args);
+ error = xfs_alloc_ag_vextent_near(args, alloc_flags);
return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
}
@@ -3756,15 +3843,11 @@ xfs_alloc_query_range(
xfs_alloc_query_range_fn fn,
void *priv)
{
- union xfs_btree_irec low_brec;
- union xfs_btree_irec high_brec;
- struct xfs_alloc_query_range_info query;
+ union xfs_btree_irec low_brec = { .a = *low_rec };
+ union xfs_btree_irec high_brec = { .a = *high_rec };
+ struct xfs_alloc_query_range_info query = { .priv = priv, .fn = fn };
ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
- low_brec.a = *low_rec;
- high_brec.a = *high_rec;
- query.priv = priv;
- query.fn = fn;
return xfs_btree_query_range(cur, &low_brec, &high_brec,
xfs_alloc_query_range_helper, &query);
}
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 85ac470be0da..6bb8d295c321 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -19,11 +19,12 @@ unsigned int xfs_agfl_size(struct xfs_mount *mp);
/*
* Flags for xfs_alloc_fix_freelist.
*/
-#define XFS_ALLOC_FLAG_TRYLOCK 0x00000001 /* use trylock for buffer locking */
-#define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
-#define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */
-#define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */
-#define XFS_ALLOC_FLAG_CHECK 0x00000010 /* test only, don't modify args */
+#define XFS_ALLOC_FLAG_TRYLOCK (1U << 0) /* use trylock for buffer locking */
+#define XFS_ALLOC_FLAG_FREEING (1U << 1) /* indicate caller is freeing extents*/
+#define XFS_ALLOC_FLAG_NORMAP (1U << 2) /* don't modify the rmapbt */
+#define XFS_ALLOC_FLAG_NOSHRINK (1U << 3) /* don't shrink the freelist */
+#define XFS_ALLOC_FLAG_CHECK (1U << 4) /* test only, don't modify args */
+#define XFS_ALLOC_FLAG_TRYFLUSH (1U << 5) /* don't wait in busy extent flush */
/*
* Argument structure for xfs_alloc routines.
@@ -195,7 +196,7 @@ int xfs_alloc_read_agfl(struct xfs_perag *pag, struct xfs_trans *tp,
struct xfs_buf **bpp);
int xfs_free_agfl_block(struct xfs_trans *, xfs_agnumber_t, xfs_agblock_t,
struct xfs_buf *, struct xfs_owner_info *);
-int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, int flags);
+int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, uint32_t alloc_flags);
int xfs_free_extent_fix_freelist(struct xfs_trans *tp, struct xfs_perag *pag,
struct xfs_buf **agbp);
@@ -232,7 +233,7 @@ xfs_buf_to_agfl_bno(
int __xfs_free_extent_later(struct xfs_trans *tp, xfs_fsblock_t bno,
xfs_filblks_t len, const struct xfs_owner_info *oinfo,
- bool skip_discard);
+ enum xfs_ag_resv_type type, bool skip_discard);
/*
* List of extents to be free "later".
@@ -245,6 +246,7 @@ struct xfs_extent_free_item {
xfs_extlen_t xefi_blockcount;/* number of blocks in extent */
struct xfs_perag *xefi_pag;
unsigned int xefi_flags;
+ enum xfs_ag_resv_type xefi_agresv;
};
void xfs_extent_free_get_group(struct xfs_mount *mp,
@@ -259,9 +261,10 @@ xfs_free_extent_later(
struct xfs_trans *tp,
xfs_fsblock_t bno,
xfs_filblks_t len,
- const struct xfs_owner_info *oinfo)
+ const struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type type)
{
- return __xfs_free_extent_later(tp, bno, len, oinfo, false);
+ return __xfs_free_extent_later(tp, bno, len, oinfo, type, false);
}
@@ -270,4 +273,7 @@ extern struct kmem_cache *xfs_extfree_item_cache;
int __init xfs_extfree_intent_init_cache(void);
void xfs_extfree_intent_destroy_cache(void);
+xfs_failaddr_t xfs_validate_ag_length(struct xfs_buf *bp, uint32_t seqno,
+ uint32_t length);
+
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index beee51ad75ce..2580ae47209a 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -2293,8 +2293,6 @@ xfs_attr3_leaf_unbalance(
trace_xfs_attr_leaf_unbalance(state->args);
- drop_leaf = drop_blk->bp->b_addr;
- save_leaf = save_blk->bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(state->args->geo, &drophdr, drop_leaf);
xfs_attr3_leaf_hdr_from_disk(state->args->geo, &savehdr, save_leaf);
entry = xfs_attr3_leaf_entryp(drop_leaf);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index fef35696adb7..30c931b38853 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -574,7 +574,8 @@ xfs_bmap_btree_to_extents(
return error;
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
- error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo);
+ error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
+ XFS_AG_RESV_NONE);
if (error)
return error;
@@ -5236,8 +5237,9 @@ xfs_bmap_del_extent_real(
} else {
error = __xfs_free_extent_later(tp, del->br_startblock,
del->br_blockcount, NULL,
- (bflags & XFS_BMAPI_NODISCARD) ||
- del->br_state == XFS_EXT_UNWRITTEN);
+ XFS_AG_RESV_NONE,
+ ((bflags & XFS_BMAPI_NODISCARD) ||
+ del->br_state == XFS_EXT_UNWRITTEN));
if (error)
goto done;
}
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 36564ae3084f..bf3f1b36fdd2 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -271,7 +271,8 @@ xfs_bmbt_free_block(
int error;
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
- error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo);
+ error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo,
+ XFS_AG_RESV_NONE);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 34600f94c2f4..b83e54c70906 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1853,8 +1853,8 @@ xfs_difree_inode_chunk(
/* not sparse, calculate extent info directly */
return xfs_free_extent_later(tp,
XFS_AGB_TO_FSB(mp, agno, sagbno),
- M_IGEO(mp)->ialloc_blks,
- &XFS_RMAP_OINFO_INODES);
+ M_IGEO(mp)->ialloc_blks, &XFS_RMAP_OINFO_INODES,
+ XFS_AG_RESV_NONE);
}
/* holemask is only 16-bits (fits in an unsigned long) */
@@ -1899,8 +1899,8 @@ xfs_difree_inode_chunk(
ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
error = xfs_free_extent_later(tp,
- XFS_AGB_TO_FSB(mp, agno, agbno),
- contigblk, &XFS_RMAP_OINFO_INODES);
+ XFS_AGB_TO_FSB(mp, agno, agbno), contigblk,
+ &XFS_RMAP_OINFO_INODES, XFS_AG_RESV_NONE);
if (error)
return error;
@@ -2486,11 +2486,14 @@ xfs_ialloc_log_agi(
static xfs_failaddr_t
xfs_agi_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_mount;
- struct xfs_agi *agi = bp->b_addr;
- int i;
+ struct xfs_mount *mp = bp->b_mount;
+ struct xfs_agi *agi = bp->b_addr;
+ xfs_failaddr_t fa;
+ uint32_t agi_seqno = be32_to_cpu(agi->agi_seqno);
+ uint32_t agi_length = be32_to_cpu(agi->agi_length);
+ int i;
if (xfs_has_crc(mp)) {
if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
@@ -2507,6 +2510,10 @@ xfs_agi_verify(
if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
return __this_address;
+ fa = xfs_validate_ag_length(bp, agi_seqno, agi_length);
+ if (fa)
+ return fa;
+
if (be32_to_cpu(agi->agi_level) < 1 ||
be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels)
return __this_address;
@@ -2516,15 +2523,6 @@ xfs_agi_verify(
be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels))
return __this_address;
- /*
- * during growfs operations, the perag is not fully initialised,
- * so we can't use it for any useful checking. growfs ensures we can't
- * use it by using uncached buffers that don't have the perag attached
- * so we can detect and avoid this problem.
- */
- if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
- return __this_address;
-
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
continue;
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 5a945ae21b5d..9258f01c0015 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -160,8 +160,7 @@ __xfs_inobt_free_block(
xfs_inobt_mod_blockcount(cur, -1);
fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
- return xfs_free_extent(cur->bc_tp, cur->bc_ag.pag,
- XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1,
+ return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
&XFS_RMAP_OINFO_INOBT, resv);
}
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index b6e21433925c..646b3fa362ad 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1152,7 +1152,8 @@ xfs_refcount_adjust_extents(
cur->bc_ag.pag->pag_agno,
tmp.rc_startblock);
error = xfs_free_extent_later(cur->bc_tp, fsbno,
- tmp.rc_blockcount, NULL);
+ tmp.rc_blockcount, NULL,
+ XFS_AG_RESV_NONE);
if (error)
goto out_error;
}
@@ -1213,7 +1214,8 @@ xfs_refcount_adjust_extents(
cur->bc_ag.pag->pag_agno,
ext.rc_startblock);
error = xfs_free_extent_later(cur->bc_tp, fsbno,
- ext.rc_blockcount, NULL);
+ ext.rc_blockcount, NULL,
+ XFS_AG_RESV_NONE);
if (error)
goto out_error;
}
@@ -1919,8 +1921,13 @@ xfs_refcount_recover_cow_leftovers(
struct xfs_buf *agbp;
struct xfs_refcount_recovery *rr, *n;
struct list_head debris;
- union xfs_btree_irec low;
- union xfs_btree_irec high;
+ union xfs_btree_irec low = {
+ .rc.rc_domain = XFS_REFC_DOMAIN_COW,
+ };
+ union xfs_btree_irec high = {
+ .rc.rc_domain = XFS_REFC_DOMAIN_COW,
+ .rc.rc_startblock = -1U,
+ };
xfs_fsblock_t fsb;
int error;
@@ -1951,10 +1958,6 @@ xfs_refcount_recover_cow_leftovers(
cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
/* Find all the leftover CoW staging extents. */
- memset(&low, 0, sizeof(low));
- memset(&high, 0, sizeof(high));
- low.rc.rc_domain = high.rc.rc_domain = XFS_REFC_DOMAIN_COW;
- high.rc.rc_startblock = -1U;
error = xfs_btree_query_range(cur, &low, &high,
xfs_refcount_recover_extent, &debris);
xfs_btree_del_cursor(cur, error);
@@ -1981,7 +1984,8 @@ xfs_refcount_recover_cow_leftovers(
/* Free the block. */
error = xfs_free_extent_later(tp, fsb,
- rr->rr_rrec.rc_blockcount, NULL);
+ rr->rr_rrec.rc_blockcount, NULL,
+ XFS_AG_RESV_NONE);
if (error)
goto out_trans;
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index d4afc5f4e6a5..5c3987d8dc24 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -106,19 +106,13 @@ xfs_refcountbt_free_block(
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
- int error;
trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
be32_add_cpu(&agf->agf_refcount_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
- error = xfs_free_extent(cur->bc_tp, cur->bc_ag.pag,
- XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1,
+ return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
&XFS_RMAP_OINFO_REFC, XFS_AG_RESV_METADATA);
- if (error)
- return error;
-
- return error;
}
STATIC int
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index f4dc23b3b837..fbb0b2637463 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -2389,14 +2389,10 @@ xfs_rmap_query_range(
xfs_rmap_query_range_fn fn,
void *priv)
{
- union xfs_btree_irec low_brec;
- union xfs_btree_irec high_brec;
- struct xfs_rmap_query_range_info query;
+ union xfs_btree_irec low_brec = { .r = *low_rec };
+ union xfs_btree_irec high_brec = { .r = *high_rec };
+ struct xfs_rmap_query_range_info query = { .priv = priv, .fn = fn };
- low_brec.r = *low_rec;
- high_brec.r = *high_rec;
- query.priv = priv;
- query.fn = fn;
return xfs_btree_query_range(cur, &low_brec, &high_brec,
xfs_rmap_query_range_helper, &query);
}
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index ba0f17bc1dc0..5e174685a77c 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -412,7 +412,6 @@ xfs_validate_sb_common(
sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
sbp->sb_inodesize != (1 << sbp->sb_inodelog) ||
- sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE ||
sbp->sb_inopblock != howmany(sbp->sb_blocksize,sbp->sb_inodesize) ||
XFS_FSB_TO_B(mp, sbp->sb_agblocks) < XFS_MIN_AG_BYTES ||
XFS_FSB_TO_B(mp, sbp->sb_agblocks) > XFS_MAX_AG_BYTES ||
@@ -430,6 +429,61 @@ xfs_validate_sb_common(
return -EFSCORRUPTED;
}
+ /*
+ * Logs that are too large are not supported at all. Reject them
+ * outright. Logs that are too small are tolerated on v4 filesystems,
+ * but we can only check that when mounting the log. Hence we skip
+ * those checks here.
+ */
+ if (sbp->sb_logblocks > XFS_MAX_LOG_BLOCKS) {
+ xfs_notice(mp,
+ "Log size 0x%x blocks too large, maximum size is 0x%llx blocks",
+ sbp->sb_logblocks, XFS_MAX_LOG_BLOCKS);
+ return -EFSCORRUPTED;
+ }
+
+ if (XFS_FSB_TO_B(mp, sbp->sb_logblocks) > XFS_MAX_LOG_BYTES) {
+ xfs_warn(mp,
+ "log size 0x%llx bytes too large, maximum size is 0x%llx bytes",
+ XFS_FSB_TO_B(mp, sbp->sb_logblocks),
+ XFS_MAX_LOG_BYTES);
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * Do not allow filesystems with corrupted log sector or stripe units to
+ * be mounted. We cannot safely size the iclogs or write to the log if
+ * the log stripe unit is not valid.
+ */
+ if (sbp->sb_versionnum & XFS_SB_VERSION_SECTORBIT) {
+ if (sbp->sb_logsectsize != (1U << sbp->sb_logsectlog)) {
+ xfs_notice(mp,
+ "log sector size in bytes/log2 (0x%x/0x%x) must match",
+ sbp->sb_logsectsize, 1U << sbp->sb_logsectlog);
+ return -EFSCORRUPTED;
+ }
+ } else if (sbp->sb_logsectsize || sbp->sb_logsectlog) {
+ xfs_notice(mp,
+ "log sector size in bytes/log2 (0x%x/0x%x) are not zero",
+ sbp->sb_logsectsize, sbp->sb_logsectlog);
+ return -EFSCORRUPTED;
+ }
+
+ if (sbp->sb_logsunit > 1) {
+ if (sbp->sb_logsunit % sbp->sb_blocksize) {
+ xfs_notice(mp,
+ "log stripe unit 0x%x bytes must be a multiple of block size",
+ sbp->sb_logsunit);
+ return -EFSCORRUPTED;
+ }
+ if (sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE) {
+ xfs_notice(mp,
+ "log stripe unit 0x%x bytes over maximum size (0x%x bytes)",
+ sbp->sb_logsunit, XLOG_MAX_RECORD_BSIZE);
+ return -EFSCORRUPTED;
+ }
+ }
+
/* Validate the realtime geometry; stolen from xfs_repair */
if (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE ||
sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) {
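Reading the new stripe-unit checks with concrete numbers, assuming the common 4096-byte block size: a log stripe unit of 32768 bytes passes (a multiple of the block size and no larger than XLOG_MAX_RECORD_BSIZE, 256 KiB), a value such as 33000 is rejected for not being block-size aligned, and 524288 is rejected for exceeding the maximum record size; all of these now fail the mount with -EFSCORRUPTED at superblock validation instead of surfacing later at log mount time.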
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index f3d328e4a440..7c2fdc71e42d 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -566,20 +566,45 @@ xfs_extent_busy_clear(
/*
* Flush out all busy extents for this AG.
+ *
+ * If the current transaction is holding busy extents, the caller may not want
+ * to wait for committed busy extents to resolve. If we are being told just to
+ * try a flush or progress has been made since we last skipped a busy extent,
+ * return immediately to allow the caller to try again.
+ *
+ * If we are freeing extents, we might actually be holding the only free extents
+ * in the transaction busy list and the log force won't resolve that situation.
+ * In this case, we must return -EAGAIN to avoid a deadlock by informing the
+ * caller it needs to commit the busy extents it holds before retrying the
+ * extent free operation.
*/
-void
+int
xfs_extent_busy_flush(
- struct xfs_mount *mp,
+ struct xfs_trans *tp,
struct xfs_perag *pag,
- unsigned busy_gen)
+ unsigned busy_gen,
+ uint32_t alloc_flags)
{
DEFINE_WAIT (wait);
int error;
- error = xfs_log_force(mp, XFS_LOG_SYNC);
+ error = xfs_log_force(tp->t_mountp, XFS_LOG_SYNC);
if (error)
- return;
+ return error;
+
+ /* Avoid deadlocks on uncommitted busy extents. */
+ if (!list_empty(&tp->t_busy)) {
+ if (alloc_flags & XFS_ALLOC_FLAG_TRYFLUSH)
+ return 0;
+
+ if (busy_gen != READ_ONCE(pag->pagb_gen))
+ return 0;
+
+ if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
+ return -EAGAIN;
+ }
+ /* Wait for committed busy extents to resolve. */
do {
prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
if (busy_gen != READ_ONCE(pag->pagb_gen))
@@ -588,6 +613,7 @@ xfs_extent_busy_flush(
} while (1);
finish_wait(&pag->pagb_wait, &wait);
+ return 0;
}
void
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
index 4a118131059f..c37bf87e6781 100644
--- a/fs/xfs/xfs_extent_busy.h
+++ b/fs/xfs/xfs_extent_busy.h
@@ -51,9 +51,9 @@ bool
xfs_extent_busy_trim(struct xfs_alloc_arg *args, xfs_agblock_t *bno,
xfs_extlen_t *len, unsigned *busy_gen);
-void
-xfs_extent_busy_flush(struct xfs_mount *mp, struct xfs_perag *pag,
- unsigned busy_gen);
+int
+xfs_extent_busy_flush(struct xfs_trans *tp, struct xfs_perag *pag,
+ unsigned busy_gen, uint32_t alloc_flags);
void
xfs_extent_busy_wait_all(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index f9e36b810663..f1a5ecf099aa 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -337,6 +337,34 @@ xfs_trans_get_efd(
}
/*
+ * Fill the EFD with all extents from the EFI when we need to roll the
+ * transaction and continue with a new EFI.
+ *
+ * This simply copies all the extents in the EFI to the EFD rather than make
+ * assumptions about which extents in the EFI have already been processed. We
+ * currently keep the xefi list in the same order as the EFI extent list, but
+ * that may not always be the case. Copying everything avoids leaving a landmine
+ * were we fail to cancel all the extents in an EFI if the xefi list is
+ * processed in a different order to the extents in the EFI.
+ */
+static void
+xfs_efd_from_efi(
+ struct xfs_efd_log_item *efdp)
+{
+ struct xfs_efi_log_item *efip = efdp->efd_efip;
+ uint i;
+
+ ASSERT(efip->efi_format.efi_nextents > 0);
+ ASSERT(efdp->efd_next_extent < efip->efi_format.efi_nextents);
+
+ for (i = 0; i < efip->efi_format.efi_nextents; i++) {
+ efdp->efd_format.efd_extents[i] =
+ efip->efi_format.efi_extents[i];
+ }
+ efdp->efd_next_extent = efip->efi_format.efi_nextents;
+}
+
+/*
* Free an extent and log it to the EFD. Note that the transaction is marked
* dirty regardless of whether the extent free succeeds or fails to support the
* EFI/EFD lifecycle rules.
@@ -365,7 +393,7 @@ xfs_trans_free_extent(
agbno, xefi->xefi_blockcount);
error = __xfs_free_extent(tp, xefi->xefi_pag, agbno,
- xefi->xefi_blockcount, &oinfo, XFS_AG_RESV_NONE,
+ xefi->xefi_blockcount, &oinfo, xefi->xefi_agresv,
xefi->xefi_flags & XFS_EFI_SKIP_DISCARD);
/*
@@ -378,6 +406,17 @@ xfs_trans_free_extent(
tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
set_bit(XFS_LI_DIRTY, &efdp->efd_item.li_flags);
+ /*
+ * If we need a new transaction to make progress, the caller will log a
+ * new EFI with the current contents. It will also log an EFD to cancel
+ * the existing EFI, and so we need to copy all the unprocessed extents
+ * in this EFI to the EFD so this works correctly.
+ */
+ if (error == -EAGAIN) {
+ xfs_efd_from_efi(efdp);
+ return error;
+ }
+
next_extent = efdp->efd_next_extent;
ASSERT(next_extent < efdp->efd_format.efd_nextents);
extp = &(efdp->efd_format.efd_extents[next_extent]);
@@ -495,6 +534,13 @@ xfs_extent_free_finish_item(
error = xfs_trans_free_extent(tp, EFD_ITEM(done), xefi);
+ /*
+ * Don't free the XEFI if we need a new transaction to complete
+ * processing of it.
+ */
+ if (error == -EAGAIN)
+ return error;
+
xfs_extent_free_put_group(xefi);
kmem_cache_free(xfs_extfree_item_cache, xefi);
return error;
@@ -620,6 +666,7 @@ xfs_efi_item_recover(
struct xfs_trans *tp;
int i;
int error = 0;
+ bool requeue_only = false;
/*
* First check the validity of the extents described by the
@@ -644,6 +691,7 @@ xfs_efi_item_recover(
for (i = 0; i < efip->efi_format.efi_nextents; i++) {
struct xfs_extent_free_item fake = {
.xefi_owner = XFS_RMAP_OWN_UNKNOWN,
+ .xefi_agresv = XFS_AG_RESV_NONE,
};
struct xfs_extent *extp;
@@ -652,9 +700,28 @@ xfs_efi_item_recover(
fake.xefi_startblock = extp->ext_start;
fake.xefi_blockcount = extp->ext_len;
- xfs_extent_free_get_group(mp, &fake);
- error = xfs_trans_free_extent(tp, efdp, &fake);
- xfs_extent_free_put_group(&fake);
+ if (!requeue_only) {
+ xfs_extent_free_get_group(mp, &fake);
+ error = xfs_trans_free_extent(tp, efdp, &fake);
+ xfs_extent_free_put_group(&fake);
+ }
+
+ /*
+ * If we can't free the extent without potentially deadlocking,
+ * requeue the rest of the extents to a new transaction so that they get
+ * run again later with a new transaction context.
+ */
+ if (error == -EAGAIN || requeue_only) {
+ error = xfs_free_extent_later(tp, fake.xefi_startblock,
+ fake.xefi_blockcount,
+ &XFS_RMAP_OINFO_ANY_OWNER,
+ fake.xefi_agresv);
+ if (!error) {
+ requeue_only = true;
+ continue;
+ }
+ }
+
if (error == -EFSCORRUPTED)
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
extp, sizeof(*extp));
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 59e7d1a14b67..10403ba9b58f 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -160,9 +160,18 @@ struct xfs_getfsmap_info {
struct xfs_buf *agf_bp; /* AGF, for refcount queries */
struct xfs_perag *pag; /* AG info, if applicable */
xfs_daddr_t next_daddr; /* next daddr we expect */
+ /* daddr of low fsmap key when we're using the rtbitmap */
+ xfs_daddr_t low_daddr;
u64 missing_owner; /* owner of holes */
u32 dev; /* device id */
- struct xfs_rmap_irec low; /* low rmap key */
+ /*
+ * Low rmap key for the query. If low.rm_blockcount is nonzero, this
+ * is the second (or later) call to retrieve the recordset in pieces.
+ * xfs_getfsmap_rec_before_start will compare all records retrieved
+ * by the rmapbt query to filter out any records that start before
+ * the last record.
+ */
+ struct xfs_rmap_irec low;
struct xfs_rmap_irec high; /* high rmap key */
bool last; /* last extent? */
};
@@ -237,16 +246,31 @@ xfs_getfsmap_format(
xfs_fsmap_from_internal(rec, xfm);
}
+static inline bool
+xfs_getfsmap_rec_before_start(
+ struct xfs_getfsmap_info *info,
+ const struct xfs_rmap_irec *rec,
+ xfs_daddr_t rec_daddr)
+{
+ if (info->low_daddr != -1ULL)
+ return rec_daddr < info->low_daddr;
+ if (info->low.rm_blockcount)
+ return xfs_rmap_compare(rec, &info->low) < 0;
+ return false;
+}
+
/*
* Format a reverse mapping for getfsmap, having translated rm_startblock
- * into the appropriate daddr units.
+ * into the appropriate daddr units. Pass in a nonzero @len_daddr if the
+ * length could be larger than rm_blockcount in struct xfs_rmap_irec.
*/
STATIC int
xfs_getfsmap_helper(
struct xfs_trans *tp,
struct xfs_getfsmap_info *info,
const struct xfs_rmap_irec *rec,
- xfs_daddr_t rec_daddr)
+ xfs_daddr_t rec_daddr,
+ xfs_daddr_t len_daddr)
{
struct xfs_fsmap fmr;
struct xfs_mount *mp = tp->t_mountp;
@@ -256,12 +280,15 @@ xfs_getfsmap_helper(
if (fatal_signal_pending(current))
return -EINTR;
+ if (len_daddr == 0)
+ len_daddr = XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+
/*
* Filter out records that start before our startpoint, if the
* caller requested that.
*/
- if (xfs_rmap_compare(rec, &info->low) < 0) {
- rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ if (xfs_getfsmap_rec_before_start(info, rec, rec_daddr)) {
+ rec_daddr += len_daddr;
if (info->next_daddr < rec_daddr)
info->next_daddr = rec_daddr;
return 0;
@@ -280,7 +307,7 @@ xfs_getfsmap_helper(
info->head->fmh_entries++;
- rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ rec_daddr += len_daddr;
if (info->next_daddr < rec_daddr)
info->next_daddr = rec_daddr;
return 0;
@@ -320,7 +347,7 @@ xfs_getfsmap_helper(
if (error)
return error;
fmr.fmr_offset = XFS_FSB_TO_BB(mp, rec->rm_offset);
- fmr.fmr_length = XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ fmr.fmr_length = len_daddr;
if (rec->rm_flags & XFS_RMAP_UNWRITTEN)
fmr.fmr_flags |= FMR_OF_PREALLOC;
if (rec->rm_flags & XFS_RMAP_ATTR_FORK)
@@ -337,7 +364,7 @@ xfs_getfsmap_helper(
xfs_getfsmap_format(mp, &fmr, info);
out:
- rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ rec_daddr += len_daddr;
if (info->next_daddr < rec_daddr)
info->next_daddr = rec_daddr;
return 0;
@@ -358,7 +385,7 @@ xfs_getfsmap_datadev_helper(
fsb = XFS_AGB_TO_FSB(mp, cur->bc_ag.pag->pag_agno, rec->rm_startblock);
rec_daddr = XFS_FSB_TO_DADDR(mp, fsb);
- return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
+ return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr, 0);
}
/* Transform a bnobt irec into a fsmap */
@@ -382,7 +409,7 @@ xfs_getfsmap_datadev_bnobt_helper(
irec.rm_offset = 0;
irec.rm_flags = 0;
- return xfs_getfsmap_helper(cur->bc_tp, info, &irec, rec_daddr);
+ return xfs_getfsmap_helper(cur->bc_tp, info, &irec, rec_daddr, 0);
}
/* Set rmap flags based on the getfsmap flags */
@@ -409,31 +436,25 @@ xfs_getfsmap_logdev(
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_rmap_irec rmap;
- int error;
+ xfs_daddr_t rec_daddr, len_daddr;
+ xfs_fsblock_t start_fsb, end_fsb;
+ uint64_t eofs;
- /* Set up search keys */
- info->low.rm_startblock = XFS_BB_TO_FSBT(mp, keys[0].fmr_physical);
- info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
- error = xfs_fsmap_owner_to_rmap(&info->low, keys);
- if (error)
- return error;
- info->low.rm_blockcount = 0;
- xfs_getfsmap_set_irec_flags(&info->low, &keys[0]);
+ eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
+ if (keys[0].fmr_physical >= eofs)
+ return 0;
+ start_fsb = XFS_BB_TO_FSBT(mp,
+ keys[0].fmr_physical + keys[0].fmr_length);
+ end_fsb = XFS_BB_TO_FSB(mp, min(eofs - 1, keys[1].fmr_physical));
- error = xfs_fsmap_owner_to_rmap(&info->high, keys + 1);
- if (error)
- return error;
- info->high.rm_startblock = -1U;
- info->high.rm_owner = ULLONG_MAX;
- info->high.rm_offset = ULLONG_MAX;
- info->high.rm_blockcount = 0;
- info->high.rm_flags = XFS_RMAP_KEY_FLAGS | XFS_RMAP_REC_FLAGS;
- info->missing_owner = XFS_FMR_OWN_FREE;
+ /* Adjust the low key if we are continuing from where we left off. */
+ if (keys[0].fmr_length > 0)
+ info->low_daddr = XFS_FSB_TO_BB(mp, start_fsb);
- trace_xfs_fsmap_low_key(mp, info->dev, NULLAGNUMBER, &info->low);
- trace_xfs_fsmap_high_key(mp, info->dev, NULLAGNUMBER, &info->high);
+ trace_xfs_fsmap_low_key_linear(mp, info->dev, start_fsb);
+ trace_xfs_fsmap_high_key_linear(mp, info->dev, end_fsb);
- if (keys[0].fmr_physical > 0)
+ if (start_fsb > 0)
return 0;
/* Fabricate an rmap entry for the external log device. */
@@ -443,7 +464,9 @@ xfs_getfsmap_logdev(
rmap.rm_offset = 0;
rmap.rm_flags = 0;
- return xfs_getfsmap_helper(tp, info, &rmap, 0);
+ rec_daddr = XFS_FSB_TO_BB(mp, rmap.rm_startblock);
+ len_daddr = XFS_FSB_TO_BB(mp, rmap.rm_blockcount);
+ return xfs_getfsmap_helper(tp, info, &rmap, rec_daddr, len_daddr);
}
#ifdef CONFIG_XFS_RT
@@ -457,72 +480,58 @@ xfs_getfsmap_rtdev_rtbitmap_helper(
{
struct xfs_getfsmap_info *info = priv;
struct xfs_rmap_irec irec;
- xfs_daddr_t rec_daddr;
+ xfs_rtblock_t rtbno;
+ xfs_daddr_t rec_daddr, len_daddr;
+
+ rtbno = rec->ar_startext * mp->m_sb.sb_rextsize;
+ rec_daddr = XFS_FSB_TO_BB(mp, rtbno);
+ irec.rm_startblock = rtbno;
+
+ rtbno = rec->ar_extcount * mp->m_sb.sb_rextsize;
+ len_daddr = XFS_FSB_TO_BB(mp, rtbno);
+ irec.rm_blockcount = rtbno;
- irec.rm_startblock = rec->ar_startext * mp->m_sb.sb_rextsize;
- rec_daddr = XFS_FSB_TO_BB(mp, irec.rm_startblock);
- irec.rm_blockcount = rec->ar_extcount * mp->m_sb.sb_rextsize;
irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
irec.rm_offset = 0;
irec.rm_flags = 0;
- return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
+ return xfs_getfsmap_helper(tp, info, &irec, rec_daddr, len_daddr);
}
-/* Execute a getfsmap query against the realtime device. */
+/* Execute a getfsmap query against the realtime device rtbitmap. */
STATIC int
-__xfs_getfsmap_rtdev(
+xfs_getfsmap_rtdev_rtbitmap(
struct xfs_trans *tp,
const struct xfs_fsmap *keys,
- int (*query_fn)(struct xfs_trans *,
- struct xfs_getfsmap_info *),
struct xfs_getfsmap_info *info)
{
+
+ struct xfs_rtalloc_rec alow = { 0 };
+ struct xfs_rtalloc_rec ahigh = { 0 };
struct xfs_mount *mp = tp->t_mountp;
- xfs_fsblock_t start_fsb;
- xfs_fsblock_t end_fsb;
+ xfs_rtblock_t start_rtb;
+ xfs_rtblock_t end_rtb;
uint64_t eofs;
- int error = 0;
+ int error;
- eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
+ eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rextents * mp->m_sb.sb_rextsize);
if (keys[0].fmr_physical >= eofs)
return 0;
- start_fsb = XFS_BB_TO_FSBT(mp, keys[0].fmr_physical);
- end_fsb = XFS_BB_TO_FSB(mp, min(eofs - 1, keys[1].fmr_physical));
+ start_rtb = XFS_BB_TO_FSBT(mp,
+ keys[0].fmr_physical + keys[0].fmr_length);
+ end_rtb = XFS_BB_TO_FSB(mp, min(eofs - 1, keys[1].fmr_physical));
- /* Set up search keys */
- info->low.rm_startblock = start_fsb;
- error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]);
- if (error)
- return error;
- info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
- info->low.rm_blockcount = 0;
- xfs_getfsmap_set_irec_flags(&info->low, &keys[0]);
-
- info->high.rm_startblock = end_fsb;
- error = xfs_fsmap_owner_to_rmap(&info->high, &keys[1]);
- if (error)
- return error;
- info->high.rm_offset = XFS_BB_TO_FSBT(mp, keys[1].fmr_offset);
- info->high.rm_blockcount = 0;
- xfs_getfsmap_set_irec_flags(&info->high, &keys[1]);
-
- trace_xfs_fsmap_low_key(mp, info->dev, NULLAGNUMBER, &info->low);
- trace_xfs_fsmap_high_key(mp, info->dev, NULLAGNUMBER, &info->high);
+ info->missing_owner = XFS_FMR_OWN_UNKNOWN;
- return query_fn(tp, info);
-}
+ /* Adjust the low key if we are continuing from where we left off. */
+ if (keys[0].fmr_length > 0) {
+ info->low_daddr = XFS_FSB_TO_BB(mp, start_rtb);
+ if (info->low_daddr >= eofs)
+ return 0;
+ }
-/* Actually query the realtime bitmap. */
-STATIC int
-xfs_getfsmap_rtdev_rtbitmap_query(
- struct xfs_trans *tp,
- struct xfs_getfsmap_info *info)
-{
- struct xfs_rtalloc_rec alow = { 0 };
- struct xfs_rtalloc_rec ahigh = { 0 };
- struct xfs_mount *mp = tp->t_mountp;
- int error;
+ trace_xfs_fsmap_low_key_linear(mp, info->dev, start_rtb);
+ trace_xfs_fsmap_high_key_linear(mp, info->dev, end_rtb);
xfs_ilock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
@@ -530,8 +539,8 @@ xfs_getfsmap_rtdev_rtbitmap_query(
* Set up query parameters to return free rtextents covering the range
* we want.
*/
- alow.ar_startext = info->low.rm_startblock;
- ahigh.ar_startext = info->high.rm_startblock;
+ alow.ar_startext = start_rtb;
+ ahigh.ar_startext = end_rtb;
do_div(alow.ar_startext, mp->m_sb.sb_rextsize);
if (do_div(ahigh.ar_startext, mp->m_sb.sb_rextsize))
ahigh.ar_startext++;
@@ -554,18 +563,6 @@ err:
xfs_iunlock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
return error;
}
-
-/* Execute a getfsmap query against the realtime device rtbitmap. */
-STATIC int
-xfs_getfsmap_rtdev_rtbitmap(
- struct xfs_trans *tp,
- const struct xfs_fsmap *keys,
- struct xfs_getfsmap_info *info)
-{
- info->missing_owner = XFS_FMR_OWN_UNKNOWN;
- return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query,
- info);
-}
#endif /* CONFIG_XFS_RT */
/* Execute a getfsmap query against the regular data device. */
@@ -606,9 +603,27 @@ __xfs_getfsmap_datadev(
error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]);
if (error)
return error;
- info->low.rm_blockcount = 0;
+ info->low.rm_blockcount = XFS_BB_TO_FSBT(mp, keys[0].fmr_length);
xfs_getfsmap_set_irec_flags(&info->low, &keys[0]);
+ /* Adjust the low key if we are continuing from where we left off. */
+ if (info->low.rm_blockcount == 0) {
+ /* empty */
+ } else if (XFS_RMAP_NON_INODE_OWNER(info->low.rm_owner) ||
+ (info->low.rm_flags & (XFS_RMAP_ATTR_FORK |
+ XFS_RMAP_BMBT_BLOCK |
+ XFS_RMAP_UNWRITTEN))) {
+ info->low.rm_startblock += info->low.rm_blockcount;
+ info->low.rm_owner = 0;
+ info->low.rm_offset = 0;
+
+ start_fsb += info->low.rm_blockcount;
+ if (XFS_FSB_TO_DADDR(mp, start_fsb) >= eofs)
+ return 0;
+ } else {
+ info->low.rm_offset += info->low.rm_blockcount;
+ }
+
info->high.rm_startblock = -1U;
info->high.rm_owner = ULLONG_MAX;
info->high.rm_offset = ULLONG_MAX;
@@ -659,12 +674,8 @@ __xfs_getfsmap_datadev(
* Set the AG low key to the start of the AG prior to
* moving on to the next AG.
*/
- if (pag->pag_agno == start_ag) {
- info->low.rm_startblock = 0;
- info->low.rm_owner = 0;
- info->low.rm_offset = 0;
- info->low.rm_flags = 0;
- }
+ if (pag->pag_agno == start_ag)
+ memset(&info->low, 0, sizeof(info->low));
/*
* If this is the last AG, report any gap at the end of it
@@ -791,6 +802,19 @@ xfs_getfsmap_check_keys(
struct xfs_fsmap *low_key,
struct xfs_fsmap *high_key)
{
+ if (low_key->fmr_flags & (FMR_OF_SPECIAL_OWNER | FMR_OF_EXTENT_MAP)) {
+ if (low_key->fmr_offset)
+ return false;
+ }
+ if (high_key->fmr_flags != -1U &&
+ (high_key->fmr_flags & (FMR_OF_SPECIAL_OWNER |
+ FMR_OF_EXTENT_MAP))) {
+ if (high_key->fmr_offset && high_key->fmr_offset != -1ULL)
+ return false;
+ }
+ if (high_key->fmr_length && high_key->fmr_length != -1ULL)
+ return false;
+
if (low_key->fmr_device > high_key->fmr_device)
return false;
if (low_key->fmr_device < high_key->fmr_device)
@@ -834,15 +858,15 @@ xfs_getfsmap_check_keys(
* ----------------
* There are multiple levels of keys and counters at work here:
* xfs_fsmap_head.fmh_keys -- low and high fsmap keys passed in;
- * these reflect fs-wide sector addrs.
+ * these reflect fs-wide sector addrs.
* dkeys -- fmh_keys used to query each device;
- * these are fmh_keys but w/ the low key
- * bumped up by fmr_length.
+ * these are fmh_keys but w/ the low key
+ * bumped up by fmr_length.
* xfs_getfsmap_info.next_daddr -- next disk addr we expect to see; this
* is how we detect gaps in the fsmap
* records and report them.
* xfs_getfsmap_info.low/high -- per-AG low/high keys computed from
- * dkeys; used to query the metadata.
+ * dkeys; used to query the metadata.
*/
int
xfs_getfsmap(
@@ -863,6 +887,8 @@ xfs_getfsmap(
if (!xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[0]) ||
!xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1]))
return -EINVAL;
+ if (!xfs_getfsmap_check_keys(&head->fmh_keys[0], &head->fmh_keys[1]))
+ return -EINVAL;
use_rmap = xfs_has_rmapbt(mp) &&
has_capability_noaudit(current, CAP_SYS_ADMIN);
@@ -901,26 +927,15 @@ xfs_getfsmap(
* blocks could be mapped to several other files/offsets.
* According to rmapbt record ordering, the minimal next
* possible record for the block range is the next starting
- * offset in the same inode. Therefore, bump the file offset to
- * continue the search appropriately. For all other low key
- * mapping types (attr blocks, metadata), bump the physical
- * offset as there can be no other mapping for the same physical
- * block range.
+ * offset in the same inode. Therefore, each fsmap backend bumps
+ * the file offset to continue the search appropriately. For
+ * all other low key mapping types (attr blocks, metadata), each
+ * fsmap backend bumps the physical offset as there can be no
+ * other mapping for the same physical block range.
*/
dkeys[0] = head->fmh_keys[0];
- if (dkeys[0].fmr_flags & (FMR_OF_SPECIAL_OWNER | FMR_OF_EXTENT_MAP)) {
- dkeys[0].fmr_physical += dkeys[0].fmr_length;
- dkeys[0].fmr_owner = 0;
- if (dkeys[0].fmr_offset)
- return -EINVAL;
- } else
- dkeys[0].fmr_offset += dkeys[0].fmr_length;
- dkeys[0].fmr_length = 0;
memset(&dkeys[1], 0xFF, sizeof(struct xfs_fsmap));
- if (!xfs_getfsmap_check_keys(dkeys, &head->fmh_keys[1]))
- return -EINVAL;
-
info.next_daddr = head->fmh_keys[0].fmr_physical +
head->fmh_keys[0].fmr_length;
info.fsmap_recs = fsmap_recs;
@@ -960,6 +975,8 @@ xfs_getfsmap(
info.dev = handlers[i].dev;
info.last = false;
info.pag = NULL;
+ info.low_daddr = -1ULL;
+ info.low.rm_blockcount = 0;
error = handlers[i].fn(tp, dkeys, &info);
if (error)
break;
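
The low-key handling added above exists so that userspace can pull the mapping out in pieces: the caller copies the last record it received into fmh_keys[0] and repeats the ioctl, and the nonzero fmr_length carried in that key is what xfs_getfsmap_rec_before_start() and the per-backend adjustments use to skip everything up to and including that record. A minimal userspace sketch of that loop, assuming the FS_IOC_GETFSMAP interface from <linux/fsmap.h>; the program is illustrative only and not part of this patch:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsmap.h>

int main(int argc, char **argv)
{
	struct fsmap_head *head;
	struct fsmap *last;
	unsigned int i, nrecs = 128;
	int fd;

	if (argc != 2)
		return 2;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	head = calloc(1, sizeof(*head) + nrecs * sizeof(struct fsmap));
	if (!head)
		return 1;
	head->fmh_count = nrecs;
	/* Low key stays zeroed; high key covers the whole filesystem. */
	head->fmh_keys[1].fmr_device = UINT_MAX;
	head->fmh_keys[1].fmr_flags = UINT_MAX;
	head->fmh_keys[1].fmr_physical = ULLONG_MAX;
	head->fmh_keys[1].fmr_owner = ULLONG_MAX;
	head->fmh_keys[1].fmr_offset = ULLONG_MAX;

	for (;;) {
		if (ioctl(fd, FS_IOC_GETFSMAP, head) < 0) {
			perror("FS_IOC_GETFSMAP");
			break;
		}
		if (!head->fmh_entries)
			break;
		for (i = 0; i < head->fmh_entries; i++) {
			struct fsmap *p = &head->fmh_recs[i];

			printf("dev 0x%x phys %llu len %llu owner 0x%llx\n",
			       p->fmr_device,
			       (unsigned long long)p->fmr_physical,
			       (unsigned long long)p->fmr_length,
			       (unsigned long long)p->fmr_owner);
		}
		last = &head->fmh_recs[head->fmh_entries - 1];
		if (last->fmr_flags & FMR_OF_LAST)
			break;
		/* Resume after the last record returned; its nonzero
		 * fmr_length is what the kernel-side low-key logic uses. */
		head->fmh_keys[0] = *last;
	}

	free(head);
	close(fd);
	return 0;
}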
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index fc61cc024023..79004d193e54 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -639,7 +639,6 @@ xfs_log_mount(
int num_bblks)
{
struct xlog *log;
- bool fatal = xfs_has_crc(mp);
int error = 0;
int min_logfsbs;
@@ -663,53 +662,37 @@ xfs_log_mount(
mp->m_log = log;
/*
- * Validate the given log space and drop a critical message via syslog
- * if the log size is too small that would lead to some unexpected
- * situations in transaction log space reservation stage.
+ * Now that we have set up the log and its internal geometry
+ * parameters, we can validate the given log space and drop a critical
+ * message via syslog if the log size is too small. A log that is too
+ * small can lead to unexpected situations in the transaction log space
+ * reservation stage. The superblock verifier has already validated all
+ * the other log geometry constraints, so we don't have to check those
+ * here.
*
- * Note: we can't just reject the mount if the validation fails. This
- * would mean that people would have to downgrade their kernel just to
- * remedy the situation as there is no way to grow the log (short of
- * black magic surgery with xfs_db).
+ * Note: For v4 filesystems, we can't just reject the mount if the
+ * validation fails. This would mean that people would have to
+ * downgrade their kernel just to remedy the situation as there is no
+ * way to grow the log (short of black magic surgery with xfs_db).
*
- * We can, however, reject mounts for CRC format filesystems, as the
+ * We can, however, reject mounts for V5 format filesystems, as the
* mkfs binary being used to make the filesystem should never create a
* filesystem with a log that is too small.
*/
min_logfsbs = xfs_log_calc_minimum_size(mp);
-
if (mp->m_sb.sb_logblocks < min_logfsbs) {
xfs_warn(mp,
"Log size %d blocks too small, minimum size is %d blocks",
mp->m_sb.sb_logblocks, min_logfsbs);
- error = -EINVAL;
- } else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
- xfs_warn(mp,
- "Log size %d blocks too large, maximum size is %lld blocks",
- mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
- error = -EINVAL;
- } else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
- xfs_warn(mp,
- "log size %lld bytes too large, maximum size is %lld bytes",
- XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
- XFS_MAX_LOG_BYTES);
- error = -EINVAL;
- } else if (mp->m_sb.sb_logsunit > 1 &&
- mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
- xfs_warn(mp,
- "log stripe unit %u bytes must be a multiple of block size",
- mp->m_sb.sb_logsunit);
- error = -EINVAL;
- fatal = true;
- }
- if (error) {
+
/*
* Log check errors are always fatal on v5; or whenever bad
* metadata leads to a crash.
*/
- if (fatal) {
+ if (xfs_has_crc(mp)) {
xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
ASSERT(0);
+ error = -EINVAL;
goto out_free_log;
}
xfs_crit(mp, "Log size out of supported range.");
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
index c4078d0ec108..4a9bbd3fe120 100644
--- a/fs/xfs/xfs_notify_failure.c
+++ b/fs/xfs/xfs_notify_failure.c
@@ -114,7 +114,8 @@ xfs_dax_notify_ddev_failure(
int error = 0;
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, daddr);
xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno);
- xfs_fsblock_t end_fsbno = XFS_DADDR_TO_FSB(mp, daddr + bblen);
+ xfs_fsblock_t end_fsbno = XFS_DADDR_TO_FSB(mp,
+ daddr + bblen - 1);
xfs_agnumber_t end_agno = XFS_FSB_TO_AGNO(mp, end_fsbno);
error = xfs_trans_alloc_empty(mp, &tp);
@@ -210,7 +211,7 @@ xfs_dax_notify_failure(
ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1;
/* Ignore the range out of filesystem area */
- if (offset + len < ddev_start)
+ if (offset + len - 1 < ddev_start)
return -ENXIO;
if (offset > ddev_end)
return -ENXIO;
@@ -222,8 +223,8 @@ xfs_dax_notify_failure(
len -= ddev_start - offset;
offset = 0;
}
- if (offset + len > ddev_end)
- len -= ddev_end - offset;
+ if (offset + len - 1 > ddev_end)
+ len = ddev_end - offset + 1;
return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len),
mf_flags);
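
To make the inclusive-end arithmetic concrete with made-up numbers: if the data device occupies bytes ddev_start = 4096 through ddev_end = 1048575 of the pmem device, a failure range of offset = 0, len = 4096 ends at byte 4095 and touches nothing inside the filesystem. The old check (offset + len < ddev_start, i.e. 4096 < 4096) let that range through, whereas the inclusive-end form (offset + len - 1 < ddev_start, i.e. 4095 < 4096) correctly returns -ENXIO. Likewise, when the range runs past the device, the old clamp subtracted (ddev_end - offset) from len instead of limiting len to the ddev_end - offset + 1 bytes that actually lie inside the device.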
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index abcc559f3c64..eb9102453aff 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -617,7 +617,8 @@ xfs_reflink_cancel_cow_blocks(
del.br_blockcount);
error = xfs_free_extent_later(*tpp, del.br_startblock,
- del.br_blockcount, NULL);
+ del.br_blockcount, NULL,
+ XFS_AG_RESV_NONE);
if (error)
break;
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 4db669203149..f3cc204bb4bf 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -3623,6 +3623,31 @@ DEFINE_FSMAP_EVENT(xfs_fsmap_low_key);
DEFINE_FSMAP_EVENT(xfs_fsmap_high_key);
DEFINE_FSMAP_EVENT(xfs_fsmap_mapping);
+DECLARE_EVENT_CLASS(xfs_fsmap_linear_class,
+ TP_PROTO(struct xfs_mount *mp, u32 keydev, uint64_t bno),
+ TP_ARGS(mp, keydev, bno),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, keydev)
+ __field(xfs_fsblock_t, bno)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->keydev = new_decode_dev(keydev);
+ __entry->bno = bno;
+ ),
+ TP_printk("dev %d:%d keydev %d:%d bno 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->keydev), MINOR(__entry->keydev),
+ __entry->bno)
+)
+#define DEFINE_FSMAP_LINEAR_EVENT(name) \
+DEFINE_EVENT(xfs_fsmap_linear_class, name, \
+ TP_PROTO(struct xfs_mount *mp, u32 keydev, uint64_t bno), \
+ TP_ARGS(mp, keydev, bno))
+DEFINE_FSMAP_LINEAR_EVENT(xfs_fsmap_low_key_linear);
+DEFINE_FSMAP_LINEAR_EVENT(xfs_fsmap_high_key_linear);
+
DECLARE_EVENT_CLASS(xfs_getfsmap_class,
TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap),
TP_ARGS(mp, fsmap),
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 7d4109af193e..1098452e7f95 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -823,7 +823,7 @@ xfs_trans_ail_update_bulk(
trace_xfs_ail_insert(lip, 0, lsn);
}
lip->li_lsn = lsn;
- list_add(&lip->li_ail, &tmp);
+ list_add_tail(&lip->li_ail, &tmp);
}
if (!list_empty(&tmp))
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 8372b0e7fd15..b14d165632e7 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -27,6 +27,8 @@
#define ACPI_BAY_HID "LNXIOBAY"
#define ACPI_DOCK_HID "LNXDOCK"
#define ACPI_ECDT_HID "LNXEC"
+/* SMBUS HID definition as supported by Microsoft Windows */
+#define ACPI_SMBUS_MS_HID "SMB0001"
/* Quirk for broken IBM BIOSes */
#define ACPI_SMBUS_IBM_HID "SMBUSIBM"
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 5e4b1f2369d2..570cd4da7210 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -3,86 +3,9 @@
#define __ASM_GENERIC_EXPORT_H
/*
- * This comment block is used by fixdep. Please do not remove.
- *
- * When CONFIG_MODVERSIONS is changed from n to y, all source files having
- * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
- * side effect of the *.o build rule.
+ * <asm/export.h> and <asm-generic/export.h> are deprecated.
+ * Please include <linux/export.h> directly.
*/
-
-#ifndef KSYM_FUNC
-#define KSYM_FUNC(x) x
-#endif
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define KSYM_ALIGN 4
-#elif defined(CONFIG_64BIT)
-#define KSYM_ALIGN 8
-#else
-#define KSYM_ALIGN 4
-#endif
-
-.macro __put, val, name
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
- .long \val - ., \name - ., 0
-#elif defined(CONFIG_64BIT)
- .quad \val, \name, 0
-#else
- .long \val, \name, 0
-#endif
-.endm
-
-/*
- * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
- * section flag requires it. Use '%progbits' instead of '@progbits' since the
- * former apparently works on all arches according to the binutils source.
- */
-
-.macro ___EXPORT_SYMBOL name,val,sec
-#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS)
- .section ___ksymtab\sec+\name,"a"
- .balign KSYM_ALIGN
-__ksymtab_\name:
- __put \val, __kstrtab_\name
- .previous
- .section __ksymtab_strings,"aMS",%progbits,1
-__kstrtab_\name:
- .asciz "\name"
- .previous
-#endif
-.endm
-
-#if defined(CONFIG_TRIM_UNUSED_KSYMS)
-
-#include <linux/kconfig.h>
-#include <generated/autoksyms.h>
-
-.macro __ksym_marker sym
- .section ".discard.ksym","a"
-__ksym_marker_\sym:
- .previous
-.endm
-
-#define __EXPORT_SYMBOL(sym, val, sec) \
- __ksym_marker sym; \
- __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, val, sec, conf) \
- ___cond_export_sym(sym, val, sec, conf)
-#define ___cond_export_sym(sym, val, sec, enabled) \
- __cond_export_sym_##enabled(sym, val, sec)
-#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
-#define __cond_export_sym_0(sym, val, sec) /* nothing */
-
-#else
-#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
-#endif
-
-#define EXPORT_SYMBOL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(name),)
-#define EXPORT_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
-#define EXPORT_DATA_SYMBOL(name) \
- __EXPORT_SYMBOL(name, name,)
-#define EXPORT_DATA_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, name,_gpl)
+#include <linux/export.h>
#endif
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index c0be2edeb484..9773582fd96e 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -74,8 +74,16 @@ extern unsigned long memory_end;
#define __va(x) ((void *)((unsigned long) (x)))
#define __pa(x) ((unsigned long) (x))
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+#define virt_to_pfn virt_to_pfn
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+ return __va(pfn << PAGE_SHIFT);
+}
+#define pfn_to_virt pfn_to_virt
#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
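
A quick illustration of why the shift belongs on the pfn rather than on the result of __va() (example values are hypothetical, assuming PAGE_SHIFT == 12): the two helpers should invert each other for page-aligned kernel addresses.

	void *kaddr = (void *)0xc0100000;	/* page-aligned kernel address */
	unsigned long pfn = virt_to_pfn(kaddr);	/* __pa(kaddr) >> PAGE_SHIFT */
	void *back = pfn_to_virt(pfn);		/* __va(pfn << PAGE_SHIFT) == kaddr */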
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index da9e5629ea43..0587354ba678 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -688,7 +688,7 @@
/* init and exit section handling */
#define INIT_DATA \
KEEP(*(SORT(___kentry+*))) \
- *(.init.data init.data.*) \
+ *(.init.data .init.data.*) \
MEM_DISCARD(init.data*) \
KERNEL_CTORS() \
MCOUNT_REC() \
@@ -1016,6 +1016,7 @@
PATCHABLE_DISCARDS \
*(.discard) \
*(.discard.*) \
+ *(.export_symbol) \
*(.modinfo) \
/* ld.bfd warns about .gnu.version* even when not emitted */ \
*(.gnu.version*) \
diff --git a/include/dt-bindings/clock/qcom,sm8350-videocc.h b/include/dt-bindings/clock/qcom,sm8350-videocc.h
new file mode 100644
index 000000000000..b6945a448676
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8350-videocc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8350_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8350_H
+
+/* Clocks */
+#define VIDEO_CC_AHB_CLK_SRC 0
+#define VIDEO_CC_MVS0_CLK 1
+#define VIDEO_CC_MVS0_CLK_SRC 2
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 3
+#define VIDEO_CC_MVS0C_CLK 4
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 5
+#define VIDEO_CC_MVS1_CLK 6
+#define VIDEO_CC_MVS1_CLK_SRC 7
+#define VIDEO_CC_MVS1_DIV2_CLK 8
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 9
+#define VIDEO_CC_MVS1C_CLK 10
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 11
+#define VIDEO_CC_SLEEP_CLK 12
+#define VIDEO_CC_SLEEP_CLK_SRC 13
+#define VIDEO_CC_XO_CLK_SRC 14
+#define VIDEO_PLL0 15
+#define VIDEO_PLL1 16
+
+/* GDSCs */
+#define MVS0C_GDSC 0
+#define MVS1C_GDSC 1
+#define MVS0_GDSC 2
+#define MVS1_GDSC 3
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8996-cbf.h b/include/dt-bindings/interconnect/qcom,msm8996-cbf.h
new file mode 100644
index 000000000000..aac5e69f6bd5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8996-cbf.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_CBF_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_CBF_H
+
+#define MASTER_CBF_M4M 0
+#define SLAVE_CBF_M4M 1
+
+#endif
diff --git a/include/dt-bindings/leds/leds-lp55xx.h b/include/dt-bindings/leds/leds-lp55xx.h
new file mode 100644
index 000000000000..a4fb4567715d
--- /dev/null
+++ b/include/dt-bindings/leds/leds-lp55xx.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef _DT_BINDINGS_LEDS_LP55XX_H
+#define _DT_BINDINGS_LEDS_LP55XX_H
+
+#define LP55XX_CP_OFF 0
+#define LP55XX_CP_BYPASS 1
+#define LP55XX_CP_BOOST 2
+#define LP55XX_CP_AUTO 3
+
+#endif /* _DT_BINDINGS_LEDS_LP55XX_H */
diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h
index a90f3613c584..8d73a9c51e2b 100644
--- a/include/dt-bindings/mfd/stm32f7-rcc.h
+++ b/include/dt-bindings/mfd/stm32f7-rcc.h
@@ -64,6 +64,7 @@
#define STM32F7_RCC_APB1_TIM14 8
#define STM32F7_RCC_APB1_LPTIM1 9
#define STM32F7_RCC_APB1_WWDG 11
+#define STM32F7_RCC_APB1_CAN3 13
#define STM32F7_RCC_APB1_SPI2 14
#define STM32F7_RCC_APB1_SPI3 15
#define STM32F7_RCC_APB1_SPDIFRX 16
diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
index d3116c52ab72..669ca2d6abce 100644
--- a/include/dt-bindings/mux/ti-serdes.h
+++ b/include/dt-bindings/mux/ti-serdes.h
@@ -117,4 +117,66 @@
#define J721S2_SERDES0_LANE3_USB 0x2
#define J721S2_SERDES0_LANE3_IP4_UNUSED 0x3
+/* J784S4 */
+
+#define J784S4_SERDES0_LANE0_IP1_UNUSED 0x0
+#define J784S4_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J784S4_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE1_IP1_UNUSED 0x0
+#define J784S4_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J784S4_SERDES0_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE2_PCIE3_LANE0 0x0
+#define J784S4_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J784S4_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE3_PCIE3_LANE1 0x0
+#define J784S4_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J784S4_SERDES0_LANE3_USB 0x2
+#define J784S4_SERDES0_LANE3_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE0_QSGMII_LANE3 0x0
+#define J784S4_SERDES1_LANE0_PCIE0_LANE0 0x1
+#define J784S4_SERDES1_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES1_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE1_QSGMII_LANE4 0x0
+#define J784S4_SERDES1_LANE1_PCIE0_LANE1 0x1
+#define J784S4_SERDES1_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES1_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE2_QSGMII_LANE1 0x0
+#define J784S4_SERDES1_LANE2_PCIE0_LANE2 0x1
+#define J784S4_SERDES1_LANE2_PCIE2_LANE0 0x2
+#define J784S4_SERDES1_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE3_QSGMII_LANE2 0x0
+#define J784S4_SERDES1_LANE3_PCIE0_LANE3 0x1
+#define J784S4_SERDES1_LANE3_PCIE2_LANE1 0x2
+#define J784S4_SERDES1_LANE3_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE0_QSGMII_LANE5 0x0
+#define J784S4_SERDES2_LANE0_IP2_UNUSED 0x1
+#define J784S4_SERDES2_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE1_QSGMII_LANE6 0x0
+#define J784S4_SERDES2_LANE1_IP2_UNUSED 0x1
+#define J784S4_SERDES2_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE2_QSGMII_LANE7 0x0
+#define J784S4_SERDES2_LANE2_QSGMII_LANE1 0x1
+#define J784S4_SERDES2_LANE2_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE3_QSGMII_LANE8 0x0
+#define J784S4_SERDES2_LANE3_QSGMII_LANE2 0x1
+#define J784S4_SERDES2_LANE3_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE3_IP4_UNUSED 0x3
+
#endif /* _DT_BINDINGS_MUX_TI_SERDES */
diff --git a/include/dt-bindings/reset/qcom,sm8350-videocc.h b/include/dt-bindings/reset/qcom,sm8350-videocc.h
new file mode 100644
index 000000000000..cd356b207a4a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8350-videocc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_VIDEO_CC_SM8350_H
+#define _DT_BINDINGS_RESET_QCOM_VIDEO_CC_SM8350_H
+
+#define VIDEO_CC_CVP_INTERFACE_BCR 0
+#define VIDEO_CC_CVP_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_CVP_MVS0C_BCR 3
+#define VIDEO_CC_CVP_MVS1_BCR 4
+#define VIDEO_CC_MVS1C_CLK_ARES 5
+#define VIDEO_CC_CVP_MVS1C_BCR 6
+
+#endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 1a6a695ca67a..847da6fc2713 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -92,8 +92,12 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
/*
* Evaluates as true when emulating PMUv3p5, and false otherwise.
*/
-#define kvm_pmu_is_3p5(vcpu) \
- (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
+#define kvm_pmu_is_3p5(vcpu) ({ \
+ u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); \
+ u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); \
+ \
+ pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5; \
+})
u8 kvm_arm_pmu_get_pmuver_limit(void);
diff --git a/include/kvm/iodev.h b/include/kvm/iodev.h
index d75fc4365746..56619e33251e 100644
--- a/include/kvm/iodev.h
+++ b/include/kvm/iodev.h
@@ -55,10 +55,4 @@ static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
: -EOPNOTSUPP;
}
-static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
-{
- if (dev->ops->destructor)
- dev->ops->destructor(dev);
-}
-
#endif /* __KVM_IODEV_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d41a05d68166..641dc4843987 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -70,19 +70,6 @@ static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
kfree(fwnode);
}
-/**
- * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
- * the PCI-defined class-code information
- *
- * @_cls : the class, subclass, prog-if triple for this device
- * @_msk : the class mask for this device
- *
- * This macro is used to create a struct acpi_device_id that matches a
- * specific PCI class. The .id and .driver_data fields will be left
- * initialized with the default value.
- */
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
-
static inline bool has_acpi_companion(struct device *dev)
{
return is_acpi_device_node(dev->fwnode);
@@ -720,6 +707,9 @@ extern int acpi_nvs_register(__u64 start, __u64 size);
extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
void *data);
+const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids,
+ const struct acpi_device *adev);
+
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
@@ -782,7 +772,6 @@ const char *acpi_get_subsystem_id(acpi_handle handle);
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
#include <acpi/acpi_numa.h>
@@ -936,6 +925,12 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
struct acpi_device_id;
+static inline const struct acpi_device_id *acpi_match_acpi_device(
+ const struct acpi_device_id *ids, const struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline const struct acpi_device_id *acpi_match_device(
const struct acpi_device_id *ids, const struct device *dev)
{
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 5001e14c5c06..c60a6a14638c 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -107,7 +107,7 @@ enum amba_vendor {
extern struct bus_type amba_bustype;
-#define to_amba_device(d) container_of(d, struct amba_device, dev)
+#define to_amba_device(d) container_of_const(d, struct amba_device, dev)
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 583fe3b49a49..cc060da51bec 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -94,6 +94,14 @@
*/
#define FFA_PAGE_SIZE SZ_4K
+/*
+ * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
+ * query for FFA_RXTX_MAP.
+ */
+#define FFA_FEAT_RXTX_MIN_SZ_4K 0
+#define FFA_FEAT_RXTX_MIN_SZ_64K 1
+#define FFA_FEAT_RXTX_MIN_SZ_16K 2
+
/* FFA Bus/Device/Driver related */
struct ffa_device {
u32 id;
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 7d6d73b78147..03644237e1ef 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -302,12 +302,10 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
#endif
/*
- * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32
- * machines the order of hi and lo parts of numbers match the bitmap structure.
- * In both cases conversion is not needed when copying data from/to arrays of
- * u64.
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. So,
+ * the conversion is not needed when copying data from/to arrays of u64.
*/
-#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+#if BITS_PER_LONG == 32
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
#else
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index f401067ac03a..2b7fb8e87793 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -715,6 +715,8 @@ int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_request(struct request *rq);
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+ unsigned int poll_flags);
bool blk_mq_queue_inflight(struct request_queue *q);
@@ -852,7 +854,11 @@ static inline bool blk_mq_add_to_batch(struct request *req,
struct io_comp_batch *iob, int ioerror,
void (*complete)(struct io_comp_batch *))
{
- if (!iob || (req->rq_flags & RQF_USE_SCHED) || ioerror ||
+ /*
+ * blk_mq_end_request_batch() can't end requests allocated from
+ * sched tags
+ */
+ if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
(req->end_io && !blk_rq_is_passthrough(req)))
return false;
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index cc35d010fa94..e1a3c9c9754c 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -3,6 +3,7 @@
#define __LINUX_BOOTMEM_INFO_H
#include <linux/mm.h>
+#include <linux/kmemleak.h>
/*
* Types for free bootmem stored in page->lru.next. These have to be in
@@ -59,6 +60,7 @@ static inline void get_page_bootmem(unsigned long info, struct page *page,
static inline void free_bootmem_page(struct page *page)
{
+ kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
free_reserved_page(page);
}
#endif
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
new file mode 100644
index 000000000000..53f1a7a932b0
--- /dev/null
+++ b/include/linux/cleanup.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GUARDS_H
+#define __LINUX_GUARDS_H
+
+#include <linux/compiler.h>
+
+/*
+ * DEFINE_FREE(name, type, free):
+ * simple helper macro that defines the required wrapper for a __free()
+ * based cleanup function. @free is an expression using '_T' to access
+ * the variable.
+ *
+ * __free(name):
+ * variable attribute to add a scope-based cleanup to the variable.
+ *
+ * no_free_ptr(var):
+ * like a non-atomic xchg(var, NULL), such that the cleanup function will
+ * be inhibited -- provided it sanely deals with a NULL value.
+ *
+ * return_ptr(p):
+ * returns p while inhibiting the __free().
+ *
+ * Ex.
+ *
+ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+ *
+ * struct obj *p __free(kfree) = kmalloc(...);
+ * if (!p)
+ * return NULL;
+ *
+ * if (!init_obj(p))
+ * return NULL;
+ *
+ * return_ptr(p);
+ */
+
+#define DEFINE_FREE(_name, _type, _free) \
+ static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
+
+#define __free(_name) __cleanup(__free_##_name)
+
+#define no_free_ptr(p) \
+ ({ __auto_type __ptr = (p); (p) = NULL; __ptr; })
+
+#define return_ptr(p) return no_free_ptr(p)
+
+
+/*
+ * DEFINE_CLASS(name, type, exit, init, init_args...):
+ * helper to define the destructor and constructor for a type.
+ * @exit is an expression using '_T' -- similar to FREE above.
+ * @init is an expression in @init_args resulting in @type
+ *
+ * EXTEND_CLASS(name, ext, init, init_args...):
+ * extends class @name to @name@ext with the new constructor
+ *
+ * CLASS(name, var)(args...):
+ * declare the variable @var as an instance of the named class
+ *
+ * Ex.
+ *
+ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
+ *
+ * CLASS(fdget, f)(fd);
+ * if (!f.file)
+ * return -EBADF;
+ *
+ * // use 'f' without concern
+ */
+
+#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
+typedef _type class_##_name##_t; \
+static inline void class_##_name##_destructor(_type *p) \
+{ _type _T = *p; _exit; } \
+static inline _type class_##_name##_constructor(_init_args) \
+{ _type t = _init; return t; }
+
+#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+typedef class_##_name##_t class_##_name##ext##_t; \
+static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
+{ class_##_name##_destructor(p); } \
+static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+{ class_##_name##_t t = _init; return t; }
+
+#define CLASS(_name, var) \
+ class_##_name##_t var __cleanup(class_##_name##_destructor) = \
+ class_##_name##_constructor
+
+
+/*
+ * DEFINE_GUARD(name, type, lock, unlock):
+ * trivial wrapper around DEFINE_CLASS() above specifically
+ * for locks.
+ *
+ * guard(name):
+ * an anonymous instance of the (guard) class
+ *
+ * scoped_guard (name, args...) { }:
+ * similar to CLASS(name, scope)(args), except the variable (with the
+ * explicit name 'scope') is declared in a for-loop such that its scope is
+ * bound to the next (compound) statement.
+ *
+ */
+
+#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+ DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
+
+#define guard(_name) \
+ CLASS(_name, __UNIQUE_ID(guard))
+
+#define scoped_guard(_name, args...) \
+ for (CLASS(_name, scope)(args), \
+ *done = NULL; !done; done = (void *)1)
+
+/*
+ * Additional helper macros for generating lock guards with types, either for
+ * locks that don't have a native type (eg. RCU, preempt) or those that need a
+ * 'fat' pointer (eg. spin_lock_irqsave).
+ *
+ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ *
+ * will result in the following type:
+ *
+ * typedef struct {
+ * type *lock; // 'type := void' for the _0 variant
+ * __VA_ARGS__;
+ * } class_##name##_t;
+ *
+ * As above, both _lock and _unlock are statements, except this time '_T' will
+ * be a pointer to the above struct.
+ */
+
+#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
+typedef struct { \
+ _type *lock; \
+ __VA_ARGS__; \
+} class_##_name##_t; \
+ \
+static inline void class_##_name##_destructor(class_##_name##_t *_T) \
+{ \
+ if (_T->lock) { _unlock; } \
+}
+
+
+#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
+static inline class_##_name##_t class_##_name##_constructor(_type *l) \
+{ \
+ class_##_name##_t _t = { .lock = l }, *_T = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
+static inline class_##_name##_t class_##_name##_constructor(void) \
+{ \
+ class_##_name##_t _t = { .lock = (void*)1 }, \
+ *_T __maybe_unused = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
+__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
+
+#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
+__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_0(_name, _lock)
+
+#endif /* __LINUX_GUARDS_H */
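
For orientation, a minimal sketch of how the helpers above might be wired up for a real lock type; the DEFINE_GUARD() line and the widget structures/helpers are assumptions made up for the example, not something this header or this patch provides:

/* Assumed to live in a lock header, e.g. for mutexes: */
DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))

static DEFINE_MUTEX(widget_lock);
static LIST_HEAD(widget_list);

static int widget_add(struct widget *w)		/* struct widget is hypothetical */
{
	/* widget_lock is dropped automatically on every return path. */
	guard(mutex)(&widget_lock);

	if (widget_is_duplicate(w))		/* hypothetical helper */
		return -EEXIST;

	list_add(&w->node, &widget_list);
	return 0;
}

static void widget_poke_all(void)
{
	struct widget *w;

	/* The lock is held exactly for the duration of this block. */
	scoped_guard (mutex, &widget_lock) {
		list_for_each_entry(w, &widget_list, node)
			widget_poke(w);		/* hypothetical helper */
	}
}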
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 44b1736c95b5..1cfa4f0f490a 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -581,11 +581,7 @@ asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
struct io_event __user *events,
struct __kernel_timespec __user *timeout,
const struct __compat_aio_sigset __user *usig);
-
-/* fs/cookies.c */
asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
-
-/* fs/eventpoll.c */
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct epoll_event __user *events,
int maxevents, int timeout,
@@ -597,18 +593,12 @@ asmlinkage long compat_sys_epoll_pwait2(int epfd,
const struct __kernel_timespec __user *timeout,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-
-/* fs/fcntl.c */
asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
-
-/* fs/ioctl.c */
asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
-
-/* fs/open.c */
asmlinkage long compat_sys_statfs(const char __user *pathname,
struct compat_statfs __user *buf);
asmlinkage long compat_sys_statfs64(const char __user *pathname,
@@ -623,13 +613,9 @@ asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
/* No generic prototype for truncate64, ftruncate64, fallocate */
asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
int flags, umode_t mode);
-
-/* fs/readdir.c */
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-
-/* fs/read_write.c */
asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
/* No generic prototype for pread64 and pwrite64 */
asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
@@ -649,14 +635,10 @@ asmlinkage long compat_sys_pwritev64(unsigned long fd,
const struct iovec __user *vec,
unsigned long vlen, loff_t pos);
#endif
-
-/* fs/sendfile.c */
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
compat_off_t __user *offset, compat_size_t count);
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
compat_loff_t __user *offset, compat_size_t count);
-
-/* fs/select.c */
asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
@@ -677,68 +659,45 @@ asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds,
struct __kernel_timespec __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-
-/* fs/signalfd.c */
asmlinkage long compat_sys_signalfd4(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, int flags);
-
-/* fs/stat.c */
asmlinkage long compat_sys_newfstatat(unsigned int dfd,
const char __user *filename,
struct compat_stat __user *statbuf,
int flag);
asmlinkage long compat_sys_newfstat(unsigned int fd,
struct compat_stat __user *statbuf);
-
-/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */
-
-/* kernel/exit.c */
+/* No generic prototype for sync_file_range and sync_file_range2 */
asmlinkage long compat_sys_waitid(int, compat_pid_t,
struct compat_siginfo __user *, int,
struct compat_rusage __user *);
-
-
-
-/* kernel/futex.c */
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
compat_size_t len);
asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
-
-/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
struct old_itimerval32 __user *it);
asmlinkage long compat_sys_setitimer(int which,
struct old_itimerval32 __user *in,
struct old_itimerval32 __user *out);
-
-/* kernel/kexec.c */
asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
compat_ulong_t nr_segments,
struct compat_kexec_segment __user *,
compat_ulong_t flags);
-
-/* kernel/posix-timers.c */
asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
-
-/* kernel/ptrace.c */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
compat_long_t addr, compat_long_t data);
-
-/* kernel/sched/core.c */
asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
-
-/* kernel/signal.c */
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
compat_stack_t __user *uoss_ptr);
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
@@ -763,25 +722,17 @@ asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
-
-/* kernel/sys.c */
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
asmlinkage long compat_sys_getrlimit(unsigned int resource,
struct compat_rlimit __user *rlim);
asmlinkage long compat_sys_setrlimit(unsigned int resource,
struct compat_rlimit __user *rlim);
asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
-
-/* kernel/time.c */
asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
-
-/* kernel/timer.c */
asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
-
-/* ipc/mqueue.c */
asmlinkage long compat_sys_mq_open(const char __user *u_name,
int oflag, compat_mode_t mode,
struct compat_mq_attr __user *u_attr);
@@ -790,22 +741,14 @@ asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
const struct compat_mq_attr __user *u_mqstat,
struct compat_mq_attr __user *u_omqstat);
-
-/* ipc/msg.c */
asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr);
asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);
-
-/* ipc/sem.c */
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
-
-/* ipc/shm.c */
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
-
-/* net/socket.c */
asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
unsigned flags, struct sockaddr __user *addr,
int __user *addrlen);
@@ -813,20 +756,13 @@ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
unsigned flags);
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
unsigned int flags);
-
-/* mm/filemap.c: No generic prototype for readahead */
-
-/* security/keys/keyctl.c */
+/* No generic prototype for readahead */
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
-
-/* arch/example/kernel/sys_example.c */
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp);
-
-/* mm/fadvise.c: No generic prototype for fadvise64_64 */
-
-/* mm/, CONFIG_MMU only */
+/* No generic prototype for fadvise64_64 */
+/* CONFIG_MMU only */
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
@@ -896,18 +832,18 @@ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
-/* obsolete: fs/readdir.c */
+/* obsolete */
asmlinkage long compat_sys_old_readdir(unsigned int fd,
struct compat_old_linux_dirent __user *,
unsigned int count);
-/* obsolete: fs/select.c */
+/* obsolete */
asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
-/* obsolete: ipc */
+/* obsolete */
asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
-/* obsolete: kernel/signal.c */
+/* obsolete */
#ifdef __ARCH_WANT_SYS_SIGPENDING
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
#endif
@@ -922,7 +858,7 @@ asmlinkage long compat_sys_sigaction(int sig,
struct compat_old_sigaction __user *oact);
#endif
-/* obsolete: net/socket.c */
+/* obsolete */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
#ifdef __ARCH_WANT_COMPAT_TRUNCATE64
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 6cfd6902bd5b..9b673fefcef8 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -5,6 +5,15 @@
/* Compiler specific definitions for Clang compiler */
+/*
+ * Clang prior to 17 is being silly and considers many __cleanup() variables
+ * as unused (because they are, their sole purpose is to go out of scope).
+ *
+ * https://reviews.llvm.org/D152180
+ */
+#undef __cleanup
+#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
+
/* same as gcc, this was present in clang-2.6 so we can assume it works
* with any version that can compile the kernel
*/
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 571fa7924f74..00efa35c350f 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -70,6 +70,12 @@
#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
+ */
+#define __cleanup(func) __attribute__((__cleanup__(func)))
+
+/*
* Note the long name.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f19a47b9bb5a..bf70987240e4 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -41,10 +41,11 @@ enum coresight_dev_type {
CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
CORESIGHT_DEV_TYPE_HELPER,
- CORESIGHT_DEV_TYPE_ECT,
+ CORESIGHT_DEV_TYPE_MAX
};
enum coresight_dev_subtype_sink {
+ CORESIGHT_DEV_SUBTYPE_SINK_DUMMY,
CORESIGHT_DEV_SUBTYPE_SINK_PORT,
CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
@@ -66,12 +67,7 @@ enum coresight_dev_subtype_source {
enum coresight_dev_subtype_helper {
CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
-};
-
-/* Embedded Cross Trigger (ECT) sub-types */
-enum coresight_dev_subtype_ect {
- CORESIGHT_DEV_SUBTYPE_ECT_NONE,
- CORESIGHT_DEV_SUBTYPE_ECT_CTI,
+ CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI
};
/**
@@ -84,8 +80,6 @@ enum coresight_dev_subtype_ect {
* by @coresight_dev_subtype_source.
* @helper_subtype: type of helper this component is, as defined
* by @coresight_dev_subtype_helper.
- * @ect_subtype: type of cross trigger this component is, as
- * defined by @coresight_dev_subtype_ect
*/
union coresight_dev_subtype {
/* We have some devices which acts as LINK and SINK */
@@ -95,21 +89,25 @@ union coresight_dev_subtype {
};
enum coresight_dev_subtype_source source_subtype;
enum coresight_dev_subtype_helper helper_subtype;
- enum coresight_dev_subtype_ect ect_subtype;
};
/**
* struct coresight_platform_data - data harvested from the firmware
* specification.
*
- * @nr_inport: Number of elements for the input connections.
- * @nr_outport: Number of elements for the output connections.
- * @conns: Sparse array of nr_outport connections from this component.
+ * @nr_inconns: Number of elements for the input connections.
+ * @nr_outconns: Number of elements for the output connections.
+ * @out_conns: Array of nr_outconns pointers to connections from this
+ * component.
+ * @in_conns: Sparse array of pointers to input connections. Sparse
+ * because the source device owns the connection so when it's
+ * unloaded the connection leaves an empty slot.
*/
struct coresight_platform_data {
- int nr_inport;
- int nr_outport;
- struct coresight_connection *conns;
+ int nr_inconns;
+ int nr_outconns;
+ struct coresight_connection **out_conns;
+ struct coresight_connection **in_conns;
};
/**
@@ -164,19 +162,42 @@ struct coresight_desc {
/**
* struct coresight_connection - representation of a single connection
- * @outport: a connection's output port number.
- * @child_port: remote component's port number @output is connected to.
- * @chid_fwnode: remote component's fwnode handle.
- * @child_dev: a @coresight_device representation of the component
- connected to @outport.
+ * @src_port: a connection's output port number.
+ * @dest_port: destination's input port number @src_port is connected to.
+ * @dest_fwnode: destination component's fwnode handle.
+ * @dest_dev: a @coresight_device representation of the component
+ connected to @src_port. NULL until the device is created
* @link: Representation of the connection as a sysfs link.
+ *
+ * The full connection structure looks like this, where in_conns stores
+ * references to the same connection as the source device's out_conns.
+ *
+ * +-----------------------------+ +-----------------------------+
+ * |coresight_device | |coresight_connection |
+ * |-----------------------------| |-----------------------------|
+ * | | | |
+ * | | | dest_dev*|<--
+ * |pdata->out_conns[nr_outconns]|<->|src_dev* | |
+ * | | | | |
+ * +-----------------------------+ +-----------------------------+ |
+ * |
+ * +-----------------------------+ |
+ * |coresight_device | |
+ * |------------------------------ |
+ * | | |
+ * | pdata->in_conns[nr_inconns]|<--
+ * | |
+ * +-----------------------------+
*/
struct coresight_connection {
- int outport;
- int child_port;
- struct fwnode_handle *child_fwnode;
- struct coresight_device *child_dev;
+ int src_port;
+ int dest_port;
+ struct fwnode_handle *dest_fwnode;
+ struct coresight_device *dest_dev;
struct coresight_sysfs_link *link;
+ struct coresight_device *src_dev;
+ atomic_t src_refcnt;
+ atomic_t dest_refcnt;
};
/**
@@ -211,8 +232,6 @@ struct coresight_sysfs_link {
* from source to that sink.
* @ea: Device attribute for sink representation under PMU directory.
* @def_sink: cached reference to default sink found for this device.
- * @ect_dev: Associated cross trigger device. Not part of the trace data
- * path or connections.
* @nr_links: number of sysfs links created to other components from this
* device. These will appear in the "connections" group.
* @has_conns_grp: Have added a "connections" group for sysfs links.
@@ -228,19 +247,16 @@ struct coresight_device {
const struct coresight_ops *ops;
struct csdev_access access;
struct device dev;
- atomic_t *refcnt;
+ atomic_t refcnt;
bool orphan;
bool enable; /* true only if configured as part of a path */
/* sink specific fields */
bool activated; /* true only if a sink is part of a path */
struct dev_ext_attribute *ea;
struct coresight_device *def_sink;
- /* cross trigger handling */
- struct coresight_device *ect_dev;
/* sysfs links between components */
int nr_links;
bool has_conns_grp;
- bool ect_enabled; /* true only if associated ect device is enabled */
/* system configuration and feature lists */
struct list_head feature_csdev_list;
struct list_head config_csdev_list;
@@ -272,6 +288,12 @@ static struct coresight_dev_list (var) = { \
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+enum cs_mode {
+ CS_MODE_DISABLED,
+ CS_MODE_SYSFS,
+ CS_MODE_PERF,
+};
+
#define source_ops(csdev) csdev->ops->source_ops
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
@@ -288,7 +310,8 @@ static struct coresight_dev_list (var) = { \
* @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
- int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ void *data);
int (*disable)(struct coresight_device *csdev);
void *(*alloc_buffer)(struct coresight_device *csdev,
struct perf_event *event, void **pages,
@@ -306,8 +329,12 @@ struct coresight_ops_sink {
* @disable: disables flow between iport and oport.
*/
struct coresight_ops_link {
- int (*enable)(struct coresight_device *csdev, int iport, int oport);
- void (*disable)(struct coresight_device *csdev, int iport, int oport);
+ int (*enable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
+ void (*disable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
};
/**
@@ -320,8 +347,8 @@ struct coresight_ops_link {
*/
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
- int (*enable)(struct coresight_device *csdev,
- struct perf_event *event, u32 mode);
+ int (*enable)(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
};
@@ -336,27 +363,16 @@ struct coresight_ops_source {
* @disable : Disable the device
*/
struct coresight_ops_helper {
- int (*enable)(struct coresight_device *csdev, void *data);
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ void *data);
int (*disable)(struct coresight_device *csdev, void *data);
};
-/**
- * struct coresight_ops_ect - Ops for an embedded cross trigger device
- *
- * @enable : Enable the device
- * @disable : Disable the device
- */
-struct coresight_ops_ect {
- int (*enable)(struct coresight_device *csdev);
- int (*disable)(struct coresight_device *csdev);
-};
-
struct coresight_ops {
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
const struct coresight_ops_helper *helper_ops;
- const struct coresight_ops_ect *ect_ops;
};
#if IS_ENABLED(CONFIG_CORESIGHT)
@@ -602,5 +618,18 @@ static inline void coresight_write64(struct coresight_device *csdev, u64 val, u3
extern int coresight_get_cpu(struct device *dev);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
+struct coresight_connection *
+coresight_add_out_conn(struct device *dev,
+ struct coresight_platform_data *pdata,
+ const struct coresight_connection *new_conn);
+int coresight_add_in_conn(struct coresight_connection *conn);
+struct coresight_device *
+coresight_find_input_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
+struct coresight_device *
+coresight_find_output_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
#endif /* _LINUX_CORESIGHT_H */
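
The coresight.h hunk above replaces the per-device conns array with connection objects shared between a source's out_conns and a destination's in_conns. A hedged sketch of how a driver-side helper might build one such connection with the renamed fields and the new coresight_add_out_conn()/coresight_add_in_conn() helpers; the ERR_PTR convention and the fwnode handling are assumptions, not taken from this patch.

/* Sketch only: assumes pdata was already allocated by the platform code. */
static int example_add_link(struct device *dev,
			    struct coresight_platform_data *pdata,
			    struct fwnode_handle *dest_fwnode)
{
	struct coresight_connection conn = {
		.src_port = 0,			/* our output port */
		.dest_port = 0,			/* remote input port */
		.dest_fwnode = dest_fwnode,	/* resolved to dest_dev later */
		.dest_dev = NULL,		/* filled once the device exists */
	};
	struct coresight_connection *added;

	added = coresight_add_out_conn(dev, pdata, &conn);
	if (IS_ERR(added))			/* assumed error convention */
		return PTR_ERR(added);

	/* Mirror the same connection into the destination's in_conns. */
	return coresight_add_in_conn(added);
}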
diff --git a/include/linux/dax.h b/include/linux/dax.h
index bf6258472e49..261944ec0887 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -261,6 +261,19 @@ static inline bool dax_mapping(struct address_space *mapping)
return mapping->host && IS_DAX(mapping->host);
}
+/*
+ * Due to dax's dual memory and block personalities, hwpoison reporting
+ * takes into consideration which personality is presently visible.
+ * When dax acts like a block device, such as in block IO, an encounter of
+ * dax hwpoison is reported as -EIO.
+ * When dax acts like memory, such as in page fault, a detection of hwpoison
+ * is reported as -EHWPOISON which leads to VM_FAULT_HWPOISON.
+ */
+static inline int dax_mem2blk_err(int err)
+{
+ return (err == -EHWPOISON) ? -EIO : err;
+}
+
#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_resource(int target_nid, struct resource *r);
#else
diff --git a/include/linux/device.h b/include/linux/device.h
index 472dd24d4823..bbaeabd04b0d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -30,6 +30,7 @@
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/driver.h>
+#include <linux/cleanup.h>
#include <asm/device.h>
struct device;
@@ -96,7 +97,12 @@ struct device_type {
const struct dev_pm_ops *pm;
};
-/* interface for exporting device attributes */
+/**
+ * struct device_attribute - Interface for exporting device attributes.
+ * @attr: sysfs attribute definition.
+ * @show: Show handler.
+ * @store: Store handler.
+ */
struct device_attribute {
struct attribute attr;
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
@@ -105,6 +111,11 @@ struct device_attribute {
const char *buf, size_t count);
};
+/**
+ * struct dev_ext_attribute - Exported device attribute with extra context.
+ * @attr: Exported device attribute.
+ * @var: Pointer to context.
+ */
struct dev_ext_attribute {
struct device_attribute attr;
void *var;
@@ -123,30 +134,124 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
+/**
+ * DEVICE_ATTR - Define a device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Convenience macro for defining a struct device_attribute.
+ *
+ * For example, ``DEVICE_ATTR(foo, 0644, foo_show, foo_store);`` expands to:
+ *
+ * .. code-block:: c
+ *
+ * struct device_attribute dev_attr_foo = {
+ * .attr = { .name = "foo", .mode = 0644 },
+ * .show = foo_show,
+ * .store = foo_store,
+ * };
+ */
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_PREALLOC - Define a preallocated device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Like DEVICE_ATTR(), but ``SYSFS_PREALLOC`` is set on @_mode.
+ */
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_PREALLOC(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_RW - Define a read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0644, @_show is <_name>_show,
+ * and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RW - Define an admin-only read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RW(), but @_mode is 0600.
+ */
#define DEVICE_ATTR_ADMIN_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
+
+/**
+ * DEVICE_ATTR_RO - Define a readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0444 and @_show is <_name>_show.
+ */
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RO - Define an admin-only readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RO(), but @_mode is 0400.
+ */
#define DEVICE_ATTR_ADMIN_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
+
+/**
+ * DEVICE_ATTR_WO - Define an admin-only writable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0200 and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
+
+/**
+ * DEVICE_ULONG_ATTR - Define a device attribute backed by an unsigned long.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of unsigned long.
+ *
+ * Like DEVICE_ATTR(), but @_show and @_store are automatically provided
+ * such that reads and writes to the attribute from userspace affect @_var.
+ */
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+
+/**
+ * DEVICE_INT_ATTR - Define a device attribute backed by an int.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of int.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is an int.
+ */
#define DEVICE_INT_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+
+/**
+ * DEVICE_BOOL_ATTR - Define a device attribute backed by a bool.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of bool.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is a bool.
+ */
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
+
#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
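
The kernel-doc added above documents what the DEVICE_ATTR_* helpers expand to. As a quick usage sketch (all names are made up), DEVICE_ATTR_RW() pairs with <name>_show/<name>_store handlers:

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);	/* illustrative value */
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return count;				/* accept and ignore input */
}

/* Creates dev_attr_foo with mode 0644, wired to foo_show/foo_store. */
static DEVICE_ATTR_RW(foo);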
@@ -223,6 +328,17 @@ static inline void *devm_kcalloc(struct device *dev,
{
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
+static inline __realloc_size(3, 4) void * __must_check
+devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return devm_krealloc(dev, p, bytes, flags);
+}
+
void devm_kfree(struct device *dev, const void *p);
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
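
devm_krealloc_array() mirrors devm_kcalloc(): it guards the element-count multiplication with check_mul_overflow() before deferring to devm_krealloc(). A hedged sketch of growing a managed array; the growth policy is an assumption for illustration.

static int example_grow(struct device *dev, u32 **items, size_t *nr)
{
	size_t new_nr = *nr ? *nr * 2 : 8;	/* illustrative growth policy */
	u32 *tmp;

	tmp = devm_krealloc_array(dev, *items, new_nr, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	*items = tmp;
	*nr = new_nr;
	return 0;
}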
@@ -700,6 +816,11 @@ static inline bool device_iommu_mapped(struct device *dev)
/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>
+/**
+ * dev_name - Return a device's name.
+ * @dev: Device with name to get.
+ * Return: The kobject name of the device, or its initial name if unavailable.
+ */
static inline const char *dev_name(const struct device *dev)
{
/* Use the init name until the kobject becomes available */
@@ -899,6 +1020,9 @@ void device_unregister(struct device *dev);
void device_initialize(struct device *dev);
int __must_check device_add(struct device *dev);
void device_del(struct device *dev);
+
+DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
+
int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
int device_for_each_child_reverse(struct device *dev, void *data,
@@ -1066,6 +1190,9 @@ extern int (*platform_notify_remove)(struct device *dev);
*/
struct device *get_device(struct device *dev);
void put_device(struct device *dev);
+
+DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
+
bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
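
The DEFINE_FREE() declarations added above hook put_device()/device_del() into the <linux/cleanup.h> scope-based cleanup machinery, so a reference taken in a function can be dropped automatically on every return path. A minimal sketch of the put_device case; the caller and its logic are assumptions.

static int example_use(struct device *parent)
{
	/* Reference is dropped by put_device() when 'dev' goes out of scope. */
	struct device *dev __free(put_device) = get_device(parent);

	if (!dev)
		return -ENODEV;

	/* ... use dev; no explicit put_device() needed on any path ... */
	return 0;
}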
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
index d2638d9259dc..3080747689f6 100644
--- a/include/linux/dma/edma.h
+++ b/include/linux/dma/edma.h
@@ -40,7 +40,7 @@ struct dw_edma_region {
* iATU windows. That will be done by the controller
* automatically.
*/
-struct dw_edma_core_ops {
+struct dw_edma_plat_ops {
int (*irq_vector)(struct device *dev, unsigned int nr);
u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
};
@@ -48,7 +48,8 @@ struct dw_edma_core_ops {
enum dw_edma_map_format {
EDMA_MF_EDMA_LEGACY = 0x0,
EDMA_MF_EDMA_UNROLL = 0x1,
- EDMA_MF_HDMA_COMPAT = 0x5
+ EDMA_MF_HDMA_COMPAT = 0x5,
+ EDMA_MF_HDMA_NATIVE = 0x7,
};
/**
@@ -80,7 +81,7 @@ enum dw_edma_chip_flags {
struct dw_edma_chip {
struct device *dev;
int nr_irqs;
- const struct dw_edma_core_ops *ops;
+ const struct dw_edma_plat_ops *ops;
u32 flags;
void __iomem *reg_base;
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 159e43171ccc..c177322f793d 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -48,13 +48,9 @@ struct sja1105_deferred_xmit_work {
/* Global tagger data */
struct sja1105_tagger_data {
- /* Tagger to switch */
void (*xmit_work_fn)(struct kthread_work *work);
void (*meta_tstamp_handler)(struct dsa_switch *ds, int port, u8 ts_id,
enum sja1110_meta_tstamp dir, u64 tstamp);
- /* Switch to tagger */
- bool (*rxtstamp_get_state)(struct dsa_switch *ds);
- void (*rxtstamp_set_state)(struct dsa_switch *ds, bool on);
};
struct sja1105_skb_cb {
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
index fe7e6ba918f1..1c849db953a5 100644
--- a/include/linux/export-internal.h
+++ b/include/linux/export-internal.h
@@ -10,6 +10,55 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#if defined(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)
+/*
+ * relative reference: this reduces the size by half on 64-bit architectures,
+ * and eliminates the need for absolute relocations that require runtime
+ * processing on relocatable kernels.
+ */
+#define __KSYM_REF(sym) ".long " #sym "- ."
+#elif defined(CONFIG_64BIT)
+#define __KSYM_REF(sym) ".quad " #sym
+#else
+#define __KSYM_REF(sym) ".long " #sym
+#endif
+
+/*
+ * For every exported symbol, do the following:
+ *
+ * - Put the name of the symbol and namespace (empty string "" for none) in
+ * __ksymtab_strings.
+ * - Place a struct kernel_symbol entry in the __ksymtab section.
+ *
+ * Note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+#define __KSYMTAB(name, sym, sec, ns) \
+ asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1" "\n" \
+ "__kstrtab_" #name ":" "\n" \
+ " .asciz \"" #name "\"" "\n" \
+ "__kstrtabns_" #name ":" "\n" \
+ " .asciz \"" ns "\"" "\n" \
+ " .previous" "\n" \
+ " .section \"___ksymtab" sec "+" #name "\", \"a\"" "\n" \
+ " .balign 4" "\n" \
+ "__ksymtab_" #name ":" "\n" \
+ __KSYM_REF(sym) "\n" \
+ __KSYM_REF(__kstrtab_ ##name) "\n" \
+ __KSYM_REF(__kstrtabns_ ##name) "\n" \
+ " .previous" "\n" \
+ )
+
+#ifdef CONFIG_IA64
+#define KSYM_FUNC(name) @fptr(name)
+#else
+#define KSYM_FUNC(name) name
+#endif
+
+#define KSYMTAB_FUNC(name, sec, ns) __KSYMTAB(name, KSYM_FUNC(name), sec, ns)
+#define KSYMTAB_DATA(name, sec, ns) __KSYMTAB(name, name, sec, ns)
+
#define SYMBOL_CRC(sym, crc, sec) \
asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
"__crc_" #sym ":" "\n" \
diff --git a/include/linux/export.h b/include/linux/export.h
index 3f31ced0d977..beed8387e0a4 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
+#include <linux/compiler.h>
+#include <linux/linkage.h>
#include <linux/stringify.h>
/*
@@ -28,74 +30,25 @@ extern struct module __this_module;
#else
#define THIS_MODULE ((struct module *)0)
#endif
+#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#include <linux/compiler.h>
-/*
- * Emit the ksymtab entry as a pair of relative references: this reduces
- * the size by half on 64-bit architectures, and eliminates the need for
- * absolute relocations that require runtime processing on relocatable
- * kernels.
- */
-#define __KSYMTAB_ENTRY(sym, sec) \
- __ADDRESSABLE(sym) \
- asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
- " .balign 4 \n" \
- "__ksymtab_" #sym ": \n" \
- " .long " #sym "- . \n" \
- " .long __kstrtab_" #sym "- . \n" \
- " .long __kstrtabns_" #sym "- . \n" \
- " .previous \n")
-
-struct kernel_symbol {
- int value_offset;
- int name_offset;
- int namespace_offset;
-};
+#ifdef CONFIG_64BIT
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 8 ASM_NL \
+ .quad sym
#else
-#define __KSYMTAB_ENTRY(sym, sec) \
- static const struct kernel_symbol __ksymtab_##sym \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- __aligned(sizeof(void *)) \
- = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
-
-struct kernel_symbol {
- unsigned long value;
- const char *name;
- const char *namespace;
-};
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 4 ASM_NL \
+ .long sym
#endif
-#ifdef __GENKSYMS__
-
-#define ___EXPORT_SYMBOL(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-
-#else
-
-/*
- * For every exported symbol, do the following:
- *
- * - Put the name of the symbol and namespace (empty string "" for none) in
- * __ksymtab_strings.
- * - Place a struct kernel_symbol entry in the __ksymtab section.
- *
- * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
- * section flag requires it. Use '%progbits' instead of '@progbits' since the
- * former apparently works on all arches according to the binutils source.
- */
-#define ___EXPORT_SYMBOL(sym, sec, ns) \
- extern typeof(sym) sym; \
- extern const char __kstrtab_##sym[]; \
- extern const char __kstrtabns_##sym[]; \
- asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \
- "__kstrtab_" #sym ": \n" \
- " .asciz \"" #sym "\" \n" \
- "__kstrtabns_" #sym ": \n" \
- " .asciz \"" ns "\" \n" \
- " .previous \n"); \
- __KSYMTAB_ENTRY(sym, sec)
-
-#endif
+#define ___EXPORT_SYMBOL(sym, license, ns) \
+ .section ".export_symbol","a" ASM_NL \
+ __export_symbol_##sym: ASM_NL \
+ .asciz license ASM_NL \
+ .asciz ns ASM_NL \
+ __EXPORT_SYMBOL_REF(sym) ASM_NL \
+ .previous
#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS)
@@ -104,54 +57,35 @@ struct kernel_symbol {
* be reused in other execution contexts such as the UEFI stub or the
* decompressor.
*/
-#define __EXPORT_SYMBOL(sym, sec, ns)
+#define __EXPORT_SYMBOL(sym, license, ns)
-#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+#elif defined(__GENKSYMS__)
-#include <generated/autoksyms.h>
+#define __EXPORT_SYMBOL(sym, license, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-/*
- * For fine grained build dependencies, we want to tell the build system
- * about each possible exported symbol even if they're not actually exported.
- * We use a symbol pattern __ksym_marker_<symbol> that the build system filters
- * from the $(NM) output (see scripts/gen_ksymdeps.sh). These symbols are
- * discarded in the final link stage.
- */
-#define __ksym_marker(sym) \
- static int __ksym_marker_##sym[0] __section(".discard.ksym") __used
-
-#define __EXPORT_SYMBOL(sym, sec, ns) \
- __ksym_marker(sym); \
- __cond_export_sym(sym, sec, ns, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, sec, ns, conf) \
- ___cond_export_sym(sym, sec, ns, conf)
-#define ___cond_export_sym(sym, sec, ns, enabled) \
- __cond_export_sym_##enabled(sym, sec, ns)
-#define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
-
-#ifdef __GENKSYMS__
-#define __cond_export_sym_0(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-#else
-#define __cond_export_sym_0(sym, sec, ns) /* nothing */
-#endif
+#elif defined(__ASSEMBLY__)
+
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ ___EXPORT_SYMBOL(sym, license, ns)
#else
-#define __EXPORT_SYMBOL(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ extern typeof(sym) sym; \
+ __ADDRESSABLE(sym) \
+ asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
#endif /* CONFIG_MODULES */
#ifdef DEFAULT_SYMBOL_NAMESPACE
-#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE))
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE))
#else
-#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, "")
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, "")
#endif
#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
-#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl")
+#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "GPL")
#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
-#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", __stringify(ns))
-
-#endif /* !__ASSEMBLY__ */
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns))
#endif /* _LINUX_EXPORT_H */
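
With this rework EXPORT_SYMBOL*() no longer emits struct kernel_symbol directly from C; it records the symbol, its license string and namespace in a dedicated .export_symbol section that later build steps consume. Driver-side usage is unchanged; a hedged reminder of a call site, with an illustrative function name:

int example_helper(int x)
{
	return x + 1;
}
/* Records "example_helper", license "GPL" and an empty namespace in .export_symbol. */
EXPORT_SYMBOL_GPL(example_helper);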
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 1d6402529d10..a82a4bb6ce68 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -103,6 +103,7 @@ enum f2fs_error {
ERROR_INCONSISTENT_SIT,
ERROR_CORRUPTED_VERITY_XATTR,
ERROR_CORRUPTED_XATTR,
+ ERROR_INVALID_NODE_REFERENCE,
ERROR_MAX,
};
diff --git a/include/linux/file.h b/include/linux/file.h
index 39704eae83e2..6e9099d29343 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/posix_types.h>
#include <linux/errno.h>
+#include <linux/cleanup.h>
struct file;
@@ -80,6 +81,8 @@ static inline void fdput_pos(struct fd f)
fdput(f);
}
+DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
+
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
@@ -88,6 +91,9 @@ extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
+DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
+ get_unused_fd_flags(flags), unsigned flags)
+
extern void fd_install(unsigned int fd, struct file *file);
extern int __receive_fd(struct file *file, int __user *ufd,
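
The DEFINE_CLASS() lines added to file.h let callers hold a struct fd or a reserved fd for the duration of a scope, with fdput()/put_unused_fd() run automatically at scope exit. A hedged sketch of the fd class; the function around it is illustrative.

static int example_read_flags(int fd)
{
	CLASS(fd, f)(fd);		/* fdget(fd); fdput(f) runs at scope exit */

	if (!f.file)
		return -EBADF;

	return f.file->f_flags;
}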
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index efb6e2cf2034..bd3fc75d4f14 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -261,6 +261,15 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
void *data, size_t length,
void *callback_data);
+typedef void (*fw_transaction_callback_with_tstamp_t)(struct fw_card *card, int rcode,
+ u32 request_tstamp, u32 response_tstamp, void *data,
+ size_t length, void *callback_data);
+
+union fw_transaction_callback {
+ fw_transaction_callback_t without_tstamp;
+ fw_transaction_callback_with_tstamp_t with_tstamp;
+};
+
/*
* This callback handles an inbound request subaction. It is called in
* RCU read-side context, therefore must not sleep.
@@ -312,6 +321,7 @@ struct fw_transaction {
struct fw_card *card;
bool is_split_transaction;
struct timer_list split_timeout_timer;
+ u32 split_timeout_cycle;
struct fw_packet packet;
@@ -319,7 +329,8 @@ struct fw_transaction {
* The data passed to the callback is valid only during the
* callback.
*/
- fw_transaction_callback_t callback;
+ union fw_transaction_callback callback;
+ bool with_tstamp;
void *callback_data;
};
@@ -345,10 +356,71 @@ void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode);
int fw_get_request_speed(struct fw_request *request);
u32 fw_request_get_timestamp(const struct fw_request *request);
-void fw_send_request(struct fw_card *card, struct fw_transaction *t,
- int tcode, int destination_id, int generation, int speed,
- unsigned long long offset, void *payload, size_t length,
- fw_transaction_callback_t callback, void *callback_data);
+
+void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, union fw_transaction_callback callback,
+ bool with_tstamp, void *callback_data);
+
+/**
+ * fw_send_request() - submit a request packet for transmission to generate callback for response
+ * subaction without time stamp.
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() to generate callback for response subaction without time
+ * stamp.
+ */
+static inline void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed,
+ unsigned long long offset, void *payload, size_t length,
+ fw_transaction_callback_t callback, void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .without_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, false, callback_data);
+}
+
+/**
+ * fw_send_request_with_tstamp() - submit a request packet for transmission to generate callback for
+ * response with time stamp.
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() to generate callback for response subaction with time stamp.
+ */
+static inline void fw_send_request_with_tstamp(struct fw_card *card, struct fw_transaction *t,
+ int tcode, int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, fw_transaction_callback_with_tstamp_t callback,
+ void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .with_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, true, callback_data);
+}
+
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
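
The firewire transaction API gains a callback variant that also reports request/response timestamps; fw_send_request() and fw_send_request_with_tstamp() are now thin wrappers around __fw_send_request(). A hedged caller sketch: the tcode, speed, offset and node handling are placeholders, not taken from this patch.

static void example_done(struct fw_card *card, int rcode, u32 request_tstamp,
			 u32 response_tstamp, void *data, size_t length,
			 void *callback_data)
{
	/* rcode and the two cycle-time stamps describe the completed subaction. */
}

static void example_send(struct fw_card *card, struct fw_transaction *t,
			 int node_id, int generation, __be32 *payload)
{
	fw_send_request_with_tstamp(card, t, TCODE_WRITE_QUADLET_REQUEST,
				    node_id, generation, SCODE_100,
				    0xfffff0000400ULL /* placeholder offset */,
				    payload, sizeof(*payload),
				    example_done, NULL);
}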
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index f5da51677069..9dda7d9898ff 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -4,7 +4,7 @@
*
* Copyright (C) 2014-2021 Xilinx
*
- * Michal Simek <michal.simek@xilinx.com>
+ * Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
* Rajan Vaja <rajanv@xilinx.com>
diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h
index 2d6f3cfa7dea..972434daa000 100644
--- a/include/linux/hsi/ssi_protocol.h
+++ b/include/linux/hsi/ssi_protocol.h
@@ -24,6 +24,7 @@ int ssip_slave_stop_tx(struct hsi_client *master);
void ssip_reset_event(struct hsi_client *master);
int ssip_slave_running(struct hsi_client *master);
+void ssi_waketest(struct hsi_client *cl, unsigned int enable);
#endif /* __LINUX_SSIP_SLAVE_H__ */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 13a1ce38cb0c..3430cc2b05a6 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -367,6 +367,8 @@ struct i2c_adapter *i2c_verify_adapter(struct device *dev);
const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client);
+const void *i2c_get_match_data(const struct i2c_client *client);
+
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
struct device * const dev = kobj_to_dev(kobj);
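
i2c_get_match_data() gives drivers one call that returns the per-device match data regardless of whether the client was matched via OF, ACPI or a legacy i2c_device_id table. A hedged probe sketch; the chip-data struct and its field are illustrative.

struct example_chip_data {
	unsigned int max_freq_hz;
};

static int example_probe(struct i2c_client *client)
{
	const struct example_chip_data *data = i2c_get_match_data(client);

	if (!data)
		return -ENODEV;

	dev_info(&client->dev, "max frequency %u Hz\n", data->max_freq_hz);
	return 0;
}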
diff --git a/include/linux/i8254.h b/include/linux/i8254.h
new file mode 100644
index 000000000000..a675c309232b
--- /dev/null
+++ b/include/linux/i8254.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) William Breathitt Gray */
+#ifndef _I8254_H_
+#define _I8254_H_
+
+struct device;
+struct regmap;
+
+/**
+ * struct i8254_regmap_config - Configuration for the register map of an i8254
+ * @parent: parent device
+ * @map: regmap for the i8254
+ */
+struct i8254_regmap_config {
+ struct device *parent;
+ struct regmap *map;
+};
+
+int devm_i8254_regmap_register(struct device *dev, const struct i8254_regmap_config *config);
+
+#endif /* _I8254_H_ */
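
The new i8254.h exposes a small regmap-based interface for the 8254 interval timer. A hedged registration sketch from a platform driver; the MMIO resource, regmap_config values and probe wiring are assumptions for illustration.

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 8,		/* illustrative register layout */
	.val_bits = 8,
};

static int example_i8254_probe(struct platform_device *pdev)
{
	struct i8254_regmap_config cfg;
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	cfg.parent = &pdev->dev;
	cfg.map = devm_regmap_init_mmio(&pdev->dev, base, &example_regmap_cfg);
	if (IS_ERR(cfg.map))
		return PTR_ERR(cfg.map);

	return devm_i8254_regmap_register(&pdev->dev, &cfg);
}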
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index f5f3ee57bc70..607c3a89a647 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -22,6 +22,7 @@
#include <linux/platform_data/st_sensors_pdata.h>
#define LSM9DS0_IMU_DEV_NAME "lsm9ds0"
+#define LSM303D_IMU_DEV_NAME "lsm303d"
/*
* Buffer size max case: 2bytes per channel, 3 channels in total +
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index d28a5e8097e4..202e55b0a28b 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -221,6 +221,9 @@ struct iio_event_spec {
* @extend_name: Allows labeling of channel attributes with an
* informative name. Note this has no effect on codes etc,
* unlike modifiers.
+ * This field is deprecated in favour of providing
+ * iio_info->read_label() to override the label, which
+ * unlike @extend_name does not affect sysfs filenames.
* @datasheet_name: A name used in in-kernel mapping of channels. It should
* correspond to the first name that the channel is referred
* to by in the datasheet (e.g. IND), or the nearest
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 51f52c5c6092..bce3b1788199 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -171,6 +171,7 @@ void iio_trigger_free(struct iio_trigger *trig);
*/
bool iio_trigger_using_own(struct iio_dev *indio_dev);
+int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig);
int iio_trigger_validate_own_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
diff --git a/include/linux/interconnect-clk.h b/include/linux/interconnect-clk.h
new file mode 100644
index 000000000000..0cd80112bea5
--- /dev/null
+++ b/include/linux/interconnect-clk.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __LINUX_INTERCONNECT_CLK_H
+#define __LINUX_INTERCONNECT_CLK_H
+
+struct device;
+
+struct icc_clk_data {
+ struct clk *clk;
+ const char *name;
+};
+
+struct icc_provider *icc_clk_register(struct device *dev,
+ unsigned int first_id,
+ unsigned int num_clocks,
+ const struct icc_clk_data *data);
+void icc_clk_unregister(struct icc_provider *provider);
+
+#endif
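
icc_clk_register() builds an interconnect provider on top of plain clocks, so bandwidth votes from consumers can be translated into clock rate/enable requests. A hedged registration sketch; the clock name, node IDs and error convention are placeholders.

static int example_register_icc(struct device *dev, struct clk *bus_clk)
{
	const struct icc_clk_data data[] = {
		{ .clk = bus_clk, .name = "bus" },
	};
	struct icc_provider *provider;

	provider = icc_clk_register(dev, 0 /* first_id */, ARRAY_SIZE(data), data);

	return PTR_ERR_OR_ZERO(provider);	/* assumed ERR_PTR convention */
}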
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index 2b0e784ba771..97ac253df62c 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -40,8 +40,6 @@ struct icc_bulk_data {
#if IS_ENABLED(CONFIG_INTERCONNECT)
-struct icc_path *icc_get(struct device *dev, const int src_id,
- const int dst_id);
struct icc_path *of_icc_get(struct device *dev, const char *name);
struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths);
@@ -61,12 +59,6 @@ void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);
#else
-static inline struct icc_path *icc_get(struct device *dev, const int src_id,
- const int dst_id)
-{
- return NULL;
-}
-
static inline struct icc_path *of_icc_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5ec0fa71399e..2b665c32f5fe 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -13,6 +13,7 @@
#define _LINUX_TRACE_IRQFLAGS_H
#include <linux/typecheck.h>
+#include <linux/cleanup.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>
@@ -267,4 +268,10 @@ extern void warn_bogus_irq_restore(void);
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
+DEFINE_LOCK_GUARD_0(irqsave,
+ local_irq_save(_T->flags),
+ local_irq_restore(_T->flags),
+ unsigned long flags)
+
#endif
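
The DEFINE_LOCK_GUARD_0() lines wire local IRQ disabling into the cleanup.h guard infrastructure, so interrupts are re-enabled (or flags restored) automatically when the scope ends. A minimal sketch of the irqsave guard; the protected structure is illustrative.

struct example_state {
	unsigned long counter;
};

static void example_update(struct example_state *st)
{
	guard(irqsave)();	/* local_irq_save(); restored at scope exit */

	st->counter++;		/* runs with local interrupts disabled */
}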
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 07dfb6a20a1c..f6c2ddb16b95 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -196,6 +196,8 @@ int kdb_process_cpu(const struct task_struct *p)
return cpu;
}
+extern void kdb_send_sig(struct task_struct *p, int sig);
+
#ifdef CONFIG_KALLSYMS
extern const char *kdb_walk_kallsyms(loff_t *pos);
#else /* ! CONFIG_KALLSYMS */
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 258cdde8d356..76e891ee9e37 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -365,5 +365,6 @@ extern void kgdb_free_init_mem(void);
#define dbg_late_init()
static inline void kgdb_panic(const char *msg) {}
static inline void kgdb_free_init_mem(void) { }
+static inline int kgdb_nmicallback(int cpu, void *regs) { return 1; }
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0e571e973bc2..9d3ac7720da9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -849,7 +849,7 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
#define KVM_BUG(cond, kvm, fmt...) \
({ \
- int __ret = (cond); \
+ bool __ret = !!(cond); \
\
if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
kvm_vm_bugged(kvm); \
@@ -858,7 +858,7 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
#define KVM_BUG_ON(cond, kvm) \
({ \
- int __ret = (cond); \
+ bool __ret = !!(cond); \
\
if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
kvm_vm_bugged(kvm); \
@@ -991,6 +991,8 @@ static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
return RB_EMPTY_ROOT(&slots->gfn_tree);
}
+bool kvm_are_all_memslots_empty(struct kvm *kvm);
+
#define kvm_for_each_memslot(memslot, bkt, slots) \
hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
if (WARN_ON_ONCE(!memslot->npages)) { \
@@ -2237,9 +2239,6 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
-
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 3a65ff72bb04..7d428100b42b 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -124,6 +124,10 @@ struct led_classdev {
#define LED_BLINK_INVERT 3
#define LED_BLINK_BRIGHTNESS_CHANGE 4
#define LED_BLINK_DISABLE 5
+ /* Brightness off also disables hw-blinking so it is a separate action */
+#define LED_SET_BRIGHTNESS_OFF 6
+#define LED_SET_BRIGHTNESS 7
+#define LED_SET_BLINK 8
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
@@ -147,6 +151,10 @@ struct led_classdev {
* match the values specified exactly.
* Deactivate blinking again when the brightness is set to LED_OFF
* via the brightness_set() callback.
+ * For led_blink_set_nosleep() the LED core assumes that blink_set
+ * implementations of drivers which do not use brightness_set_blocking
+ * will not sleep. Therefore, if brightness_set_blocking is not set,
+ * this function must not sleep!
*/
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
@@ -170,6 +178,8 @@ struct led_classdev {
struct work_struct set_brightness_work;
int delayed_set_value;
+ unsigned long delayed_delay_on;
+ unsigned long delayed_delay_off;
#ifdef CONFIG_LEDS_TRIGGERS
/* Protects the trigger data below */
@@ -315,12 +325,27 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
* software blinking if there is no hardware blinking or if
* the LED refuses the passed values.
*
+ * This function may sleep!
+ *
* Note that if software blinking is active, simply calling
* led_cdev->brightness_set() will not stop the blinking,
* use led_set_brightness() instead.
*/
void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
unsigned long *delay_off);
+
+/**
+ * led_blink_set_nosleep - set blinking, guaranteed to not sleep
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ *
+ * This function makes the LED blink and is guaranteed to not sleep. Otherwise
+ * this is the same as led_blink_set(), see led_blink_set() for details.
+ */
+void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on,
+ unsigned long delay_off);
+
/**
* led_blink_set_oneshot - do a oneshot software blink
* @led_cdev: the LED to start blinking
@@ -334,6 +359,8 @@ void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
*
* If invert is set, led blinks for delay_off first, then for
* delay_on and leave the led on after the on-off cycle.
+ *
+ * This function is guaranteed not to sleep.
*/
void led_blink_set_oneshot(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off,
@@ -476,11 +503,11 @@ void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
void led_trigger_unregister_simple(struct led_trigger *trigger);
void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
-void led_trigger_blink(struct led_trigger *trigger, unsigned long *delay_on,
- unsigned long *delay_off);
+void led_trigger_blink(struct led_trigger *trigger, unsigned long delay_on,
+ unsigned long delay_off);
void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert);
void led_trigger_set_default(struct led_classdev *led_cdev);
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger);
@@ -530,11 +557,11 @@ static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
static inline void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off) {}
+ unsigned long delay_on,
+ unsigned long delay_off) {}
static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
static inline int led_trigger_set(struct led_classdev *led_cdev,
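
led_trigger_blink() and led_trigger_blink_oneshot() now take delay values directly instead of pointers, and led_blink_set_nosleep() gives triggers a path that is guaranteed not to sleep. A hedged trigger sketch using the new signatures; the trigger logic itself is illustrative.

static void example_trigger_activity(struct led_trigger *trig, bool error)
{
	if (error)
		/* 100 ms on / 100 ms off until further notice */
		led_trigger_blink(trig, 100, 100);
	else
		/* single 250 ms flash, normal polarity */
		led_trigger_blink_oneshot(trig, 250, 250, 0);
}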
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 3bc9f7410e21..c53c81242e72 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -20,6 +20,7 @@
/* Dummy declarations */
struct svc_rqst;
struct rpc_task;
+struct rpc_clnt;
/*
* This is the set of functions for lockd->nfsd communication
@@ -56,6 +57,7 @@ struct nlmclnt_initdata {
extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
extern void nlmclnt_done(struct nlm_host *host);
+extern struct rpc_clnt *nlmclnt_rpc_clnt(struct nlm_host *host);
/*
* NLM client operations provide a means to modify RPC processing of NLM
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index fff7fa6b7c5d..f1755163dd9f 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,6 +12,7 @@
enum axp20x_variants {
AXP152_ID = 0,
+ AXP192_ID,
AXP202_ID,
AXP209_ID,
AXP221_ID,
@@ -26,6 +27,7 @@ enum axp20x_variants {
NR_AXP20X_VARIANTS,
};
+#define AXP192_DATACACHE(m) (0x06 + (m))
#define AXP20X_DATACACHE(m) (0x04 + (m))
/* Power supply */
@@ -47,6 +49,13 @@ enum axp20x_variants {
#define AXP152_DCDC_FREQ 0x37
#define AXP152_DCDC_MODE 0x80
+#define AXP192_USB_OTG_STATUS 0x04
+#define AXP192_PWR_OUT_CTRL 0x12
+#define AXP192_DCDC2_V_OUT 0x23
+#define AXP192_DCDC1_V_OUT 0x26
+#define AXP192_DCDC3_V_OUT 0x27
+#define AXP192_LDO2_3_V_OUT 0x28
+
#define AXP20X_PWR_INPUT_STATUS 0x00
#define AXP20X_PWR_OP_MODE 0x01
#define AXP20X_USB_OTG_STATUS 0x02
@@ -185,6 +194,17 @@ enum axp20x_variants {
#define AXP152_IRQ2_STATE 0x49
#define AXP152_IRQ3_STATE 0x4a
+#define AXP192_IRQ1_EN 0x40
+#define AXP192_IRQ2_EN 0x41
+#define AXP192_IRQ3_EN 0x42
+#define AXP192_IRQ4_EN 0x43
+#define AXP192_IRQ1_STATE 0x44
+#define AXP192_IRQ2_STATE 0x45
+#define AXP192_IRQ3_STATE 0x46
+#define AXP192_IRQ4_STATE 0x47
+#define AXP192_IRQ5_EN 0x4a
+#define AXP192_IRQ5_STATE 0x4d
+
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
#define AXP20X_IRQ3_EN 0x42
@@ -204,6 +224,11 @@ enum axp20x_variants {
#define AXP15060_IRQ2_STATE 0x49
/* ADC */
+#define AXP192_GPIO2_V_ADC_H 0x68
+#define AXP192_GPIO2_V_ADC_L 0x69
+#define AXP192_GPIO3_V_ADC_H 0x6a
+#define AXP192_GPIO3_V_ADC_L 0x6b
+
#define AXP20X_ACIN_V_ADC_H 0x56
#define AXP20X_ACIN_V_ADC_L 0x57
#define AXP20X_ACIN_I_ADC_H 0x58
@@ -233,6 +258,8 @@ enum axp20x_variants {
#define AXP20X_IPSOUT_V_HIGH_L 0x7f
/* Power supply */
+#define AXP192_GPIO30_IN_RANGE 0x85
+
#define AXP20X_DCDC_MODE 0x80
#define AXP20X_ADC_EN1 0x82
#define AXP20X_ADC_EN2 0x83
@@ -261,6 +288,16 @@ enum axp20x_variants {
#define AXP152_PWM1_FREQ_Y 0x9c
#define AXP152_PWM1_DUTY_CYCLE 0x9d
+#define AXP192_GPIO0_CTRL 0x90
+#define AXP192_LDO_IO0_V_OUT 0x91
+#define AXP192_GPIO1_CTRL 0x92
+#define AXP192_GPIO2_CTRL 0x93
+#define AXP192_GPIO2_0_STATE 0x94
+#define AXP192_GPIO4_3_CTRL 0x95
+#define AXP192_GPIO4_3_STATE 0x96
+#define AXP192_GPIO2_0_PULL 0x97
+#define AXP192_N_RSTO_CTRL 0x9e
+
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
#define AXP20X_GPIO1_CTRL 0x92
@@ -341,6 +378,17 @@ enum axp20x_variants {
/* Regulators IDs */
enum {
+ AXP192_DCDC1 = 0,
+ AXP192_DCDC2,
+ AXP192_DCDC3,
+ AXP192_LDO1,
+ AXP192_LDO2,
+ AXP192_LDO3,
+ AXP192_LDO_IO0,
+ AXP192_REG_ID_MAX
+};
+
+enum {
AXP20X_LDO1 = 0,
AXP20X_LDO2,
AXP20X_LDO3,
@@ -531,6 +579,42 @@ enum {
AXP152_IRQ_GPIO0_INPUT,
};
+enum axp192_irqs {
+ AXP192_IRQ_ACIN_OVER_V = 1,
+ AXP192_IRQ_ACIN_PLUGIN,
+ AXP192_IRQ_ACIN_REMOVAL,
+ AXP192_IRQ_VBUS_OVER_V,
+ AXP192_IRQ_VBUS_PLUGIN,
+ AXP192_IRQ_VBUS_REMOVAL,
+ AXP192_IRQ_VBUS_V_LOW,
+ AXP192_IRQ_BATT_PLUGIN,
+ AXP192_IRQ_BATT_REMOVAL,
+ AXP192_IRQ_BATT_ENT_ACT_MODE,
+ AXP192_IRQ_BATT_EXIT_ACT_MODE,
+ AXP192_IRQ_CHARG,
+ AXP192_IRQ_CHARG_DONE,
+ AXP192_IRQ_BATT_TEMP_HIGH,
+ AXP192_IRQ_BATT_TEMP_LOW,
+ AXP192_IRQ_DIE_TEMP_HIGH,
+ AXP192_IRQ_CHARG_I_LOW,
+ AXP192_IRQ_DCDC1_V_LONG,
+ AXP192_IRQ_DCDC2_V_LONG,
+ AXP192_IRQ_DCDC3_V_LONG,
+ AXP192_IRQ_PEK_SHORT = 22,
+ AXP192_IRQ_PEK_LONG,
+ AXP192_IRQ_N_OE_PWR_ON,
+ AXP192_IRQ_N_OE_PWR_OFF,
+ AXP192_IRQ_VBUS_VALID,
+ AXP192_IRQ_VBUS_NOT_VALID,
+ AXP192_IRQ_VBUS_SESS_VALID,
+ AXP192_IRQ_VBUS_SESS_END,
+ AXP192_IRQ_LOW_PWR_LVL = 31,
+ AXP192_IRQ_TIMER,
+ AXP192_IRQ_GPIO2_INPUT = 37,
+ AXP192_IRQ_GPIO1_INPUT,
+ AXP192_IRQ_GPIO0_INPUT,
+};
+
enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
AXP20X_IRQ_ACIN_PLUGIN,
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
index 1812ebfa11a8..ee66c9751003 100644
--- a/include/linux/mfd/intel-m10-bmc.h
+++ b/include/linux/mfd/intel-m10-bmc.h
@@ -11,6 +11,7 @@
#include <linux/bits.h>
#include <linux/dev_printk.h>
#include <linux/regmap.h>
+#include <linux/rwsem.h>
#define M10BMC_N3000_LEGACY_BUILD_VER 0x300468
#define M10BMC_N3000_SYS_BASE 0x300800
@@ -39,6 +40,11 @@
#define M10BMC_N3000_VER_PCB_INFO_MSK GENMASK(31, 24)
#define M10BMC_N3000_VER_LEGACY_INVALID 0xffffffff
+/* Telemetry registers */
+#define M10BMC_N3000_TELEM_START 0x100
+#define M10BMC_N3000_TELEM_END 0x250
+#define M10BMC_D5005_TELEM_END 0x300
+
/* Secure update doorbell register, in system register region */
#define M10BMC_N3000_DOORBELL 0x400
@@ -205,11 +211,15 @@ struct m10bmc_csr_map {
* struct intel_m10bmc_platform_info - Intel MAX 10 BMC platform specific information
* @cells: MFD cells
* @n_cells: MFD cells ARRAY_SIZE()
+ * @handshake_sys_reg_ranges: array of register ranges for fw handshake regs
+ * @handshake_sys_reg_nranges: number of register ranges for fw handshake regs
* @csr_map: the mappings for register definition of MAX10 BMC
*/
struct intel_m10bmc_platform_info {
struct mfd_cell *cells;
int n_cells;
+ const struct regmap_range *handshake_sys_reg_ranges;
+ unsigned int handshake_sys_reg_nranges;
const struct m10bmc_csr_map *csr_map;
};
@@ -232,18 +242,30 @@ struct intel_m10bmc_flash_bulk_ops {
void (*unlock_write)(struct intel_m10bmc *m10bmc);
};
+enum m10bmc_fw_state {
+ M10BMC_FW_STATE_NORMAL,
+ M10BMC_FW_STATE_SEC_UPDATE_PREPARE,
+ M10BMC_FW_STATE_SEC_UPDATE_WRITE,
+ M10BMC_FW_STATE_SEC_UPDATE_PROGRAM,
+};
+
/**
* struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
* @dev: this device
* @regmap: the regmap used to access registers by m10bmc itself
* @info: the platform information for MAX10 BMC
* @flash_bulk_ops: optional device specific operations for flash R/W
+ * @bmcfw_lock: read/write semaphore to BMC firmware running state
+ * @bmcfw_state: BMC firmware running state. Available only when
+ * handshake_sys_reg_nranges > 0.
*/
struct intel_m10bmc {
struct device *dev;
struct regmap *regmap;
const struct intel_m10bmc_platform_info *info;
const struct intel_m10bmc_flash_bulk_ops *flash_bulk_ops;
+ struct rw_semaphore bmcfw_lock; /* Protects bmcfw_state */
+ enum m10bmc_fw_state bmcfw_state;
};
/*
@@ -251,6 +273,7 @@ struct intel_m10bmc {
*
* m10bmc_raw_read - read m10bmc register per addr
* m10bmc_sys_read - read m10bmc system register per offset
+ * m10bmc_sys_update_bits - update m10bmc system register per offset
*/
static inline int
m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
@@ -266,21 +289,15 @@ m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
return ret;
}
+int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset, unsigned int *val);
+int m10bmc_sys_update_bits(struct intel_m10bmc *m10bmc, unsigned int offset,
+ unsigned int msk, unsigned int val);
+
/*
- * The base of the system registers could be configured by HW developers, and
- * in HW SPEC, the base is not added to the addresses of the system registers.
- *
- * This function helps to simplify the accessing of the system registers. And if
- * the base is reconfigured in HW, SW developers could simply change the
- * csr_map's base accordingly.
+ * Track the state of the firmware, as it is not available for register
+ * handshakes during secure updates on some MAX 10 cards.
*/
-static inline int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset,
- unsigned int *val)
-{
- const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
-
- return m10bmc_raw_read(m10bmc, csr_map->base + offset, val);
-}
+void m10bmc_fw_state_set(struct intel_m10bmc *m10bmc, enum m10bmc_fw_state new_state);
/*
* MAX10 BMC Core support
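
m10bmc_sys_read() and m10bmc_sys_update_bits() move out of line so that system-register accesses can take the tracked BMC firmware state into account during secure updates. A hedged sub-driver sketch; the register offset and bit mask are illustrative placeholders.

static int example_toggle_bit(struct intel_m10bmc *m10bmc)
{
	unsigned int val;
	int ret;

	ret = m10bmc_sys_read(m10bmc, M10BMC_N3000_DOORBELL, &val);
	if (ret)
		return ret;

	/* Set bit 0 of the doorbell register; mask/value are illustrative. */
	return m10bmc_sys_update_bits(m10bmc, M10BMC_N3000_DOORBELL, BIT(0), BIT(0));
}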
diff --git a/include/linux/mfd/max597x.h b/include/linux/mfd/max5970.h
index a850b2e02e6a..762a7d40c843 100644
--- a/include/linux/mfd/max597x.h
+++ b/include/linux/mfd/max5970.h
@@ -7,25 +7,25 @@
* Author: Patrick Rudolph <patrick.rudolph@9elements.com>
*/
-#ifndef _MFD_MAX597X_H
-#define _MFD_MAX597X_H
+#ifndef _MFD_MAX5970_H
+#define _MFD_MAX5970_H
#include <linux/regmap.h>
#define MAX5970_NUM_SWITCHES 2
#define MAX5978_NUM_SWITCHES 1
-#define MAX597X_NUM_LEDS 4
+#define MAX5970_NUM_LEDS 4
-struct max597x_data {
+struct max5970_data {
int num_switches;
u32 irng[MAX5970_NUM_SWITCHES];
u32 mon_rng[MAX5970_NUM_SWITCHES];
u32 shunt_micro_ohms[MAX5970_NUM_SWITCHES];
};
-enum max597x_chip_type {
- MAX597x_TYPE_MAX5978 = 1,
- MAX597x_TYPE_MAX5970,
+enum max5970_chip_type {
+ TYPE_MAX5978 = 1,
+ TYPE_MAX5970,
};
#define MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4)
@@ -93,4 +93,4 @@ enum max597x_chip_type {
#define MAX_REGISTERS 0x49
#define ADC_MASK 0x3FF
-#endif /* _MFD_MAX597X_H */
+#endif /* _MFD_MAX5970_H */
diff --git a/include/linux/mfd/max77541.h b/include/linux/mfd/max77541.h
new file mode 100644
index 000000000000..fe5c0a3dc637
--- /dev/null
+++ b/include/linux/mfd/max77541.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __MFD_MAX77541_H
+#define __MFD_MAX77541_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* REGISTERS */
+#define MAX77541_REG_INT_SRC 0x00
+#define MAX77541_REG_INT_SRC_M 0x01
+
+#define MAX77541_BIT_INT_SRC_TOPSYS BIT(0)
+#define MAX77541_BIT_INT_SRC_BUCK BIT(1)
+
+#define MAX77541_REG_TOPSYS_INT 0x02
+#define MAX77541_REG_TOPSYS_INT_M 0x03
+
+#define MAX77541_BIT_TOPSYS_INT_TJ_120C BIT(0)
+#define MAX77541_BIT_TOPSYS_INT_TJ_140C BIT(1)
+#define MAX77541_BIT_TOPSYS_INT_TSHDN BIT(2)
+#define MAX77541_BIT_TOPSYS_INT_UVLO BIT(3)
+#define MAX77541_BIT_TOPSYS_INT_ALT_SWO BIT(4)
+#define MAX77541_BIT_TOPSYS_INT_EXT_FREQ_DET BIT(5)
+
+/* REGULATORS */
+#define MAX77541_REG_BUCK_INT 0x20
+#define MAX77541_REG_BUCK_INT_M 0x21
+
+#define MAX77541_BIT_BUCK_INT_M1_POK_FLT BIT(0)
+#define MAX77541_BIT_BUCK_INT_M2_POK_FLT BIT(1)
+#define MAX77541_BIT_BUCK_INT_M1_SCFLT BIT(4)
+#define MAX77541_BIT_BUCK_INT_M2_SCFLT BIT(5)
+
+#define MAX77541_REG_EN_CTRL 0x0B
+
+#define MAX77541_BIT_M1_EN BIT(0)
+#define MAX77541_BIT_M2_EN BIT(1)
+
+#define MAX77541_REG_M1_VOUT 0x23
+#define MAX77541_REG_M2_VOUT 0x33
+
+#define MAX77541_BITS_MX_VOUT GENMASK(7, 0)
+
+#define MAX77541_REG_M1_CFG1 0x25
+#define MAX77541_REG_M2_CFG1 0x35
+
+#define MAX77541_BITS_MX_CFG1_RNG GENMASK(7, 6)
+
+/* ADC */
+#define MAX77541_REG_ADC_INT 0x70
+#define MAX77541_REG_ADC_INT_M 0x71
+
+#define MAX77541_BIT_ADC_INT_CH1_I BIT(0)
+#define MAX77541_BIT_ADC_INT_CH2_I BIT(1)
+#define MAX77541_BIT_ADC_INT_CH3_I BIT(2)
+#define MAX77541_BIT_ADC_INT_CH6_I BIT(5)
+
+#define MAX77541_REG_ADC_DATA_CH1 0x72
+#define MAX77541_REG_ADC_DATA_CH2 0x73
+#define MAX77541_REG_ADC_DATA_CH3 0x74
+#define MAX77541_REG_ADC_DATA_CH6 0x77
+
+/* INTERRUPT MASKS*/
+#define MAX77541_REG_INT_SRC_MASK 0x00
+#define MAX77541_REG_TOPSYS_INT_MASK 0x00
+#define MAX77541_REG_BUCK_INT_MASK 0x00
+
+#define MAX77541_MAX_REGULATORS 2
+
+enum max7754x_ids {
+ MAX77540 = 1,
+ MAX77541,
+};
+
+struct regmap;
+struct regmap_irq_chip_data;
+struct i2c_client;
+
+struct max77541 {
+ struct i2c_client *i2c;
+ struct regmap *regmap;
+ enum max7754x_ids id;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *irq_buck;
+ struct regmap_irq_chip_data *irq_topsys;
+ struct regmap_irq_chip_data *irq_adc;
+};
+
+#endif /* __MFD_MAX77541_H */
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
index 6bb432f6a96c..0221f806d139 100644
--- a/include/linux/mfd/rt5033-private.h
+++ b/include/linux/mfd/rt5033-private.h
@@ -55,21 +55,28 @@ enum rt5033_reg {
};
/* RT5033 Charger state register */
-#define RT5033_CHG_STAT_MASK 0x20
+#define RT5033_CHG_STAT_TYPE_MASK 0x60
+#define RT5033_CHG_STAT_TYPE_PRE 0x20
+#define RT5033_CHG_STAT_TYPE_FAST 0x60
+#define RT5033_CHG_STAT_MASK 0x30
#define RT5033_CHG_STAT_DISCHARGING 0x00
#define RT5033_CHG_STAT_FULL 0x10
#define RT5033_CHG_STAT_CHARGING 0x20
#define RT5033_CHG_STAT_NOT_CHARGING 0x30
-#define RT5033_CHG_STAT_TYPE_MASK 0x60
-#define RT5033_CHG_STAT_TYPE_PRE 0x20
-#define RT5033_CHG_STAT_TYPE_FAST 0x60
/* RT5033 CHGCTRL1 register */
#define RT5033_CHGCTRL1_IAICR_MASK 0xe0
+#define RT5033_CHGCTRL1_TE_EN_MASK 0x08
+#define RT5033_CHGCTRL1_HZ_MASK 0x02
#define RT5033_CHGCTRL1_MODE_MASK 0x01
/* RT5033 CHGCTRL2 register */
#define RT5033_CHGCTRL2_CV_MASK 0xfc
+#define RT5033_CHGCTRL2_CV_SHIFT 0x02
+
+/* RT5033 DEVICE_ID register */
+#define RT5033_VENDOR_ID_MASK 0xf0
+#define RT5033_CHIP_REV_MASK 0x0f
/* RT5033 CHGCTRL3 register */
#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40
@@ -77,18 +84,18 @@ enum rt5033_reg {
#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01
/* RT5033 CHGCTRL4 register */
-#define RT5033_CHGCTRL4_EOC_MASK 0x07
+#define RT5033_CHGCTRL4_MIVR_MASK 0xe0
#define RT5033_CHGCTRL4_IPREC_MASK 0x18
+#define RT5033_CHGCTRL4_IPREC_SHIFT 0x03
+#define RT5033_CHGCTRL4_EOC_MASK 0x07
/* RT5033 CHGCTRL5 register */
-#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
#define RT5033_CHGCTRL5_ICHG_MASK 0xf0
#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04
-#define RT5033_CHG_MAX_CURRENT 0x0d
+#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
/* RT5033 RT CTRL1 register */
#define RT5033_RT_CTRL1_UUG_MASK 0x02
-#define RT5033_RT_HZ_MASK 0x01
/* RT5033 control register */
#define RT5033_CTRL_FCCM_BUCK_MASK BIT(0)
@@ -115,28 +122,37 @@ enum rt5033_reg {
* register), AICR mode limits the input current. For example, the AICR 100
* mode limits the input current to 100 mA.
*/
+#define RT5033_AICR_DISABLE 0x00
#define RT5033_AICR_100_MODE 0x20
#define RT5033_AICR_500_MODE 0x40
#define RT5033_AICR_700_MODE 0x60
#define RT5033_AICR_900_MODE 0x80
+#define RT5033_AICR_1000_MODE 0xa0
#define RT5033_AICR_1500_MODE 0xc0
#define RT5033_AICR_2000_MODE 0xe0
-#define RT5033_AICR_MODE_MASK 0xe0
-/* RT5033 use internal timer need to set time */
-#define RT5033_FAST_CHARGE_TIMER4 0x00
-#define RT5033_FAST_CHARGE_TIMER6 0x01
-#define RT5033_FAST_CHARGE_TIMER8 0x02
-#define RT5033_FAST_CHARGE_TIMER9 0x03
-#define RT5033_FAST_CHARGE_TIMER12 0x04
-#define RT5033_FAST_CHARGE_TIMER14 0x05
-#define RT5033_FAST_CHARGE_TIMER16 0x06
+/* RT5033 charger minimum input voltage regulation */
+#define RT5033_CHARGER_MIVR_DISABLE 0x00
+#define RT5033_CHARGER_MIVR_4200MV 0x20
+#define RT5033_CHARGER_MIVR_4300MV 0x40
+#define RT5033_CHARGER_MIVR_4400MV 0x60
+#define RT5033_CHARGER_MIVR_4500MV 0x80
+#define RT5033_CHARGER_MIVR_4600MV 0xa0
+#define RT5033_CHARGER_MIVR_4700MV 0xc0
+#define RT5033_CHARGER_MIVR_4800MV 0xe0
+/* RT5033 use internal timer need to set time */
+#define RT5033_FAST_CHARGE_TIMER4 0x00 /* 4 hrs */
+#define RT5033_FAST_CHARGE_TIMER6 0x08 /* 6 hrs */
+#define RT5033_FAST_CHARGE_TIMER8 0x10 /* 8 hrs */
+#define RT5033_FAST_CHARGE_TIMER10 0x18 /* 10 hrs */
+#define RT5033_FAST_CHARGE_TIMER12 0x20 /* 12 hrs */
+#define RT5033_FAST_CHARGE_TIMER14 0x28 /* 14 hrs */
+#define RT5033_FAST_CHARGE_TIMER16 0x30 /* 16 hrs */
+
+#define RT5033_INT_TIMER_DISABLE 0x00
#define RT5033_INT_TIMER_ENABLE 0x01
-/* RT5033 charger termination enable mask */
-#define RT5033_TE_ENABLE_MASK 0x08
-
/*
* RT5033 charger opa mode. RT5033 has two opa modes for OTG: charger mode
* and boost mode.
@@ -145,25 +161,30 @@ enum rt5033_reg {
#define RT5033_BOOST_MODE 0x01
/* RT5033 charger termination enable */
+#define RT5033_TE_DISABLE 0x00
#define RT5033_TE_ENABLE 0x08
/* RT5033 charger CFO enable */
+#define RT5033_CFO_DISABLE 0x00
#define RT5033_CFO_ENABLE 0x40
/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U
#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U
+#define RT5033_CV_MAX_VOLTAGE 0x1e
/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U
#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U
+#define RT5033_CHG_MAX_PRE_CURRENT 0x03
/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */
#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U
#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U
+#define RT5033_CHG_MAX_CURRENT 0x0d
/*
* RT5033 charger const-charge end of charger current (
@@ -187,11 +208,12 @@ enum rt5033_reg {
* RT5033 charger UUG. It enables MOS auto control by H/W charger
* circuit.
*/
+#define RT5033_CHARGER_UUG_DISABLE 0x00
#define RT5033_CHARGER_UUG_ENABLE 0x02
/* RT5033 charger high impedance mode */
#define RT5033_CHARGER_HZ_DISABLE 0x00
-#define RT5033_CHARGER_HZ_ENABLE 0x01
+#define RT5033_CHARGER_HZ_ENABLE 0x02
/* RT5033 regulator BUCK output voltage uV */
#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
index 8f306ac15a27..bb3d18945d21 100644
--- a/include/linux/mfd/rt5033.h
+++ b/include/linux/mfd/rt5033.h
@@ -12,7 +12,6 @@
#include <linux/regulator/consumer.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
-#include <linux/power_supply.h>
/* RT5033 regulator IDs */
enum rt5033_regulators {
@@ -32,27 +31,4 @@ struct rt5033_dev {
bool wakeup;
};
-struct rt5033_battery {
- struct i2c_client *client;
- struct rt5033_dev *rt5033;
- struct regmap *regmap;
- struct power_supply *psy;
-};
-
-/* RT5033 charger platform data */
-struct rt5033_charger_data {
- unsigned int pre_uamp;
- unsigned int pre_uvolt;
- unsigned int const_uvolt;
- unsigned int eoc_uamp;
- unsigned int fast_uamp;
-};
-
-struct rt5033_charger {
- struct device *dev;
- struct rt5033_dev *rt5033;
- struct power_supply psy;
- struct rt5033_charger_data *chg;
-};
-
#endif /* __RT5033_H__ */
diff --git a/include/linux/mfd/stpmic1.h b/include/linux/mfd/stpmic1.h
index fa3f99f7e9a1..dc00bac24f5a 100644
--- a/include/linux/mfd/stpmic1.h
+++ b/include/linux/mfd/stpmic1.h
@@ -15,7 +15,7 @@
#define RREQ_STATE_SR 0x5
#define VERSION_SR 0x6
-#define SWOFF_PWRCTRL_CR 0x10
+#define MAIN_CR 0x10
#define PADS_PULL_CR 0x11
#define BUCKS_PD_CR 0x12
#define LDO14_PD_CR 0x13
@@ -148,14 +148,14 @@
#define LDO_BYPASS_MASK BIT(7)
/* Main PMIC Control Register
- * SWOFF_PWRCTRL_CR
+ * MAIN_CR
* Address : 0x10
*/
-#define ICC_EVENT_ENABLED BIT(4)
+#define OCP_OFF_DBG BIT(4)
#define PWRCTRL_POLARITY_HIGH BIT(3)
-#define PWRCTRL_PIN_VALID BIT(2)
-#define RESTART_REQUEST_ENABLED BIT(1)
-#define SOFTWARE_SWITCH_OFF_ENABLED BIT(0)
+#define PWRCTRL_ENABLE BIT(2)
+#define RESTART_REQUEST_ENABLE BIT(1)
+#define SOFTWARE_SWITCH_OFF BIT(0)
/* Main PMIC PADS Control Register
* PADS_PULL_CR
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4f2c33c273eb..2dd73e4f3d8e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -377,7 +377,7 @@ extern unsigned int kobjsize(const void *objp);
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
/* Bits set in the VMA until the stack is in its final location */
-#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
+#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
@@ -399,8 +399,10 @@ extern unsigned int kobjsize(const void *objp);
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK VM_GROWSUP
+#define VM_STACK_EARLY VM_GROWSDOWN
#else
#define VM_STACK VM_GROWSDOWN
+#define VM_STACK_EARLY 0
#endif
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
@@ -3177,7 +3179,7 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
- bool downgrade);
+ bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
@@ -3185,7 +3187,7 @@ extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in,
#ifdef CONFIG_MMU
extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct list_head *uf, bool downgrade);
+ struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
@@ -3379,6 +3381,8 @@ static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
return VM_FAULT_OOM;
+ else if (err == -EHWPOISON)
+ return VM_FAULT_HWPOISON;
return VM_FAULT_SIGBUS;
}
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ccf017353bb6..b0678b093cb2 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -221,6 +221,19 @@ struct acpi_device_id {
__u32 cls_msk;
};
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls : the class, subclass, prog-if triple for this device
+ * @_msk : the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields will be left
+ * initialized with the default value.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
+
#define PNP_ID_LEN 8
#define PNP_MAX_DEVICES 8
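
Illustration (not part of the patch): a minimal sketch of how the newly documented ACPI_DEVICE_CLASS() macro is typically used in a driver match table; the class value, mask and table name below are hypothetical and the usual module includes are assumed.

static const struct acpi_device_id example_acpi_match[] = {
	/* match any ACPI device reporting PCI class 0x0c0330 (xHCI) */
	{ ACPI_DEVICE_CLASS(0x0c0330, 0xffffff) },
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_acpi_match);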
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 8f226d460f51..a33aa9eb9fc3 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -19,6 +19,7 @@
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
+#include <linux/cleanup.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -219,4 +220,7 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
+
#endif /* __LINUX_MUTEX_H */
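
Illustration (not part of the patch): a minimal sketch of the scope-based locking that the new DEFINE_GUARD(mutex, ...) enables via <linux/cleanup.h>; all names are hypothetical.

static DEFINE_MUTEX(example_lock);
static int example_count;

static void example_inc(void)
{
	guard(mutex)(&example_lock);	/* mutex_lock(&example_lock) */
	example_count++;
}					/* mutex_unlock() runs on scope exit */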
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index ea2f7e6b1b0b..20eeba8b009d 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -63,7 +63,8 @@ struct nfs_client {
u32 cl_minorversion;/* NFSv4 minorversion */
unsigned int cl_nconnect; /* Number of connections */
unsigned int cl_max_connect; /* max number of xprts allowed */
- const char * cl_principal; /* used for machine cred */
+ const char * cl_principal; /* used for machine cred */
+ struct xprtsec_parms cl_xprtsec; /* xprt security policy */
#if IS_ENABLED(CONFIG_NFS_V4)
struct list_head cl_ds_clients; /* auth flavor data servers */
@@ -153,6 +154,7 @@ struct nfs_server {
#define NFS_MOUNT_WRITE_EAGER 0x01000000
#define NFS_MOUNT_WRITE_WAIT 0x02000000
#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
+#define NFS_MOUNT_SHUTDOWN 0x08000000
unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
@@ -183,6 +185,7 @@ struct nfs_server {
change_attr_type;/* Description of change attribute */
struct nfs_fsid fsid;
+ int s_sysfs_id; /* sysfs dentry index */
__u64 maxfilesize; /* maximum file size */
struct timespec64 time_delta; /* smallest time granularity */
unsigned long mount_time; /* when this fs was mounted */
@@ -259,6 +262,7 @@ struct nfs_server {
/* User namespace info */
const struct cred *cred;
bool has_sec_mnt_opts;
+ struct kobject kobj;
};
/* Server capabilities */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 29a1b39794bf..12bbb5c63664 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1528,6 +1528,7 @@ struct nfs42_seek_res {
struct nfs42_setxattrargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh *fh;
+ const u32 *bitmask;
const char *xattr_name;
u32 xattr_flags;
size_t xattr_len;
@@ -1537,6 +1538,8 @@ struct nfs42_setxattrargs {
struct nfs42_setxattrres {
struct nfs4_sequence_res seq_res;
struct nfs4_change_info cinfo;
+ struct nfs_fattr *fattr;
+ const struct nfs_server *server;
};
struct nfs42_getxattrargs {
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index bb0ee80526b2..8d07116caaf1 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -385,7 +385,7 @@ static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask) \
for ((node) = first_node(mask); \
- (node >= 0) && (node) < MAX_NUMNODES; \
+ (node) < MAX_NUMNODES; \
(node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
#define for_each_node_mask(node, mask) \
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 243c82d7f852..999eddd619b7 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -516,7 +516,7 @@ extern int parport_device_proc_register(struct pardevice *device);
extern int parport_device_proc_unregister(struct pardevice *device);
/* If PC hardware is the only type supported, we can optimise a bit. */
-#if !defined(CONFIG_PARPORT_NOT_PC)
+#if !defined(CONFIG_PARPORT_NOT_PC) && defined(CONFIG_PARPORT_PC)
#include <linux/parport_pc.h>
#define parport_write_data(p,x) parport_pc_write_data(p,x)
diff --git a/include/linux/pds/pds_adminq.h b/include/linux/pds/pds_adminq.h
index 98a60ce87b92..bcba7fda3cc9 100644
--- a/include/linux/pds/pds_adminq.h
+++ b/include/linux/pds/pds_adminq.h
@@ -222,6 +222,27 @@ enum pds_core_lif_type {
PDS_CORE_LIF_TYPE_DEFAULT = 0,
};
+#define PDS_CORE_IFNAMSIZ 16
+
+/**
+ * enum pds_core_logical_qtype - Logical Queue Types
+ * @PDS_CORE_QTYPE_ADMINQ: Administrative Queue
+ * @PDS_CORE_QTYPE_NOTIFYQ: Notify Queue
+ * @PDS_CORE_QTYPE_RXQ: Receive Queue
+ * @PDS_CORE_QTYPE_TXQ: Transmit Queue
+ * @PDS_CORE_QTYPE_EQ: Event Queue
+ * @PDS_CORE_QTYPE_MAX: Max queue type supported
+ */
+enum pds_core_logical_qtype {
+ PDS_CORE_QTYPE_ADMINQ = 0,
+ PDS_CORE_QTYPE_NOTIFYQ = 1,
+ PDS_CORE_QTYPE_RXQ = 2,
+ PDS_CORE_QTYPE_TXQ = 3,
+ PDS_CORE_QTYPE_EQ = 4,
+
+ PDS_CORE_QTYPE_MAX = 16 /* don't change - used in struct size */
+};
+
/**
* union pds_core_lif_config - LIF configuration
* @state: LIF state (enum pds_core_lif_state)
@@ -584,6 +605,219 @@ struct pds_core_q_init_comp {
u8 color;
};
+/*
+ * enum pds_vdpa_cmd_opcode - vDPA Device commands
+ */
+enum pds_vdpa_cmd_opcode {
+ PDS_VDPA_CMD_INIT = 48,
+ PDS_VDPA_CMD_IDENT = 49,
+ PDS_VDPA_CMD_RESET = 51,
+ PDS_VDPA_CMD_VQ_RESET = 52,
+ PDS_VDPA_CMD_VQ_INIT = 53,
+ PDS_VDPA_CMD_STATUS_UPDATE = 54,
+ PDS_VDPA_CMD_SET_FEATURES = 55,
+ PDS_VDPA_CMD_SET_ATTR = 56,
+};
+
+/**
+ * struct pds_vdpa_cmd - generic command
+ * @opcode: Opcode
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ */
+struct pds_vdpa_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_vdpa_init_cmd - INIT command
+ * @opcode: Opcode PDS_VDPA_CMD_INIT
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ */
+struct pds_vdpa_init_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_vdpa_ident - vDPA identification data
+ * @hw_features: vDPA features supported by device
+ * @max_vqs: max queues available (2 queues for a single queuepair)
+ * @max_qlen: log(2) of maximum number of descriptors
+ * @min_qlen: log(2) of minimum number of descriptors
+ *
+ * This struct is used in a DMA block that is set up for the PDS_VDPA_CMD_IDENT
+ * transaction. Set up the DMA block and send the address in the IDENT cmd
+ * data, the DSC will write the ident information, then we can remove the DMA
+ * block after reading the answer. If the completion status is 0, then there
+ * is valid information, else there was an error and the data should be treated as invalid.
+ */
+struct pds_vdpa_ident {
+ __le64 hw_features;
+ __le16 max_vqs;
+ __le16 max_qlen;
+ __le16 min_qlen;
+};
+
+/**
+ * struct pds_vdpa_ident_cmd - IDENT command
+ * @opcode: Opcode PDS_VDPA_CMD_IDENT
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @len: length of ident info DMA space
+ * @ident_pa: address for DMA of ident info (struct pds_vdpa_ident)
+ * only used for this transaction, then forgotten by DSC
+ */
+struct pds_vdpa_ident_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ __le32 len;
+ __le64 ident_pa;
+};
+
+/**
+ * struct pds_vdpa_status_cmd - STATUS_UPDATE command
+ * @opcode: Opcode PDS_VDPA_CMD_STATUS_UPDATE
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @status: new status bits
+ */
+struct pds_vdpa_status_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ u8 status;
+};
+
+/**
+ * enum pds_vdpa_attr - List of VDPA device attributes
+ * @PDS_VDPA_ATTR_MAC: MAC address
+ * @PDS_VDPA_ATTR_MAX_VQ_PAIRS: Max virtqueue pairs
+ */
+enum pds_vdpa_attr {
+ PDS_VDPA_ATTR_MAC = 1,
+ PDS_VDPA_ATTR_MAX_VQ_PAIRS = 2,
+};
+
+/**
+ * struct pds_vdpa_setattr_cmd - SET_ATTR command
+ * @opcode: Opcode PDS_VDPA_CMD_SET_ATTR
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @attr: attribute to be changed (enum pds_vdpa_attr)
+ * @pad: Word boundary padding
+ * @mac: new mac address to be assigned as vdpa device address
+ * @max_vq_pairs: new limit of virtqueue pairs
+ */
+struct pds_vdpa_setattr_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ u8 attr;
+ u8 pad[3];
+ union {
+ u8 mac[6];
+ __le16 max_vq_pairs;
+ } __packed;
+};
+
+/**
+ * struct pds_vdpa_vq_init_cmd - queue init command
+ * @opcode: Opcode PDS_VDPA_CMD_VQ_INIT
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @qid: Queue id (bit0 clear = rx, bit0 set = tx, qid=N is ctrlq)
+ * @len: log(2) of max descriptor count
+ * @desc_addr: DMA address of descriptor area
+ * @avail_addr: DMA address of available descriptors (aka driver area)
+ * @used_addr: DMA address of used descriptors (aka device area)
+ * @intr_index: interrupt index
+ * @avail_index: initial device position in available ring
+ * @used_index: initial device position in used ring
+ */
+struct pds_vdpa_vq_init_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le16 qid;
+ __le16 len;
+ __le64 desc_addr;
+ __le64 avail_addr;
+ __le64 used_addr;
+ __le16 intr_index;
+ __le16 avail_index;
+ __le16 used_index;
+};
+
+/**
+ * struct pds_vdpa_vq_init_comp - queue init completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @hw_qtype: HW queue type, used in doorbell selection
+ * @hw_qindex: HW queue index, used in doorbell selection
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_vdpa_vq_init_comp {
+ u8 status;
+ u8 hw_qtype;
+ __le16 hw_qindex;
+ u8 rsvd[11];
+ u8 color;
+};
+
+/**
+ * struct pds_vdpa_vq_reset_cmd - queue reset command
+ * @opcode: Opcode PDS_VDPA_CMD_VQ_RESET
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @qid: Queue id
+ */
+struct pds_vdpa_vq_reset_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le16 qid;
+};
+
+/**
+ * struct pds_vdpa_vq_reset_comp - queue reset completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd0: Word boundary padding
+ * @avail_index: current device position in available ring
+ * @used_index: current device position in used ring
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_vdpa_vq_reset_comp {
+ u8 status;
+ u8 rsvd0;
+ __le16 avail_index;
+ __le16 used_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct pds_vdpa_set_features_cmd - set hw features
+ * @opcode: Opcode PDS_VDPA_CMD_SET_FEATURES
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @rsvd: Word boundary padding
+ * @features: Feature bit mask
+ */
+struct pds_vdpa_set_features_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le32 rsvd;
+ __le64 features;
+};
+
union pds_core_adminq_cmd {
u8 opcode;
u8 bytes[64];
@@ -600,6 +834,16 @@ union pds_core_adminq_cmd {
struct pds_core_q_identify_cmd q_ident;
struct pds_core_q_init_cmd q_init;
+
+ struct pds_vdpa_cmd vdpa;
+ struct pds_vdpa_init_cmd vdpa_init;
+ struct pds_vdpa_ident_cmd vdpa_ident;
+ struct pds_vdpa_status_cmd vdpa_status;
+ struct pds_vdpa_setattr_cmd vdpa_setattr;
+ struct pds_vdpa_set_features_cmd vdpa_set_features;
+ struct pds_vdpa_vq_init_cmd vdpa_vq_init;
+ struct pds_vdpa_vq_reset_cmd vdpa_vq_reset;
+
};
union pds_core_adminq_comp {
@@ -621,6 +865,9 @@ union pds_core_adminq_comp {
struct pds_core_q_identify_comp q_ident;
struct pds_core_q_init_comp q_init;
+
+ struct pds_vdpa_vq_init_comp vdpa_vq_init;
+ struct pds_vdpa_vq_reset_comp vdpa_vq_reset;
};
#ifndef __CHECKER__
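
Illustration (not part of the patch): a rough sketch of the PDS_VDPA_CMD_IDENT flow described in the pds_vdpa_ident comment above — allocate a DMA block, pass its address in the command, and read the data back once the completion reports success. The adminq submission step is elided and all names here are hypothetical.

static int example_vdpa_ident(struct device *dev, u16 vf_id,
			      struct pds_vdpa_ident *out)
{
	union pds_core_adminq_cmd cmd = {
		.vdpa_ident = {
			.opcode	= PDS_VDPA_CMD_IDENT,
			.vf_id	= cpu_to_le16(vf_id),
			.len	= cpu_to_le32(sizeof(*out)),
		},
	};
	struct pds_vdpa_ident *ident;
	dma_addr_t ident_pa;
	int err;

	ident = dma_alloc_coherent(dev, sizeof(*ident), &ident_pa, GFP_KERNEL);
	if (!ident)
		return -ENOMEM;

	cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);

	/* submit &cmd on the adminq here; a 0 completion status means valid data */
	err = 0;
	if (!err)
		*out = *ident;

	dma_free_coherent(dev, sizeof(*ident), ident, ident_pa);
	return err;
}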
diff --git a/include/linux/pds/pds_common.h b/include/linux/pds/pds_common.h
index 060331486d50..435c8e8161c2 100644
--- a/include/linux/pds/pds_common.h
+++ b/include/linux/pds/pds_common.h
@@ -39,26 +39,7 @@ enum pds_core_vif_types {
#define PDS_DEV_TYPE_RDMA_STR "RDMA"
#define PDS_DEV_TYPE_LM_STR "LM"
-#define PDS_CORE_IFNAMSIZ 16
-
-/**
- * enum pds_core_logical_qtype - Logical Queue Types
- * @PDS_CORE_QTYPE_ADMINQ: Administrative Queue
- * @PDS_CORE_QTYPE_NOTIFYQ: Notify Queue
- * @PDS_CORE_QTYPE_RXQ: Receive Queue
- * @PDS_CORE_QTYPE_TXQ: Transmit Queue
- * @PDS_CORE_QTYPE_EQ: Event Queue
- * @PDS_CORE_QTYPE_MAX: Max queue type supported
- */
-enum pds_core_logical_qtype {
- PDS_CORE_QTYPE_ADMINQ = 0,
- PDS_CORE_QTYPE_NOTIFYQ = 1,
- PDS_CORE_QTYPE_RXQ = 2,
- PDS_CORE_QTYPE_TXQ = 3,
- PDS_CORE_QTYPE_EQ = 4,
-
- PDS_CORE_QTYPE_MAX = 16 /* don't change - used in struct size */
-};
+#define PDS_VDPA_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_VDPA_STR
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 42125cf9c506..b3b458442330 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -8,6 +8,7 @@
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>
+#include <linux/cleanup.h>
#include <asm/percpu.h>
@@ -125,6 +126,9 @@ extern void __init setup_per_cpu_areas(void);
extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
extern void free_percpu(void __percpu *__pdata);
+
+DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
+
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#define alloc_percpu_gfp(type, gfp) \
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b528be0e1f47..2166a69e3bf2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -305,6 +305,7 @@ struct pmu {
struct module *module;
struct device *dev;
+ struct device *parent;
const struct attribute_group **attr_groups;
const struct attribute_group **attr_update;
const char *name;
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 3a570bc59fc7..f6d607ef0e80 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -148,6 +148,7 @@ struct phy_attrs {
* @power_count: used to protect when the PHY is used by multiple consumers
* @attrs: used to specify PHY specific attributes
* @pwr: power regulator associated with the phy
+ * @debugfs: debugfs directory
*/
struct phy {
struct device dev;
@@ -158,6 +159,7 @@ struct phy {
int power_count;
struct phy_attrs attrs;
struct regulator *pwr;
+ struct dentry *debugfs;
};
/**
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 3441064713a3..3cc8db0b12b5 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -73,6 +73,9 @@ struct lp55xx_platform_data {
/* Clock configuration */
u8 clock_mode;
+ /* Charge pump mode */
+ u32 charge_pump_mode;
+
/* optional enable GPIO */
struct gpio_desc *enable_gpiod;
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 897051e51b78..a657830232ae 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -15,7 +15,7 @@
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
* Available only for accelerometer, magnetometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
- * Magnetometer DRDY is supported only on LSM9DS0.
+ * Magnetometer DRDY is supported only on LSM9DS0 and LSM303D.
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
* @pullups: enable/disable i2c controller pullup resistors.
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 035d9649eba4..badad7d11f4f 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -375,23 +375,23 @@ const struct dev_pm_ops name = { \
}
#ifdef CONFIG_PM
-#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+#define _EXPORT_DEV_PM_OPS(name, license, ns) \
const struct dev_pm_ops name; \
- __EXPORT_SYMBOL(name, sec, ns); \
+ __EXPORT_SYMBOL(name, license, ns); \
const struct dev_pm_ops name
#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
#else
-#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+#define _EXPORT_DEV_PM_OPS(name, license, ns) \
static __maybe_unused const struct dev_pm_ops __static_##name
#define EXPORT_PM_FN_GPL(name)
#define EXPORT_PM_FN_NS_GPL(name, ns)
#endif
#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
-#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "_gpl", "")
+#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
-#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "_gpl", #ns)
+#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 0df425bf9bd7..1424670df161 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cleanup.h>
#include <linux/list.h>
/*
@@ -463,4 +464,8 @@ static __always_inline void preempt_enable_nested(void)
preempt_enable();
}
+DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
+DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
#endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/property.h b/include/linux/property.h
index 66df1a15d518..8c3c6685a2ae 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -85,6 +85,18 @@ bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char
return fwnode_property_match_string(fwnode, "compatible", compat) >= 0;
}
+/**
+ * device_is_compatible - match 'compatible' property of the device with a given string
+ * @dev: Pointer to the struct device
+ * @compat: The string to match 'compatible' property with
+ *
+ * Returns: true if matches, otherwise false.
+ */
+static inline bool device_is_compatible(const struct device *dev, const char *compat)
+{
+ return fwnode_device_is_compatible(dev_fwnode(dev), compat);
+}
+
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
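
Illustration (not part of the patch): a minimal sketch of the new device_is_compatible() helper in a probe path; the compatible string and the callees are hypothetical.

static int example_probe(struct platform_device *pdev)
{
	/* true when the firmware node's "compatible" list contains the string */
	if (device_is_compatible(&pdev->dev, "vendor,example-v2"))
		return example_init_v2(&pdev->dev);	/* hypothetical */

	return example_init_v1(&pdev->dev);		/* hypothetical */
}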
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7d9c2a63b7cd..5e5f920ade90 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -27,6 +27,7 @@
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
#include <asm/processor.h>
#include <linux/cpumask.h>
#include <linux/context_tracking_irq.h>
@@ -1057,4 +1058,6 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+
#endif /* __LINUX_RCUPDATE_H */
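
Illustration (not part of the patch): a minimal sketch of the new scope-based RCU guard; the data structure and names are hypothetical.

struct example_obj {
	int flags;
};

static struct example_obj __rcu *example_ptr;

static int example_read_flags(void)
{
	struct example_obj *obj;

	guard(rcu)();				/* rcu_read_lock() */
	obj = rcu_dereference(example_ptr);
	return obj ? obj->flags : 0;
}						/* rcu_read_unlock() on return */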
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 8052d34da782..27343424225c 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -49,9 +49,9 @@ static inline void prepare_to_rcuwait(struct rcuwait *w)
extern void finish_rcuwait(struct rcuwait *w);
-#define rcuwait_wait_event(w, condition, state) \
+#define ___rcuwait_wait_event(w, condition, state, ret, cmd) \
({ \
- int __ret = 0; \
+ long __ret = ret; \
prepare_to_rcuwait(w); \
for (;;) { \
/* \
@@ -67,10 +67,27 @@ extern void finish_rcuwait(struct rcuwait *w);
break; \
} \
\
- schedule(); \
+ cmd; \
} \
finish_rcuwait(w); \
__ret; \
})
+#define rcuwait_wait_event(w, condition, state) \
+ ___rcuwait_wait_event(w, condition, state, 0, schedule())
+
+#define __rcuwait_wait_event_timeout(w, condition, state, timeout) \
+ ___rcuwait_wait_event(w, ___wait_cond_timeout(condition), \
+ state, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define rcuwait_wait_event_timeout(w, condition, state, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __rcuwait_wait_event_timeout(w, condition, \
+ state, timeout); \
+ __ret; \
+})
+
#endif /* _LINUX_RCUWAIT_H_ */
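
Illustration (not part of the patch): a minimal sketch of the new rcuwait_wait_event_timeout(); like wait_event_timeout(), it returns the remaining jiffies, or 0 if the timeout elapsed with the condition still false. Names are hypothetical.

static struct rcuwait example_wait;
static bool example_done;

static int example_wait_for_done(void)
{
	long left;

	left = rcuwait_wait_event_timeout(&example_wait, example_done,
					  TASK_UNINTERRUPTIBLE, HZ);
	return left ? 0 : -ETIMEDOUT;
}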
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index efa5c324369a..1dd530ce8b45 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
+#include <linux/cleanup.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) \
@@ -201,6 +202,13 @@ extern void up_read(struct rw_semaphore *sem);
*/
extern void up_write(struct rw_semaphore *sem);
+DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+
+DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
+DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
+
+
/*
* downgrade write lock to read lock
*/
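
Illustration (not part of the patch): a minimal sketch of the new rwsem read-side scope guard; names are hypothetical.

static DECLARE_RWSEM(example_sem);
static LIST_HEAD(example_list);

static bool example_list_has_entries(void)
{
	guard(rwsem_read)(&example_sem);	/* down_read() */
	return !list_empty(&example_list);
}						/* up_read() on return */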
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index e0f5ac90a228..dd35ce28bb90 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -125,6 +125,8 @@ static inline void put_task_struct(struct task_struct *t)
__put_task_struct(t);
}
+DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
+
static inline void put_task_struct_many(struct task_struct *t, int nr)
{
if (refcount_sub_and_test(nr, &t->usage))
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 6f78f302d272..be65de65fe61 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -7,17 +7,34 @@
#ifndef _LINUX_SERIAL_8250_H
#define _LINUX_SERIAL_8250_H
+#include <linux/errno.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
+struct uart_8250_port;
+
/*
* This is the platform device platform_data structure
+ *
+ * @mapsize: Port size for ioremap()
+ * @bugs: Port bugs
+ *
+ * @dl_read: ``u32 ()(struct uart_8250_port *up)``
+ *
+ * UART divisor latch read.
+ *
+ * @dl_write: ``void ()(struct uart_8250_port *up, u32 value)``
+ *
+ * Write @value into UART divisor latch.
+ *
+ * Locking: Caller holds port's lock.
*/
struct plat_serial8250_port {
unsigned long iobase; /* io base address */
void __iomem *membase; /* ioremap cookie or NULL */
resource_size_t mapbase; /* resource base */
+ resource_size_t mapsize;
unsigned int uartclk; /* UART clock rate */
unsigned int irq; /* interrupt number */
unsigned long irqflags; /* request_irq flags */
@@ -28,8 +45,11 @@ struct plat_serial8250_port {
unsigned char has_sysrq; /* supports magic SysRq */
unsigned int type; /* If UPF_FIXED_TYPE */
upf_t flags; /* UPF_* flags */
+ u16 bugs; /* port bugs */
unsigned int (*serial_in)(struct uart_port *, int);
void (*serial_out)(struct uart_port *, int, int);
+ u32 (*dl_read)(struct uart_8250_port *up);
+ void (*dl_write)(struct uart_8250_port *up, u32 value);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
const struct ktermios *old);
@@ -90,15 +110,23 @@ struct uart_8250_em485 {
* their own 8250 ports without registering their own
* platform device. Using these will make your driver
* dependent on the 8250 driver.
+ *
+ * @dl_read: ``u32 ()(struct uart_8250_port *port)``
+ *
+ * UART divisor latch read.
+ *
+ * @dl_write: ``void ()(struct uart_8250_port *port, u32 value)``
+ *
+ * Write @value into UART divisor latch.
+ *
+ * Locking: Caller holds port's lock.
*/
-
struct uart_8250_port {
struct uart_port port;
struct timer_list timer; /* "no irq" timer */
struct list_head list; /* ports on this IRQ */
u32 capabilities; /* port capabilities */
- unsigned short bugs; /* port bugs */
- bool fifo_bug; /* min RX trigger if enabled */
+ u16 bugs; /* port bugs */
unsigned int tx_loadsz; /* transmit fifo load size */
unsigned char acr;
unsigned char fcr;
@@ -129,8 +157,8 @@ struct uart_8250_port {
const struct uart_8250_ops *ops;
/* 8250 specific callbacks */
- int (*dl_read)(struct uart_8250_port *);
- void (*dl_write)(struct uart_8250_port *, int);
+ u32 (*dl_read)(struct uart_8250_port *up);
+ void (*dl_write)(struct uart_8250_port *up, u32 value);
struct uart_8250_em485 *em485;
void (*rs485_start_tx)(struct uart_8250_port *);
@@ -183,8 +211,11 @@ void serial8250_set_isa_configurator(void (*v)(int port, struct uart_port *up,
u32 *capabilities));
#ifdef CONFIG_SERIAL_8250_RT288X
-unsigned int au_serial_in(struct uart_port *p, int offset);
-void au_serial_out(struct uart_port *p, int offset, int value);
+int rt288x_setup(struct uart_port *p);
+int au_platform_setup(struct plat_serial8250_port *p);
+#else
+static inline int rt288x_setup(struct uart_port *p) { return -ENODEV; }
+static inline int au_platform_setup(struct plat_serial8250_port *p) { return -ENODEV; }
#endif
#endif
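
Illustration (not part of the patch): a minimal sketch of divisor-latch accessors matching the dl_read/dl_write signatures documented above, built on the port's generic register accessors; DLAB handling is assumed to be done by the caller and all names are hypothetical.

static u32 example_dl_read(struct uart_8250_port *up)
{
	struct uart_port *port = &up->port;

	return port->serial_in(port, UART_DLL) |
	       port->serial_in(port, UART_DLM) << 8;
}

static void example_dl_write(struct uart_8250_port *up, u32 value)
{
	struct uart_port *port = &up->port;

	port->serial_out(port, UART_DLL, value & 0xff);
	port->serial_out(port, UART_DLM, (value >> 8) & 0xff);
}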
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 66ecec15a1bf..6d58c57acdaa 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -28,6 +28,7 @@
struct uart_port;
struct serial_struct;
+struct serial_port_device;
struct device;
struct gpio_desc;
@@ -458,6 +459,7 @@ struct uart_port {
struct serial_rs485 *rs485);
int (*iso7816_config)(struct uart_port *,
struct serial_iso7816 *iso7816);
+ int ctrl_id; /* optional serial core controller id */
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
@@ -563,7 +565,8 @@ struct uart_port {
unsigned int minor;
resource_size_t mapbase; /* for ioremap */
resource_size_t mapsize;
- struct device *dev; /* parent device */
+ struct device *dev; /* serial port physical parent device */
+ struct serial_port_device *port_dev; /* serial core port device */
unsigned long sysrq; /* sysrq timeout */
unsigned int sysrq_ch; /* char for sysrq */
@@ -853,7 +856,7 @@ void uart_console_write(struct uart_port *port, const char *s,
int uart_register_driver(struct uart_driver *uart);
void uart_unregister_driver(struct uart_driver *uart);
int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
-int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
+void uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
bool uart_match_port(const struct uart_port *port1,
const struct uart_port *port2);
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 37ad81058d6a..27ae79191bdc 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -13,9 +13,9 @@
/*
* Convert back and forth between INTEVT and IRQ values.
*/
-#ifdef CONFIG_CPU_HAS_INTEVT
-#define evt2irq(evt) (((evt) >> 5) - 16)
-#define irq2evt(irq) (((irq) + 16) << 5)
+#ifdef CONFIG_CPU_HAS_INTEVT /* Avoid IRQ0 (invalid for platform devices) */
+#define evt2irq(evt) ((evt) >> 5)
+#define irq2evt(irq) ((irq) << 5)
#else
#define evt2irq(evt) (evt)
#define irq2evt(irq) (irq)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 791f7453a04f..848c7c82ad5a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
+#include <linux/cleanup.h>
/*
@@ -226,6 +227,8 @@ void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
+DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+
/**
* ksize - Report actual allocation size of associated object
*
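
Illustration (not part of the patch): a minimal sketch of an auto-freed allocation using the new kfree cleanup helper; the callee is hypothetical.

static int example_parse(const u8 *src, size_t len)
{
	u8 *buf __free(kfree) = kmemdup(src, len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	return example_validate(buf, len);	/* hypothetical; buf freed on return */
}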
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index ef645de13ae9..f523ceabd059 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -5,6 +5,7 @@
#define __SOUNDWIRE_H
#include <linux/bug.h>
+#include <linux/lockdep_types.h>
#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
@@ -846,6 +847,7 @@ struct sdw_defer {
* @post_bank_switch: Callback for post bank switch
* @read_ping_status: Read status from PING frames, reported with two bits per Device.
* Bits 31:24 are reserved.
+ * @new_peripheral_assigned: Callback to handle enumeration of new peripheral.
*/
struct sdw_master_ops {
int (*read_prop)(struct sdw_bus *bus);
@@ -860,7 +862,7 @@ struct sdw_master_ops {
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
u32 (*read_ping_status)(struct sdw_bus *bus);
-
+ void (*new_peripheral_assigned)(struct sdw_bus *bus, int dev_num);
};
/**
@@ -906,7 +908,9 @@ struct sdw_bus {
struct list_head slaves;
DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
struct mutex bus_lock;
+ struct lock_class_key bus_lock_key;
struct mutex msg_lock;
+ struct lock_class_key msg_lock_key;
int (*compute_params)(struct sdw_bus *bus);
const struct sdw_master_ops *ops;
const struct sdw_master_port_ops *port_ops;
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 207701aeeb47..11fc88fb0d78 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -7,6 +7,10 @@
#include <linux/irqreturn.h>
#include <linux/soundwire/sdw.h>
+/*********************************************************************
+ * cAVS and ACE1.x definitions
+ *********************************************************************/
+
#define SDW_SHIM_BASE 0x2C000
#define SDW_ALH_BASE 0x2C800
#define SDW_SHIM_BASE_ACE 0x38000
@@ -101,13 +105,84 @@
#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16)
+/*********************************************************************
+ * ACE2.x definitions for SHIM registers - only accessible when the
+ * HDAudio extended link LCTL.SPA/CPA = 1.
+ *********************************************************************/
+/* x variable is link index */
+#define SDW_SHIM2_GENERIC_BASE(x) (0x00030000 + 0x8000 * (x))
+#define SDW_IP_BASE(x) (0x00030100 + 0x8000 * (x))
+#define SDW_SHIM2_VS_BASE(x) (0x00036000 + 0x8000 * (x))
+
+/* SHIM2 Generic Registers */
+/* Read-only capabilities */
+#define SDW_SHIM2_LECAP 0x00
+#define SDW_SHIM2_LECAP_HDS BIT(0) /* unset -> Host mode */
+#define SDW_SHIM2_LECAP_MLC GENMASK(3, 1) /* Number of Lanes */
+
+/* PCM Stream capabilities */
+#define SDW_SHIM2_PCMSCAP 0x10
+#define SDW_SHIM2_PCMSCAP_ISS GENMASK(3, 0) /* Input-only streams */
+#define SDW_SHIM2_PCMSCAP_OSS GENMASK(7, 4) /* Output-only streams */
+#define SDW_SHIM2_PCMSCAP_BSS GENMASK(12, 8) /* Bidirectional streams */
+
+/* Read-only PCM Stream Channel Count, y variable is stream */
+#define SDW_SHIM2_PCMSYCHC(y) (0x14 + (0x4 * (y)))
+#define SDW_SHIM2_PCMSYCHC_CS GENMASK(3, 0) /* Channels Supported */
+
+/* PCM Stream Channel Map */
+#define SDW_SHIM2_PCMSYCHM(y) (0x16 + (0x4 * (y)))
+#define SDW_SHIM2_PCMSYCHM_LCHAN GENMASK(3, 0) /* Lowest channel used by the FIFO port */
+#define SDW_SHIM2_PCMSYCHM_HCHAN GENMASK(7, 4) /* Highest channel used by the FIFO port */
+#define SDW_SHIM2_PCMSYCHM_STRM GENMASK(13, 8) /* HDaudio stream tag */
+#define SDW_SHIM2_PCMSYCHM_DIR BIT(15) /* HDaudio stream direction */
+
+/* SHIM2 vendor-specific registers */
+#define SDW_SHIM2_INTEL_VS_LVSCTL 0x04
+#define SDW_SHIM2_INTEL_VS_LVSCTL_FCG BIT(26)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_MLCS GENMASK(29, 27)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_DCGD BIT(30)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_ICGD BIT(31)
+
+#define SDW_SHIM2_MLCS_XTAL_CLK 0x0
+#define SDW_SHIM2_MLCS_CARDINAL_CLK 0x1
+#define SDW_SHIM2_MLCS_AUDIO_PLL_CLK 0x2
+#define SDW_SHIM2_MLCS_MCLK_INPUT_CLK 0x3
+#define SDW_SHIM2_MLCS_WOV_RING_OSC_CLK 0x4
+
+#define SDW_SHIM2_INTEL_VS_WAKEEN 0x08
+#define SDW_SHIM2_INTEL_VS_WAKEEN_PWE BIT(0)
+
+#define SDW_SHIM2_INTEL_VS_WAKESTS 0x0A
+#define SDW_SHIM2_INTEL_VS_WAKEEN_PWS BIT(0)
+
+#define SDW_SHIM2_INTEL_VS_IOCTL 0x0C
+#define SDW_SHIM2_INTEL_VS_IOCTL_MIF BIT(0)
+#define SDW_SHIM2_INTEL_VS_IOCTL_CO BIT(1)
+#define SDW_SHIM2_INTEL_VS_IOCTL_COE BIT(2)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DO BIT(3)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DOE BIT(4)
+#define SDW_SHIM2_INTEL_VS_IOCTL_BKE BIT(5)
+#define SDW_SHIM2_INTEL_VS_IOCTL_WPDD BIT(6)
+#define SDW_SHIM2_INTEL_VS_IOCTL_ODC BIT(7)
+#define SDW_SHIM2_INTEL_VS_IOCTL_CIBD BIT(8)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DIBD BIT(9)
+#define SDW_SHIM2_INTEL_VS_IOCTL_HAMIFD BIT(10)
+
+#define SDW_SHIM2_INTEL_VS_ACTMCTL 0x0E
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE BIT(0)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODS BIT(1)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE BIT(2)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS GENMASK(4, 3)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE BIT(5)
+
/**
* struct sdw_intel_stream_params_data: configuration passed during
* the @params_stream callback, e.g. for interaction with DSP
* firmware.
*/
struct sdw_intel_stream_params_data {
- int stream;
+ struct snd_pcm_substream *substream;
struct snd_soc_dai *dai;
struct snd_pcm_hw_params *hw_params;
int link_id;
@@ -120,7 +195,7 @@ struct sdw_intel_stream_params_data {
* firmware.
*/
struct sdw_intel_stream_free_data {
- int stream;
+ struct snd_pcm_substream *substream;
struct snd_soc_dai *dai;
int link_id;
};
@@ -134,7 +209,7 @@ struct sdw_intel_ops {
struct sdw_intel_stream_params_data *params_data);
int (*free_stream)(struct device *dev,
struct sdw_intel_stream_free_data *free_data);
- int (*trigger)(struct snd_soc_dai *dai, int cmd, int stream);
+ int (*trigger)(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai);
};
/**
@@ -194,6 +269,8 @@ struct sdw_intel_slave_id {
struct sdw_slave_id id;
};
+struct hdac_bus;
+
/**
* struct sdw_intel_ctx - context allocated by the controller
* driver probe
@@ -248,6 +325,10 @@ struct sdw_intel_ctx {
* DSP driver. The quirks are common for all links for now.
* @shim_base: sdw shim base.
* @alh_base: sdw alh base.
+ * @ext: extended HDaudio link support
+ * @hbus: hdac_bus pointer, needed for power management
+ * @eml_lock: mutex protecting shared registers in the HDaudio multi-link
+ * space
*/
struct sdw_intel_res {
const struct sdw_intel_hw_ops *hw_ops;
@@ -262,6 +343,9 @@ struct sdw_intel_res {
u32 clock_stop_quirks;
u32 shim_base;
u32 alh_base;
+ bool ext;
+ struct hdac_bus *hbus;
+ struct mutex *eml_lock;
};
/*
@@ -315,6 +399,7 @@ struct sdw_intel;
* @sync_go: helper for multi-link synchronization
* @sync_check_cmdsync_unlocked: helper for multi-link synchronization
* and bank switch - shim_lock is assumed to be locked at higher level
+ * @program_sdi: helper for codec command/control based on dev_num
*/
struct sdw_intel_hw_ops {
void (*debugfs_init)(struct sdw_intel *sdw);
@@ -341,8 +426,11 @@ struct sdw_intel_hw_ops {
int (*sync_go_unlocked)(struct sdw_intel *sdw);
int (*sync_go)(struct sdw_intel *sdw);
bool (*sync_check_cmdsync_unlocked)(struct sdw_intel *sdw);
+
+ void (*program_sdi)(struct sdw_intel *sdw, int dev_num);
};
extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
+extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
#endif
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index be48f1cb1878..31d3d747a9db 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -61,6 +61,7 @@
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>
@@ -502,5 +503,35 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
void free_bucket_spinlocks(spinlock_t *locks);
+DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
+ raw_spin_lock(_T->lock),
+ raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
+ raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
+ raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
+ raw_spin_lock_irq(_T->lock),
+ raw_spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
+ raw_spin_lock_irqsave(_T->lock, _T->flags),
+ raw_spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
+ spin_lock(_T->lock),
+ spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
+ spin_lock_irq(_T->lock),
+ spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
+ spin_lock_irqsave(_T->lock, _T->flags),
+ spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
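
Illustration (not part of the patch): a minimal sketch of the new spinlock_irqsave scope guard; names are hypothetical.

static DEFINE_SPINLOCK(example_lock);
static u64 example_events;

static void example_count_event(void)
{
	scoped_guard(spinlock_irqsave, &example_lock)
		example_events++;	/* lock held, IRQs saved/restored */
}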
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index eb92a50a4599..127ef3b2e607 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -343,4 +343,9 @@ static inline void smp_mb__after_srcu_read_unlock(void)
/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}
+DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
+ _T->idx = srcu_read_lock(_T->lock),
+ srcu_read_unlock(_T->lock, _T->idx),
+ int idx)
+
#endif
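
Illustration (not part of the patch): a minimal sketch of the new SRCU scope guard; it stores the srcu_read_lock() index and passes it back to srcu_read_unlock() on scope exit. Names are hypothetical.

DEFINE_STATIC_SRCU(example_srcu);

struct example_cfg {
	int limit;
};

static struct example_cfg __rcu *example_cfg;

static int example_get_limit(void)
{
	struct example_cfg *cfg;

	guard(srcu)(&example_srcu);	/* idx = srcu_read_lock(&example_srcu) */
	cfg = srcu_dereference(example_cfg, &example_srcu);
	return cfg ? cfg->limit : -ENOENT;
}					/* srcu_read_unlock(&example_srcu, idx) */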
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 3e6ce288a7fc..61e58327b1aa 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -120,6 +120,7 @@ struct rpc_authops {
struct rpcsec_gss_info *);
int (*key_timeout)(struct rpc_auth *,
struct rpc_cred *);
+ int (*ping)(struct rpc_clnt *clnt);
};
struct rpc_credops {
@@ -144,6 +145,7 @@ struct rpc_credops {
extern const struct rpc_authops authunix_ops;
extern const struct rpc_authops authnull_ops;
+extern const struct rpc_authops authtls_ops;
int __init rpc_init_authunix(void);
int __init rpcauth_init_module(void);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 770ef2cb5775..4f41d839face 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -30,7 +30,13 @@
#include <linux/sunrpc/xprtmultipath.h>
struct rpc_inode;
-struct rpc_sysfs_client;
+struct rpc_sysfs_client {
+ struct kobject kobject;
+ struct net *net;
+ struct rpc_clnt *clnt;
+ struct rpc_xprt_switch *xprt_switch;
+};
+
/*
* The high-level client handle
@@ -57,7 +63,9 @@ struct rpc_clnt {
cl_discrtry : 1,/* disconnect before retry */
cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
- cl_chatty : 1;/* be verbose */
+ cl_chatty : 1,/* be verbose */
+ cl_shutdown : 1;/* rpc immediate -EIO */
+ struct xprtsec_parms cl_xprtsec; /* transport security policy */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
@@ -139,6 +147,7 @@ struct rpc_create_args {
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
const struct cred *cred;
unsigned int max_connect;
+ struct xprtsec_parms xprtsec;
};
struct rpc_add_xprt_test {
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index b9f59aabee53..b52411bcfe4e 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -129,6 +129,21 @@ struct rpc_rqst {
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
+/* RPC transport layer security policies */
+enum xprtsec_policies {
+ RPC_XPRTSEC_NONE = 0,
+ RPC_XPRTSEC_TLS_ANON,
+ RPC_XPRTSEC_TLS_X509,
+};
+
+struct xprtsec_parms {
+ enum xprtsec_policies policy;
+
+ /* authentication material */
+ key_serial_t cert_serial;
+ key_serial_t privkey_serial;
+};
+
struct rpc_xprt_ops {
void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -185,6 +200,7 @@ enum xprt_transports {
XPRT_TRANSPORT_RDMA = 256,
XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
XPRT_TRANSPORT_LOCAL = 257,
+ XPRT_TRANSPORT_TCP_TLS = 258,
};
struct rpc_sysfs_xprt;
@@ -229,6 +245,7 @@ struct rpc_xprt {
*/
unsigned long bind_timeout,
reestablish_timeout;
+ struct xprtsec_parms xprtsec;
unsigned int connect_cookie; /* A cookie that gets bumped
every time the transport
is reconnected */
@@ -333,6 +350,7 @@ struct xprt_create {
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
struct rpc_xprt_switch *bc_xps;
unsigned int flags;
+ struct xprtsec_parms xprtsec;
};
struct xprt_class {
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 38284f25eddf..700a1e6c047c 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -57,9 +57,11 @@ struct sock_xprt {
struct work_struct error_worker;
struct work_struct recv_worker;
struct mutex recv_mutex;
+ struct completion handshake_done;
struct sockaddr_storage srcaddr;
unsigned short srcport;
int xprt_err;
+ struct rpc_clnt *clnt;
/*
* UDP socket buffer size parameters
@@ -90,5 +92,6 @@ struct sock_xprt {
#define XPRT_SOCK_WAKE_DISCONNECT (7)
#define XPRT_SOCK_CONNECT_SENT (8)
#define XPRT_SOCK_NOSPACE (9)
+#define XPRT_SOCK_IGNORE_RECV (10)
#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index d18ce144037e..03e3d0121d5e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -348,8 +348,6 @@ asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit,
const void __user *argp, size_t argsz);
asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op,
void __user *arg, unsigned int nr_args);
-
-/* fs/xattr.c */
asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
const void __user *value, size_t size, int flags);
asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
@@ -372,17 +370,9 @@ asmlinkage long sys_removexattr(const char __user *path,
asmlinkage long sys_lremovexattr(const char __user *path,
const char __user *name);
asmlinkage long sys_fremovexattr(int fd, const char __user *name);
-
-/* fs/dcache.c */
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
-
-/* fs/cookies.c */
asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len);
-
-/* fs/eventfd.c */
asmlinkage long sys_eventfd2(unsigned int count, int flags);
-
-/* fs/eventpoll.c */
asmlinkage long sys_epoll_create1(int flags);
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
struct epoll_event __user *event);
@@ -395,8 +385,6 @@ asmlinkage long sys_epoll_pwait2(int epfd, struct epoll_event __user *events,
const struct __kernel_timespec __user *timeout,
const sigset_t __user *sigmask,
size_t sigsetsize);
-
-/* fs/fcntl.c */
asmlinkage long sys_dup(unsigned int fildes);
asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
@@ -404,25 +392,15 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
asmlinkage long sys_fcntl64(unsigned int fd,
unsigned int cmd, unsigned long arg);
#endif
-
-/* fs/inotify_user.c */
asmlinkage long sys_inotify_init1(int flags);
asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
u32 mask);
asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd);
-
-/* fs/ioctl.c */
asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd,
unsigned long arg);
-
-/* fs/ioprio.c */
asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
asmlinkage long sys_ioprio_get(int which, int who);
-
-/* fs/locks.c */
asmlinkage long sys_flock(unsigned int fd, unsigned int cmd);
-
-/* fs/namei.c */
asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode,
unsigned dev);
asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode);
@@ -433,18 +411,12 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
int newdfd, const char __user *newname, int flags);
asmlinkage long sys_renameat(int olddfd, const char __user * oldname,
int newdfd, const char __user * newname);
-
-/* fs/namespace.c */
asmlinkage long sys_umount(char __user *name, int flags);
asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
char __user *type, unsigned long flags,
void __user *data);
asmlinkage long sys_pivot_root(const char __user *new_root,
const char __user *put_old);
-
-/* fs/nfsctl.c */
-
-/* fs/open.c */
asmlinkage long sys_statfs(const char __user * path,
struct statfs __user *buf);
asmlinkage long sys_statfs64(const char __user *path, size_t sz,
@@ -479,22 +451,14 @@ asmlinkage long sys_close(unsigned int fd);
asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd,
unsigned int flags);
asmlinkage long sys_vhangup(void);
-
-/* fs/pipe.c */
asmlinkage long sys_pipe2(int __user *fildes, int flags);
-
-/* fs/quota.c */
asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special,
qid_t id, void __user *addr);
asmlinkage long sys_quotactl_fd(unsigned int fd, unsigned int cmd, qid_t id,
void __user *addr);
-
-/* fs/readdir.c */
asmlinkage long sys_getdents64(unsigned int fd,
struct linux_dirent64 __user *dirent,
unsigned int count);
-
-/* fs/read_write.c */
asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
unsigned long offset_low, loff_t __user *result,
unsigned int whence);
@@ -517,12 +481,8 @@ asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
-
-/* fs/sendfile.c */
asmlinkage long sys_sendfile64(int out_fd, int in_fd,
loff_t __user *offset, size_t count);
-
-/* fs/select.c */
asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
fd_set __user *, struct __kernel_timespec __user *,
void __user *);
@@ -535,19 +495,13 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
asmlinkage long sys_ppoll_time32(struct pollfd __user *, unsigned int,
struct old_timespec32 __user *, const sigset_t __user *,
size_t);
-
-/* fs/signalfd.c */
asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags);
-
-/* fs/splice.c */
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
unsigned long nr_segs, unsigned int flags);
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
int fd_out, loff_t __user *off_out,
size_t len, unsigned int flags);
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags);
-
-/* fs/stat.c */
asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
int bufsiz);
asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
@@ -558,8 +512,6 @@ asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
struct stat64 __user *statbuf, int flag);
#endif
-
-/* fs/sync.c */
asmlinkage long sys_sync(void);
asmlinkage long sys_fsync(unsigned int fd);
asmlinkage long sys_fdatasync(unsigned int fd);
@@ -567,8 +519,6 @@ asmlinkage long sys_sync_file_range2(int fd, unsigned int flags,
loff_t offset, loff_t nbytes);
asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
unsigned int flags);
-
-/* fs/timerfd.c */
asmlinkage long sys_timerfd_create(int clockid, int flags);
asmlinkage long sys_timerfd_settime(int ufd, int flags,
const struct __kernel_itimerspec __user *utmr,
@@ -579,39 +529,25 @@ asmlinkage long sys_timerfd_gettime32(int ufd,
asmlinkage long sys_timerfd_settime32(int ufd, int flags,
const struct old_itimerspec32 __user *utmr,
struct old_itimerspec32 __user *otmr);
-
-/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
struct __kernel_timespec __user *utimes,
int flags);
asmlinkage long sys_utimensat_time32(unsigned int dfd,
const char __user *filename,
struct old_timespec32 __user *t, int flags);
-
-/* kernel/acct.c */
asmlinkage long sys_acct(const char __user *name);
-
-/* kernel/capability.c */
asmlinkage long sys_capget(cap_user_header_t header,
cap_user_data_t dataptr);
asmlinkage long sys_capset(cap_user_header_t header,
const cap_user_data_t data);
-
-/* kernel/exec_domain.c */
asmlinkage long sys_personality(unsigned int personality);
-
-/* kernel/exit.c */
asmlinkage long sys_exit(int error_code);
asmlinkage long sys_exit_group(int error_code);
asmlinkage long sys_waitid(int which, pid_t pid,
struct siginfo __user *infop,
int options, struct rusage __user *ru);
-
-/* kernel/fork.c */
asmlinkage long sys_set_tid_address(int __user *tidptr);
asmlinkage long sys_unshare(unsigned long unshare_flags);
-
-/* kernel/futex/syscalls.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
const struct __kernel_timespec __user *utime,
u32 __user *uaddr2, u32 val3);
@@ -627,31 +563,21 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
unsigned int nr_futexes, unsigned int flags,
struct __kernel_timespec __user *timeout, clockid_t clockid);
-
-/* kernel/hrtimer.c */
asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
struct __kernel_timespec __user *rmtp);
asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
-
-/* kernel/itimer.c */
asmlinkage long sys_getitimer(int which, struct __kernel_old_itimerval __user *value);
asmlinkage long sys_setitimer(int which,
struct __kernel_old_itimerval __user *value,
struct __kernel_old_itimerval __user *ovalue);
-
-/* kernel/kexec.c */
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
struct kexec_segment __user *segments,
unsigned long flags);
-
-/* kernel/module.c */
asmlinkage long sys_init_module(void __user *umod, unsigned long len,
const char __user *uargs);
asmlinkage long sys_delete_module(const char __user *name_user,
unsigned int flags);
-
-/* kernel/posix-timers.c */
asmlinkage long sys_timer_create(clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id);
@@ -685,15 +611,9 @@ asmlinkage long sys_clock_getres_time32(clockid_t which_clock,
asmlinkage long sys_clock_nanosleep_time32(clockid_t which_clock, int flags,
struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
-
-/* kernel/printk.c */
asmlinkage long sys_syslog(int type, char __user *buf, int len);
-
-/* kernel/ptrace.c */
asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
unsigned long data);
-/* kernel/sched/core.c */
-
asmlinkage long sys_sched_setparam(pid_t pid,
struct sched_param __user *param);
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
@@ -712,8 +632,6 @@ asmlinkage long sys_sched_rr_get_interval(pid_t pid,
struct __kernel_timespec __user *interval);
asmlinkage long sys_sched_rr_get_interval_time32(pid_t pid,
struct old_timespec32 __user *interval);
-
-/* kernel/signal.c */
asmlinkage long sys_restart_syscall(void);
asmlinkage long sys_kill(pid_t pid, int sig);
asmlinkage long sys_tkill(pid_t pid, int sig);
@@ -739,8 +657,6 @@ asmlinkage long sys_rt_sigtimedwait_time32(const sigset_t __user *uthese,
const struct old_timespec32 __user *uts,
size_t sigsetsize);
asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
-
-/* kernel/sys.c */
asmlinkage long sys_setpriority(int which, int who, int niceval);
asmlinkage long sys_getpriority(int which, int who);
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
@@ -774,16 +690,12 @@ asmlinkage long sys_umask(int mask);
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
-
-/* kernel/time.c */
asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p);
asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p);
-
-/* kernel/sys.c */
asmlinkage long sys_getpid(void);
asmlinkage long sys_getppid(void);
asmlinkage long sys_getuid(void);
@@ -792,8 +704,6 @@ asmlinkage long sys_getgid(void);
asmlinkage long sys_getegid(void);
asmlinkage long sys_gettid(void);
asmlinkage long sys_sysinfo(struct sysinfo __user *info);
-
-/* ipc/mqueue.c */
asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr);
asmlinkage long sys_mq_unlink(const char __user *name);
asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct __kernel_timespec __user *abs_timeout);
@@ -808,8 +718,6 @@ asmlinkage long sys_mq_timedsend_time32(mqd_t mqdes,
const char __user *u_msg_ptr,
unsigned int msg_len, unsigned int msg_prio,
const struct old_timespec32 __user *u_abs_timeout);
-
-/* ipc/msg.c */
asmlinkage long sys_msgget(key_t key, int msgflg);
asmlinkage long sys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
@@ -817,8 +725,6 @@ asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp,
size_t msgsz, long msgtyp, int msgflg);
asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp,
size_t msgsz, int msgflg);
-
-/* ipc/sem.c */
asmlinkage long sys_semget(key_t key, int nsems, int semflg);
asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
asmlinkage long sys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
@@ -830,15 +736,11 @@ asmlinkage long sys_semtimedop_time32(int semid, struct sembuf __user *sops,
const struct old_timespec32 __user *timeout);
asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
unsigned nsops);
-
-/* ipc/shm.c */
asmlinkage long sys_shmget(key_t key, size_t size, int flag);
asmlinkage long sys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
asmlinkage long sys_shmdt(char __user *shmaddr);
-
-/* net/socket.c */
asmlinkage long sys_socket(int, int, int);
asmlinkage long sys_socketpair(int, int, int, int __user *);
asmlinkage long sys_bind(int, struct sockaddr __user *, int);
@@ -858,18 +760,12 @@ asmlinkage long sys_getsockopt(int fd, int level, int optname,
asmlinkage long sys_shutdown(int, int);
asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
-
-/* mm/filemap.c */
asmlinkage long sys_readahead(int fd, loff_t offset, size_t count);
-
-/* mm/nommu.c, also with MMU */
asmlinkage long sys_brk(unsigned long brk);
asmlinkage long sys_munmap(unsigned long addr, size_t len);
asmlinkage long sys_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr);
-
-/* security/keys/keyctl.c */
asmlinkage long sys_add_key(const char __user *_type,
const char __user *_description,
const void __user *_payload,
@@ -881,8 +777,6 @@ asmlinkage long sys_request_key(const char __user *_type,
key_serial_t destringid);
asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
-
-/* arch/example/kernel/sys_example.c */
#ifdef CONFIG_CLONE_BACKWARDS
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long,
int __user *);
@@ -901,11 +795,9 @@ asmlinkage long sys_clone3(struct clone_args __user *uargs, size_t size);
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp);
-
-/* mm/fadvise.c */
asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
-/* mm/, CONFIG_MMU only */
+/* CONFIG_MMU only */
asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags);
asmlinkage long sys_swapoff(const char __user *specialfile);
asmlinkage long sys_mprotect(unsigned long start, size_t len,
@@ -943,7 +835,6 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
const int __user *nodes,
int __user *status,
int flags);
-
asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
siginfo_t __user *uinfo);
asmlinkage long sys_perf_event_open(
@@ -956,7 +847,6 @@ asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
asmlinkage long sys_recvmmsg_time32(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
struct old_timespec32 __user *timeout);
-
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
int options, struct rusage __user *ru);
asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
@@ -1068,7 +958,7 @@ asmlinkage long sys_cachestat(unsigned int fd,
* Architecture-specific system calls
*/
-/* arch/x86/kernel/ioport.c */
+/* x86 */
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
/* pciconfig: alpha, arm, arm64, ia64, sparc */
@@ -1176,11 +1066,11 @@ asmlinkage long sys_sysfs(int option,
unsigned long arg1, unsigned long arg2);
asmlinkage long sys_fork(void);
-/* obsolete: kernel/time/time.c */
+/* obsolete */
asmlinkage long sys_stime(__kernel_old_time_t __user *tptr);
asmlinkage long sys_stime32(old_time32_t __user *tptr);
-/* obsolete: kernel/signal.c */
+/* obsolete */
asmlinkage long sys_sigpending(old_sigset_t __user *uset);
asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set,
old_sigset_t __user *oset);
@@ -1200,19 +1090,19 @@ asmlinkage long sys_sgetmask(void);
asmlinkage long sys_ssetmask(int newmask);
asmlinkage long sys_signal(int sig, __sighandler_t handler);
-/* obsolete: kernel/sched/core.c */
+/* obsolete */
asmlinkage long sys_nice(int increment);
-/* obsolete: kernel/kexec_file.c */
+/* obsolete */
asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd,
unsigned long cmdline_len,
const char __user *cmdline_ptr,
unsigned long flags);
-/* obsolete: kernel/exit.c */
+/* obsolete */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
-/* obsolete: kernel/uid16.c */
+/* obsolete */
#ifdef CONFIG_HAVE_UID16
asmlinkage long sys_chown16(const char __user *filename,
old_uid_t user, old_gid_t group);
@@ -1239,10 +1129,10 @@ asmlinkage long sys_getgid16(void);
asmlinkage long sys_getegid16(void);
#endif
-/* obsolete: net/socket.c */
+/* obsolete */
asmlinkage long sys_socketcall(int call, unsigned long __user *args);
-/* obsolete: fs/stat.c */
+/* obsolete */
asmlinkage long sys_stat(const char __user *filename,
struct __old_kernel_stat __user *statbuf);
asmlinkage long sys_lstat(const char __user *filename,
@@ -1252,13 +1142,13 @@ asmlinkage long sys_fstat(unsigned int fd,
asmlinkage long sys_readlink(const char __user *path,
char __user *buf, int bufsiz);
-/* obsolete: fs/select.c */
+/* obsolete */
asmlinkage long sys_old_select(struct sel_arg_struct __user *arg);
-/* obsolete: fs/readdir.c */
+/* obsolete */
asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
-/* obsolete: kernel/sys.c */
+/* obsolete */
asmlinkage long sys_gethostname(char __user *name, int len);
asmlinkage long sys_uname(struct old_utsname __user *);
asmlinkage long sys_olduname(struct oldold_utsname __user *);
@@ -1266,11 +1156,11 @@ asmlinkage long sys_olduname(struct oldold_utsname __user *);
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim);
#endif
-/* obsolete: ipc */
+/* obsolete */
asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
unsigned long third, void __user *ptr, long fifth);
-/* obsolete: mm/ */
+/* obsolete */
asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 90cd08ab2f5d..02333f47c994 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -172,6 +172,20 @@ int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
/**
+ * enum tb_link_width - Thunderbolt/USB4 link width
+ * @TB_LINK_WIDTH_SINGLE: Single lane link
+ * @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
+ * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
+ * @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
+ */
+enum tb_link_width {
+ TB_LINK_WIDTH_SINGLE = BIT(0),
+ TB_LINK_WIDTH_DUAL = BIT(1),
+ TB_LINK_WIDTH_ASYM_TX = BIT(2),
+ TB_LINK_WIDTH_ASYM_RX = BIT(3),
+};
+
+/**
* struct tb_xdomain - Cross-domain (XDomain) connection
* @dev: XDomain device
* @tb: Pointer to the domain
@@ -186,7 +200,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
- * @link_width: Width of the link (1 or 2)
+ * @link_width: Width of the downstream facing link
* @link_usb4: Downstream link is USB4
* @is_unplugged: The XDomain is unplugged
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
@@ -234,7 +248,7 @@ struct tb_xdomain {
const char *vendor_name;
const char *device_name;
unsigned int link_speed;
- unsigned int link_width;
+ enum tb_link_width link_width;
bool link_usb4;
bool is_unplugged;
bool needs_uuid;
diff --git a/include/linux/uacce.h b/include/linux/uacce.h
index 0a81c3dfd26c..e290c0269944 100644
--- a/include/linux/uacce.h
+++ b/include/linux/uacce.h
@@ -86,6 +86,7 @@ enum uacce_q_state {
* @state: queue state machine
* @pasid: pasid associated to the mm
* @handle: iommu_sva handle returned by iommu_sva_bind_device()
+ * @mapping: user space mapping of the queue
*/
struct uacce_queue {
struct uacce_device *uacce;
@@ -97,6 +98,7 @@ struct uacce_queue {
enum uacce_q_state state;
u32 pasid;
struct iommu_sva *handle;
+ struct address_space *mapping;
};
/**
@@ -114,7 +116,6 @@ struct uacce_queue {
* @mutex: protects uacce operation
* @priv: private pointer of the uacce
* @queues: list of queues
- * @inode: core vfs
*/
struct uacce_device {
const char *algs;
@@ -130,7 +131,6 @@ struct uacce_device {
struct mutex mutex;
void *priv;
struct list_head queues;
- struct inode *inode;
};
#if IS_ENABLED(CONFIG_UACCE)
diff --git a/include/linux/ulpi/driver.h b/include/linux/ulpi/driver.h
index c7a1810373e3..a8cb617a3028 100644
--- a/include/linux/ulpi/driver.h
+++ b/include/linux/ulpi/driver.h
@@ -15,9 +15,9 @@ struct ulpi_ops;
* @dev: device interface
*/
struct ulpi {
+ struct device dev;
struct ulpi_device_id id;
const struct ulpi_ops *ops;
- struct device dev;
};
#define to_ulpi_dev(d) container_of(d, struct ulpi, dev)
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 0c7eff91adf4..4e9623e8492b 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -267,7 +267,7 @@ struct hc_driver {
int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup);
/* called after entering D0 (etc), before resuming the hub */
- int (*pci_resume)(struct usb_hcd *hcd, bool hibernated);
+ int (*pci_resume)(struct usb_hcd *hcd, pm_message_t state);
/* called just before hibernate final D3 state, allows host to poweroff parts */
int (*pci_poweroff_late)(struct usb_hcd *hcd, bool do_wakeup);
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 7eeb5f9c4f0d..1a0a4dc87980 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -278,7 +278,7 @@ struct usb_serial_driver {
int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss);
void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port,
const struct ktermios *old);
- void (*break_ctl)(struct tty_struct *tty, int break_state);
+ int (*break_ctl)(struct tty_struct *tty, int break_state);
unsigned int (*chars_in_buffer)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, long timeout);
bool (*tx_empty)(struct usb_serial_port *port);
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index 9292f0e07846..2489a7857d8e 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -60,8 +60,7 @@ struct typec_mux_desc {
#if IS_ENABLED(CONFIG_TYPEC)
-struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
- const struct typec_altmode_desc *desc);
+struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode);
void typec_mux_put(struct typec_mux *mux);
int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state);
@@ -74,8 +73,7 @@ void *typec_mux_get_drvdata(struct typec_mux_dev *mux);
#else
-static inline struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
- const struct typec_altmode_desc *desc)
+static inline struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -102,10 +100,9 @@ static inline void *typec_mux_get_drvdata(struct typec_mux_dev *mux)
#endif /* CONFIG_TYPEC */
-static inline struct typec_mux *
-typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc)
+static inline struct typec_mux *typec_mux_get(struct device *dev)
{
- return fwnode_typec_mux_get(dev_fwnode(dev), desc);
+ return fwnode_typec_mux_get(dev_fwnode(dev));
}
#endif /* __USB_TYPEC_MUX */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b93238db94e3..de6041deee37 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -103,6 +103,7 @@ int virtqueue_resize(struct virtqueue *vq, u32 num,
* @config_enabled: configuration change reporting enabled
* @config_change_pending: configuration change reported while disabled
* @config_lock: protects configuration change reporting
+ * @vqs_list_lock: protects @vqs.
* @dev: underlying device.
* @id: the device type identification (used to match it with a driver).
* @config: the configuration ops for this device.
@@ -117,7 +118,7 @@ struct virtio_device {
bool config_enabled;
bool config_change_pending;
spinlock_t config_lock;
- spinlock_t vqs_list_lock; /* Protects VQs list access */
+ spinlock_t vqs_list_lock;
struct device dev;
struct virtio_device_id id;
const struct virtio_config_ops *config;
@@ -160,6 +161,8 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev);
* @feature_table_size: number of entries in the feature table array.
* @feature_table_legacy: same as feature_table but when working in legacy mode.
* @feature_table_size_legacy: number of entries in feature table legacy array.
+ * @validate: the function to call to validate features and config space.
+ * Returns 0 or -errno.
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @scan: optional function to call after successful probe; intended
* for virtio-scsi to invoke a scan.
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
index c4eeb79b0139..067ac1d789bc 100644
--- a/include/linux/virtio_pci_modern.h
+++ b/include/linux/virtio_pci_modern.h
@@ -38,6 +38,12 @@ struct virtio_pci_modern_device {
int modern_bars;
struct virtio_device_id id;
+
+ /* optional check for vendor virtio device, returns dev_id or -ERRNO */
+ int (*device_id_check)(struct pci_dev *pdev);
+
+ /* optional mask for devices with limited DMA space */
+ u64 dma_mask;
};
/*
diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
index 8958e5e2fc5b..e5a00d126612 100644
--- a/include/media/dvbdev.h
+++ b/include/media/dvbdev.h
@@ -130,7 +130,7 @@ struct dvb_adapter {
* struct dvb_device - represents a DVB device node
*
* @list_head: List head with all DVB devices
- * @ref: reference counter
+ * @ref: reference count for this device
* @fops: pointer to struct file_operations
* @adapter: pointer to the adapter that holds this device node
* @type: type of the device, as defined by &enum dvb_device_type.
@@ -266,10 +266,10 @@ int dvb_register_device(struct dvb_adapter *adap,
/**
* dvb_remove_device - Remove a registered DVB device
*
+ * @dvbdev: pointer to struct dvb_device
+ *
* This does not free memory. dvb_free_device() will do that when
* reference counter is empty
- *
- * @dvbdev: pointer to struct dvb_device
*/
void dvb_remove_device(struct dvb_device *dvbdev);
diff --git a/include/media/jpeg.h b/include/media/jpeg.h
new file mode 100644
index 000000000000..a01e142e99a7
--- /dev/null
+++ b/include/media/jpeg.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _MEDIA_JPEG_H_
+#define _MEDIA_JPEG_H_
+
+/* JPEG markers */
+#define JPEG_MARKER_TEM 0x01
+#define JPEG_MARKER_SOF0 0xc0
+#define JPEG_MARKER_DHT 0xc4
+#define JPEG_MARKER_RST 0xd0
+#define JPEG_MARKER_SOI 0xd8
+#define JPEG_MARKER_EOI 0xd9
+#define JPEG_MARKER_SOS 0xda
+#define JPEG_MARKER_DQT 0xdb
+#define JPEG_MARKER_DRI 0xdd
+#define JPEG_MARKER_DHP 0xde
+#define JPEG_MARKER_APP0 0xe0
+#define JPEG_MARKER_COM 0xfe
+
+#endif /* _MEDIA_JPEG_H_ */
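The header above only supplies the marker values. As a rough, kernel-style sketch (not part of this patch; jpeg_find_marker() is a hypothetical helper that assumes <linux/types.h> and <linux/errno.h>), a parser might locate a marker in a flat buffer like this:

static int jpeg_find_marker(const u8 *buf, size_t len, u8 marker)
{
	size_t i;

	/* JPEG markers are encoded as an 0xff byte followed by the marker code. */
	for (i = 0; i + 1 < len; i++) {
		if (buf[i] == 0xff && buf[i + 1] == marker)
			return i;
	}
	return -EINVAL;
}

For example, jpeg_find_marker(buf, len, JPEG_MARKER_SOS) would return the offset of the start-of-scan marker, or -EINVAL if none is present.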
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 741f9c629c6f..2b6cd343ee9e 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -741,7 +741,7 @@ static inline void media_entity_cleanup(struct media_entity *entity) {}
* media_get_pad_index() - retrieves a pad index from an entity
*
* @entity: entity where the pads belong
- * @is_sink: true if the pad is a sink, false if it is a source
+ * @pad_type: the type of the pad, one of MEDIA_PAD_FL_* pad types
 * @sig_type: type of signal of the pad to be searched
*
* This helper function finds the first pad index inside an entity that
@@ -752,7 +752,7 @@ static inline void media_entity_cleanup(struct media_entity *entity) {}
* On success, return the pad number. If the pad was not found or the media
* entity is a NULL pointer, return -EINVAL.
*/
-int media_get_pad_index(struct media_entity *entity, bool is_sink,
+int media_get_pad_index(struct media_entity *entity, u32 pad_type,
enum media_pad_signal_type sig_type);
/**
@@ -1079,7 +1079,7 @@ struct media_pipeline *media_pad_pipeline(struct media_pad *pad);
* Return: returns the pad number on success or a negative error code.
*/
int media_entity_get_fwnode_pad(struct media_entity *entity,
- struct fwnode_handle *fwnode,
+ const struct fwnode_handle *fwnode,
unsigned long direction_flags);
/**
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 1bdaea248089..d278836fd9cb 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -480,6 +480,7 @@ enum v4l2_pixel_encoding {
* @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4).
* @comp_planes: Number of component planes, which includes the alpha plane (1 to 4).
* @bpp: Array of per-plane bytes per pixel
+ * @bpp_div: Array of per-plane bytes per pixel divisors to support fractional pixel sizes.
* @hdiv: Horizontal chroma subsampling factor
* @vdiv: Vertical chroma subsampling factor
* @block_w: Per-plane macroblock pixel width (optional)
@@ -491,6 +492,7 @@ struct v4l2_format_info {
u8 mem_planes;
u8 comp_planes;
u8 bpp[4];
+ u8 bpp_div[4];
u8 hdiv;
u8 vdiv;
u8 block_w[4];
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 7788eeb3e2bb..59679a42b3e7 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -52,6 +52,10 @@ struct video_device;
* @p_hdr10_cll: Pointer to an HDR10 Content Light Level structure.
* @p_hdr10_mastering: Pointer to an HDR10 Mastering Display structure.
* @p_area: Pointer to an area.
+ * @p_av1_sequence: Pointer to an AV1 sequence structure.
+ * @p_av1_tile_group_entry: Pointer to an AV1 tile group entry structure.
+ * @p_av1_frame: Pointer to an AV1 frame structure.
+ * @p_av1_film_grain: Pointer to an AV1 film grain structure.
* @p: Pointer to a compound value.
* @p_const: Pointer to a constant compound value.
*/
@@ -81,6 +85,10 @@ union v4l2_ctrl_ptr {
struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll;
struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
struct v4l2_area *p_area;
+ struct v4l2_ctrl_av1_sequence *p_av1_sequence;
+ struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry;
+ struct v4l2_ctrl_av1_frame *p_av1_frame;
+ struct v4l2_ctrl_av1_film_grain *p_av1_film_grain;
void *p;
const void *p_const;
};
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index bb9de6a899e0..d6c8eb2b5201 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -593,7 +593,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
- return m2m_ctx->out_q_ctx.num_rdy;
+ unsigned int num_buf_rdy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+ num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+
+ return num_buf_rdy;
}
/**
@@ -605,7 +612,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
- return m2m_ctx->cap_q_ctx.num_rdy;
+ unsigned int num_buf_rdy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+ num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+
+ return num_buf_rdy;
}
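With the ready counts now read under rdy_spinlock, a typical consumer is a mem2mem driver's job_ready() callback (a minimal sketch; my_ctx and its m2m_ctx member are hypothetical driver state, not part of this patch):

static int my_job_ready(void *priv)
{
	struct my_ctx *ctx = priv;

	/* Schedule a job only when both queues have at least one buffer ready. */
	return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0 &&
	       v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0;
}

Such a callback would normally be hooked up through the .job_ready member of struct v4l2_m2m_ops.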
/**
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 1b4230cd42a3..af729859385e 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -185,7 +185,7 @@ struct bt_iso_ucast_qos {
struct bt_iso_bcast_qos {
__u8 big;
__u8 bis;
- __u8 sync_interval;
+ __u8 sync_factor;
__u8 packing;
__u8 framing;
struct bt_iso_io_qos in;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index a5801649f619..5e68b3dd4422 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -979,6 +979,7 @@ struct mgmt_ev_auth_failed {
#define MGMT_DEV_FOUND_NOT_CONNECTABLE BIT(2)
#define MGMT_DEV_FOUND_INITIATED_CONN BIT(3)
#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED BIT(4)
+#define MGMT_DEV_FOUND_SCAN_RSP BIT(5)
#define MGMT_EV_DEVICE_FOUND 0x0012
struct mgmt_ev_device_found {
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 9d45a5b20316..06287de69cd2 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -436,8 +436,10 @@ struct uapi_definition {
}, \
##__VA_ARGS__
#define UAPI_DEF_CHAIN_OBJ_TREE_NAMED(_object_enum, ...) \
- UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, &UVERBS_OBJECT(_object_enum), \
- ##__VA_ARGS__)
+ UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, \
+ PTR_IF(IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS), \
+ &UVERBS_OBJECT(_object_enum)), \
+ ##__VA_ARGS__)
/*
* =======================================
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index cb8fbb241879..22aae505c813 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -730,6 +730,11 @@ enum macaccess_entry_type {
ENTRYTYPE_MACv6,
};
+enum ocelot_proto {
+ OCELOT_PROTO_PTP_L2 = BIT(0),
+ OCELOT_PROTO_PTP_L4 = BIT(1),
+};
+
#define OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION BIT(0)
#define OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP BIT(1)
@@ -775,6 +780,8 @@ struct ocelot_port {
unsigned int ptp_skbs_in_flight;
struct sk_buff_head tx_skbs;
+ unsigned int trap_proto;
+
u16 mrp_ring_id;
u8 ptp_cmd;
@@ -868,12 +875,9 @@ struct ocelot {
u8 mm_supported:1;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
- struct hwtstamp_config hwtstamp_config;
unsigned int ptp_skbs_in_flight;
/* Protects the 2-step TX timestamp ID logic */
spinlock_t ts_id_lock;
- /* Protects the PTP interface state */
- struct mutex ptp_lock;
/* Protects the PTP clock */
spinlock_t ptp_clock_lock;
struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM];
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 99cbc5949e3c..793f82cc1515 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1512,7 +1512,7 @@ DEFINE_EVENT(f2fs_discard, f2fs_remove_discard,
TP_ARGS(dev, blkstart, blklen)
);
-TRACE_EVENT(f2fs_issue_reset_zone,
+DECLARE_EVENT_CLASS(f2fs_reset_zone,
TP_PROTO(struct block_device *dev, block_t blkstart),
@@ -1528,11 +1528,25 @@ TRACE_EVENT(f2fs_issue_reset_zone,
__entry->blkstart = blkstart;
),
- TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+ TP_printk("dev = (%d,%d), zone at block = 0x%llx",
show_dev(__entry->dev),
(unsigned long long)__entry->blkstart)
);
+DEFINE_EVENT(f2fs_reset_zone, f2fs_queue_reset_zone,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart),
+
+ TP_ARGS(dev, blkstart)
+);
+
+DEFINE_EVENT(f2fs_reset_zone, f2fs_issue_reset_zone,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart),
+
+ TP_ARGS(dev, blkstart)
+);
+
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct block_device *dev, unsigned int nobarrier,
@@ -1979,6 +1993,7 @@ TRACE_EVENT(f2fs_iostat,
__field(unsigned long long, fs_nrio)
__field(unsigned long long, fs_mrio)
__field(unsigned long long, fs_discard)
+ __field(unsigned long long, fs_reset_zone)
),
TP_fast_assign(
@@ -2010,12 +2025,14 @@ TRACE_EVENT(f2fs_iostat,
__entry->fs_nrio = iostat[FS_NODE_READ_IO];
__entry->fs_mrio = iostat[FS_META_READ_IO];
__entry->fs_discard = iostat[FS_DISCARD_IO];
+ __entry->fs_reset_zone = iostat[FS_ZONE_RESET_IO];
),
TP_printk("dev = (%d,%d), "
"app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu, "
"compr(buffered=%llu, mapped=%llu)], "
- "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu], "
+ "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu, "
+ "reset_zone=%llu], "
"gc [data=%llu, node=%llu], "
"cp [data=%llu, node=%llu, meta=%llu], "
"app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
@@ -2026,6 +2043,7 @@ TRACE_EVENT(f2fs_iostat,
__entry->app_bio, __entry->app_mio, __entry->app_bcdio,
__entry->app_mcdio, __entry->fs_dio, __entry->fs_cdio,
__entry->fs_nio, __entry->fs_mio, __entry->fs_discard,
+ __entry->fs_reset_zone,
__entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio,
__entry->fs_cp_nio, __entry->fs_cp_mio,
__entry->app_rio, __entry->app_drio, __entry->app_brio,
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 76297ecd4935..20b914250ce9 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -65,7 +65,7 @@ TRACE_EVENT(fib_table_lookup,
}
dev = nhc ? nhc->nhc_dev : NULL;
- strlcpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
+ strscpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
if (nhc) {
if (nhc->nhc_gw_family == AF_INET) {
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index 4d3e607b3cde..5d7ee2610728 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -63,7 +63,7 @@ TRACE_EVENT(fib6_table_lookup,
}
if (res->nh && res->nh->fib_nh_dev) {
- strlcpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ);
+ strscpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ);
} else {
strcpy(__entry->name, "-");
}
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index da611a7aaf97..f667c76a3b02 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -51,7 +51,8 @@ TRACE_EVENT(net_dev_start_xmit,
__entry->network_offset = skb_network_offset(skb);
__entry->transport_offset_valid =
skb_transport_header_was_set(skb);
- __entry->transport_offset = skb_transport_offset(skb);
+ __entry->transport_offset = skb_transport_header_was_set(skb) ?
+ skb_transport_offset(skb) : 0;
__entry->tx_flags = skb_shinfo(skb)->tx_flags;
__entry->gso_size = skb_shinfo(skb)->gso_size;
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 69e42ef30979..43711753616a 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -139,36 +139,68 @@ DEFINE_RPC_CLNT_EVENT(release);
DEFINE_RPC_CLNT_EVENT(replace_xprt);
DEFINE_RPC_CLNT_EVENT(replace_xprt_err);
+TRACE_DEFINE_ENUM(RPC_XPRTSEC_NONE);
+TRACE_DEFINE_ENUM(RPC_XPRTSEC_TLS_X509);
+
+#define rpc_show_xprtsec_policy(policy) \
+ __print_symbolic(policy, \
+ { RPC_XPRTSEC_NONE, "none" }, \
+ { RPC_XPRTSEC_TLS_ANON, "tls-anon" }, \
+ { RPC_XPRTSEC_TLS_X509, "tls-x509" })
+
+#define rpc_show_create_flags(flags) \
+ __print_flags(flags, "|", \
+ { RPC_CLNT_CREATE_HARDRTRY, "HARDRTRY" }, \
+ { RPC_CLNT_CREATE_AUTOBIND, "AUTOBIND" }, \
+ { RPC_CLNT_CREATE_NONPRIVPORT, "NONPRIVPORT" }, \
+ { RPC_CLNT_CREATE_NOPING, "NOPING" }, \
+ { RPC_CLNT_CREATE_DISCRTRY, "DISCRTRY" }, \
+ { RPC_CLNT_CREATE_QUIET, "QUIET" }, \
+ { RPC_CLNT_CREATE_INFINITE_SLOTS, \
+ "INFINITE_SLOTS" }, \
+ { RPC_CLNT_CREATE_NO_IDLE_TIMEOUT, \
+ "NO_IDLE_TIMEOUT" }, \
+ { RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT, \
+ "NO_RETRANS_TIMEOUT" }, \
+ { RPC_CLNT_CREATE_SOFTERR, "SOFTERR" }, \
+ { RPC_CLNT_CREATE_REUSEPORT, "REUSEPORT" })
+
TRACE_EVENT(rpc_clnt_new,
TP_PROTO(
const struct rpc_clnt *clnt,
const struct rpc_xprt *xprt,
- const char *program,
- const char *server
+ const struct rpc_create_args *args
),
- TP_ARGS(clnt, xprt, program, server),
+ TP_ARGS(clnt, xprt, args),
TP_STRUCT__entry(
__field(unsigned int, client_id)
+ __field(unsigned long, xprtsec)
+ __field(unsigned long, flags)
+ __string(program, clnt->cl_program->name)
+ __string(server, xprt->servername)
__string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
__string(port, xprt->address_strings[RPC_DISPLAY_PORT])
- __string(program, program)
- __string(server, server)
),
TP_fast_assign(
__entry->client_id = clnt->cl_clid;
+ __entry->xprtsec = args->xprtsec.policy;
+ __entry->flags = args->flags;
+ __assign_str(program, clnt->cl_program->name);
+ __assign_str(server, xprt->servername);
__assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
- __assign_str(program, program);
- __assign_str(server, server);
),
- TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER
- " peer=[%s]:%s program=%s server=%s",
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " peer=[%s]:%s"
+ " program=%s server=%s xprtsec=%s flags=%s",
__entry->client_id, __get_str(addr), __get_str(port),
- __get_str(program), __get_str(server))
+ __get_str(program), __get_str(server),
+ rpc_show_xprtsec_policy(__entry->xprtsec),
+ rpc_show_create_flags(__entry->flags)
+ )
);
TRACE_EVENT(rpc_clnt_new_err,
@@ -1493,6 +1525,50 @@ TRACE_EVENT(rpcb_unregister,
)
);
+/**
+ ** RPC-over-TLS tracepoints
+ **/
+
+DECLARE_EVENT_CLASS(rpc_tls_class,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(clnt, xprt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, requested_policy)
+ __field(u32, version)
+ __string(servername, xprt->servername)
+ __string(progname, clnt->cl_program->name)
+ ),
+
+ TP_fast_assign(
+ __entry->requested_policy = clnt->cl_xprtsec.policy;
+ __entry->version = clnt->cl_vers;
+ __assign_str(servername, xprt->servername);
+ __assign_str(progname, clnt->cl_program->name);
+ ),
+
+ TP_printk("server=%s %sv%u requested_policy=%s",
+ __get_str(servername), __get_str(progname), __entry->version,
+ rpc_show_xprtsec_policy(__entry->requested_policy)
+ )
+);
+
+#define DEFINE_RPC_TLS_EVENT(name) \
+ DEFINE_EVENT(rpc_tls_class, rpc_tls_##name, \
+ TP_PROTO( \
+ const struct rpc_clnt *clnt, \
+ const struct rpc_xprt *xprt \
+ ), \
+ TP_ARGS(clnt, xprt))
+
+DEFINE_RPC_TLS_EVENT(unavailable);
+DEFINE_RPC_TLS_EVENT(not_started);
+
+
/* Record an xdr_buf containing a fully-formed RPC message */
DECLARE_EVENT_CLASS(svc_xdr_msg_class,
TP_PROTO(
diff --git a/include/uapi/asm-generic/bitsperlong.h b/include/uapi/asm-generic/bitsperlong.h
index 693d9a40eb7b..352cb81947b8 100644
--- a/include/uapi/asm-generic/bitsperlong.h
+++ b/include/uapi/asm-generic/bitsperlong.h
@@ -2,6 +2,17 @@
#ifndef _UAPI__ASM_GENERIC_BITS_PER_LONG
#define _UAPI__ASM_GENERIC_BITS_PER_LONG
+#ifndef __BITS_PER_LONG
+/*
+ * To stay safe and avoid regressions, only unify the uapi
+ * bitsperlong.h for architectures whose newer toolchains provide
+ * the definitions of __CHAR_BIT__ and __SIZEOF_LONG__.
+ * See the following link for more info:
+ * https://lore.kernel.org/linux-arch/b9624545-2c80-49a1-ac3c-39264a591f7b@app.fastmail.com/
+ */
+#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
+#define __BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+#else
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
@@ -9,8 +20,8 @@
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
-#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif
+#endif
#endif /* _UAPI__ASM_GENERIC_BITS_PER_LONG */
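A quick user-space check of the new derivation (a sketch, assuming the uapi headers are installed and the toolchain defines __CHAR_BIT__ and __SIZEOF_LONG__); on a typical LP64 target all three lines print 64:

#include <asm/bitsperlong.h>
#include <limits.h>
#include <stdio.h>

int main(void)
{
#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
	printf("__CHAR_BIT__ * __SIZEOF_LONG__ = %d\n",
	       __CHAR_BIT__ * __SIZEOF_LONG__);
#endif
	printf("__BITS_PER_LONG = %d\n", __BITS_PER_LONG);
	printf("sizeof(long) * CHAR_BIT = %zu\n", sizeof(long) * CHAR_BIT);
	return 0;
}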
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index cd639fae9086..fd6c1cb585db 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -38,12 +38,12 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
#endif
-/* fs/xattr.c */
#define __NR_setxattr 5
__SYSCALL(__NR_setxattr, sys_setxattr)
#define __NR_lsetxattr 6
@@ -68,58 +68,38 @@ __SYSCALL(__NR_removexattr, sys_removexattr)
__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
#define __NR_fremovexattr 16
__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-
-/* fs/dcache.c */
#define __NR_getcwd 17
__SYSCALL(__NR_getcwd, sys_getcwd)
-
-/* fs/cookies.c */
#define __NR_lookup_dcookie 18
__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
-
-/* fs/eventfd.c */
#define __NR_eventfd2 19
__SYSCALL(__NR_eventfd2, sys_eventfd2)
-
-/* fs/eventpoll.c */
#define __NR_epoll_create1 20
__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
#define __NR_epoll_ctl 21
__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
#define __NR_epoll_pwait 22
__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
-
-/* fs/fcntl.c */
#define __NR_dup 23
__SYSCALL(__NR_dup, sys_dup)
#define __NR_dup3 24
__SYSCALL(__NR_dup3, sys_dup3)
#define __NR3264_fcntl 25
__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
-
-/* fs/inotify_user.c */
#define __NR_inotify_init1 26
__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
#define __NR_inotify_add_watch 27
__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 28
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-
-/* fs/ioctl.c */
#define __NR_ioctl 29
__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
-
-/* fs/ioprio.c */
#define __NR_ioprio_set 30
__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
#define __NR_ioprio_get 31
__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-
-/* fs/locks.c */
#define __NR_flock 32
__SYSCALL(__NR_flock, sys_flock)
-
-/* fs/namei.c */
#define __NR_mknodat 33
__SYSCALL(__NR_mknodat, sys_mknodat)
#define __NR_mkdirat 34
@@ -130,25 +110,21 @@ __SYSCALL(__NR_unlinkat, sys_unlinkat)
__SYSCALL(__NR_symlinkat, sys_symlinkat)
#define __NR_linkat 37
__SYSCALL(__NR_linkat, sys_linkat)
+
#ifdef __ARCH_WANT_RENAMEAT
/* renameat is superseded with flags by renameat2 */
#define __NR_renameat 38
__SYSCALL(__NR_renameat, sys_renameat)
#endif /* __ARCH_WANT_RENAMEAT */
-/* fs/namespace.c */
#define __NR_umount2 39
__SYSCALL(__NR_umount2, sys_umount)
#define __NR_mount 40
__SYSCALL(__NR_mount, sys_mount)
#define __NR_pivot_root 41
__SYSCALL(__NR_pivot_root, sys_pivot_root)
-
-/* fs/nfsctl.c */
#define __NR_nfsservctl 42
__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
-
-/* fs/open.c */
#define __NR3264_statfs 43
__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
compat_sys_statfs64)
@@ -161,7 +137,6 @@ __SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
#define __NR3264_ftruncate 46
__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
compat_sys_ftruncate64)
-
#define __NR_fallocate 47
__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
#define __NR_faccessat 48
@@ -186,20 +161,12 @@ __SYSCALL(__NR_openat, sys_openat)
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
__SYSCALL(__NR_vhangup, sys_vhangup)
-
-/* fs/pipe.c */
#define __NR_pipe2 59
__SYSCALL(__NR_pipe2, sys_pipe2)
-
-/* fs/quota.c */
#define __NR_quotactl 60
__SYSCALL(__NR_quotactl, sys_quotactl)
-
-/* fs/readdir.c */
#define __NR_getdents64 61
__SYSCALL(__NR_getdents64, sys_getdents64)
-
-/* fs/read_write.c */
#define __NR3264_lseek 62
__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
#define __NR_read 63
@@ -218,12 +185,9 @@ __SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
#define __NR_pwritev 70
__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
-
-/* fs/sendfile.c */
#define __NR3264_sendfile 71
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
-/* fs/select.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
@@ -231,21 +195,17 @@ __SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_psel
__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
#endif
-/* fs/signalfd.c */
#define __NR_signalfd4 74
__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
-
-/* fs/splice.c */
#define __NR_vmsplice 75
__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_splice 76
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 77
__SYSCALL(__NR_tee, sys_tee)
-
-/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
+
#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
@@ -253,13 +213,13 @@ __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
#endif
-/* fs/sync.c */
#define __NR_sync 81
__SYSCALL(__NR_sync, sys_sync)
#define __NR_fsync 82
__SYSCALL(__NR_fsync, sys_fsync)
#define __NR_fdatasync 83
__SYSCALL(__NR_fdatasync, sys_fdatasync)
+
#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
#define __NR_sync_file_range2 84
__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
@@ -270,9 +230,9 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
compat_sys_sync_file_range)
#endif
-/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
@@ -282,45 +242,35 @@ __SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
sys_timerfd_gettime)
#endif
-/* fs/utimes.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
#endif
-/* kernel/acct.c */
#define __NR_acct 89
__SYSCALL(__NR_acct, sys_acct)
-
-/* kernel/capability.c */
#define __NR_capget 90
__SYSCALL(__NR_capget, sys_capget)
#define __NR_capset 91
__SYSCALL(__NR_capset, sys_capset)
-
-/* kernel/exec_domain.c */
#define __NR_personality 92
__SYSCALL(__NR_personality, sys_personality)
-
-/* kernel/exit.c */
#define __NR_exit 93
__SYSCALL(__NR_exit, sys_exit)
#define __NR_exit_group 94
__SYSCALL(__NR_exit_group, sys_exit_group)
#define __NR_waitid 95
__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
-
-/* kernel/fork.c */
#define __NR_set_tid_address 96
__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
#define __NR_unshare 97
__SYSCALL(__NR_unshare, sys_unshare)
-/* kernel/futex.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
#endif
+
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
@@ -328,43 +278,40 @@ __SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
-/* kernel/hrtimer.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
#endif
-/* kernel/itimer.c */
#define __NR_getitimer 102
__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
#define __NR_setitimer 103
__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
-
-/* kernel/kexec.c */
#define __NR_kexec_load 104
__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
-
-/* kernel/module.c */
#define __NR_init_module 105
__SYSCALL(__NR_init_module, sys_init_module)
#define __NR_delete_module 106
__SYSCALL(__NR_delete_module, sys_delete_module)
-
-/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
#endif
+
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
#endif
+
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
@@ -377,15 +324,10 @@ __SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
sys_clock_nanosleep)
#endif
-/* kernel/printk.c */
#define __NR_syslog 116
__SYSCALL(__NR_syslog, sys_syslog)
-
-/* kernel/ptrace.c */
#define __NR_ptrace 117
__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
-
-/* kernel/sched/core.c */
#define __NR_sched_setparam 118
__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
#define __NR_sched_setscheduler 119
@@ -406,13 +348,13 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
sys_sched_rr_get_interval)
#endif
-/* kernel/signal.c */
#define __NR_restart_syscall 128
__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
#define __NR_kill 129
@@ -431,18 +373,18 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
#endif
+
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigreturn 139
__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
-
-/* kernel/sys.c */
#define __NR_setpriority 140
__SYSCALL(__NR_setpriority, sys_setpriority)
#define __NR_getpriority 141
@@ -507,7 +449,6 @@ __SYSCALL(__NR_prctl, sys_prctl)
#define __NR_getcpu 168
__SYSCALL(__NR_getcpu, sys_getcpu)
-/* kernel/time.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
@@ -517,7 +458,6 @@ __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
#endif
-/* kernel/sys.c */
#define __NR_getpid 172
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_getppid 173
@@ -534,12 +474,11 @@ __SYSCALL(__NR_getegid, sys_getegid)
__SYSCALL(__NR_gettid, sys_gettid)
#define __NR_sysinfo 179
__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
-
-/* ipc/mqueue.c */
#define __NR_mq_open 180
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
@@ -547,12 +486,11 @@ __SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
sys_mq_timedreceive)
#endif
+
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
-
-/* ipc/msg.c */
#define __NR_msgget 186
__SYSCALL(__NR_msgget, sys_msgget)
#define __NR_msgctl 187
@@ -561,20 +499,18 @@ __SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
#define __NR_msgsnd 189
__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
-
-/* ipc/sem.c */
#define __NR_semget 190
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
#endif
+
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
-
-/* ipc/shm.c */
#define __NR_shmget 194
__SYSCALL(__NR_shmget, sys_shmget)
#define __NR_shmctl 195
@@ -583,8 +519,6 @@ __SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
#define __NR_shmdt 197
__SYSCALL(__NR_shmdt, sys_shmdt)
-
-/* net/socket.c */
#define __NR_socket 198
__SYSCALL(__NR_socket, sys_socket)
#define __NR_socketpair 199
@@ -615,40 +549,30 @@ __SYSCALL(__NR_shutdown, sys_shutdown)
__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
#define __NR_recvmsg 212
__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
-
-/* mm/filemap.c */
#define __NR_readahead 213
__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
-
-/* mm/nommu.c, also with MMU */
#define __NR_brk 214
__SYSCALL(__NR_brk, sys_brk)
#define __NR_munmap 215
__SYSCALL(__NR_munmap, sys_munmap)
#define __NR_mremap 216
__SYSCALL(__NR_mremap, sys_mremap)
-
-/* security/keys/keyctl.c */
#define __NR_add_key 217
__SYSCALL(__NR_add_key, sys_add_key)
#define __NR_request_key 218
__SYSCALL(__NR_request_key, sys_request_key)
#define __NR_keyctl 219
__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
-
-/* arch/example/kernel/sys_example.c */
#define __NR_clone 220
__SYSCALL(__NR_clone, sys_clone)
#define __NR_execve 221
__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
-
#define __NR3264_mmap 222
__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
-/* mm/fadvise.c */
#define __NR3264_fadvise64 223
__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
-/* mm/, CONFIG_MMU only */
+/* CONFIG_MMU only */
#ifndef __ARCH_NOMMU
#define __NR_swapon 224
__SYSCALL(__NR_swapon, sys_swapon)
@@ -691,6 +615,7 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
@@ -706,6 +631,7 @@ __SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recv
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
#endif
+
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
@@ -716,10 +642,12 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
#endif
+
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
@@ -770,15 +698,19 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
#endif
+
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+
#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
@@ -844,13 +776,14 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
+
#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
-
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
#define __NR_pidfd_getfd 438
@@ -865,7 +798,6 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
#define __NR_quotactl_fd 443
__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
-
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445
@@ -877,12 +809,11 @@ __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
#define __NR_memfd_secret 447
__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#endif
+
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
-
#define __NR_futex_waitv 449
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
-
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h
index 8ab12d731e3b..fc248ef00e86 100644
--- a/include/uapi/linux/counter.h
+++ b/include/uapi/linux/counter.h
@@ -127,6 +127,12 @@ enum counter_count_mode {
COUNTER_COUNT_MODE_RANGE_LIMIT,
COUNTER_COUNT_MODE_NON_RECYCLE,
COUNTER_COUNT_MODE_MODULO_N,
+ COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT,
+ COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT,
+ COUNTER_COUNT_MODE_RATE_GENERATOR,
+ COUNTER_COUNT_MODE_SQUARE_WAVE_MODE,
+ COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE,
+ COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE,
};
/* Count function values */
diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h
index 326f6a53f1f2..7e0983b987c2 100644
--- a/include/uapi/linux/dvb/frontend.h
+++ b/include/uapi/linux/dvb/frontend.h
@@ -296,6 +296,10 @@ enum fe_spectral_inversion {
* @FEC_28_45: Forward Error Correction Code 28/45
* @FEC_32_45: Forward Error Correction Code 32/45
* @FEC_77_90: Forward Error Correction Code 77/90
+ * @FEC_11_45: Forward Error Correction Code 11/45
+ * @FEC_4_15: Forward Error Correction Code 4/15
+ * @FEC_14_45: Forward Error Correction Code 14/45
+ * @FEC_7_15: Forward Error Correction Code 7/15
*
* Please note that not all FEC types are supported by a given standard.
*/
@@ -329,6 +333,10 @@ enum fe_code_rate {
FEC_28_45,
FEC_32_45,
FEC_77_90,
+ FEC_11_45,
+ FEC_4_15,
+ FEC_14_45,
+ FEC_7_15,
};
/**
diff --git a/include/uapi/linux/dvb/version.h b/include/uapi/linux/dvb/version.h
index 1a8cd038aa0b..20bc874de321 100644
--- a/include/uapi/linux/dvb/version.h
+++ b/include/uapi/linux/dvb/version.h
@@ -10,6 +10,6 @@
#define _DVBVERSION_H_
#define DVB_API_VERSION 5
-#define DVB_API_VERSION_MINOR 11
+#define DVB_API_VERSION_MINOR 12
#endif /*_DVBVERSION_H_*/
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index 92be3ea3c6e0..1f2c9469f921 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -46,6 +46,12 @@
#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08
#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09
+/* available since kernel version 6.5 */
+#define FW_CDEV_EVENT_REQUEST3 0x0a
+#define FW_CDEV_EVENT_RESPONSE2 0x0b
+#define FW_CDEV_EVENT_PHY_PACKET_SENT2 0x0c
+#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED2 0x0d
+
/**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_* types
* @closure: For arbitrary use by userspace
@@ -103,6 +109,32 @@ struct fw_cdev_event_bus_reset {
* @length: Data length, i.e. the response's payload size in bytes
* @data: Payload data, if any
*
+ * This event is sent instead of &fw_cdev_event_response2 if the kernel or the client implements
+ * ABI version <= 5. It lacks the time stamp fields found in &fw_cdev_event_response2.
+ */
+struct fw_cdev_event_response {
+ __u64 closure;
+ __u32 type;
+ __u32 rcode;
+ __u32 length;
+ __u32 data[];
+};
+
+/**
+ * struct fw_cdev_event_response2 - Sent when a response packet was received
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST
+ * or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST
+ * or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE2
+ * @rcode: Response code returned by the remote node
+ * @length: Data length, i.e. the response's payload size in bytes
+ * @request_tstamp: The time stamp of the isochronous cycle at which the request was sent.
+ * @response_tstamp: The time stamp of the isochronous cycle at which the response arrived.
+ * @padding: Padding to keep the structure size a multiple of 8 bytes on all
+ * architectures, since the System V ABI for i386 aligns 8-byte objects to
+ * only 4 bytes.
+ * @data: Payload data, if any
+ *
* This event is sent when the stack receives a response to an outgoing request
* sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses
* carrying data (read and lock responses) follows immediately and can be
@@ -112,12 +144,21 @@ struct fw_cdev_event_bus_reset {
* involve response packets. This includes unified write transactions,
* broadcast write transactions, and transmission of asynchronous stream
* packets. @rcode indicates success or failure of such transmissions.
+ *
+ * The value of @request_tstamp expresses the isochronous cycle at which the request was sent to
+ * initiate the transaction. The value of @response_tstamp expresses the isochronous cycle at which
+ * the response arrived to complete the transaction. Each value is an unsigned 16-bit integer
+ * containing the three low-order bits of the second field and all 13 bits of the cycle field,
+ * in the format of the CYCLE_TIMER register.
*/
-struct fw_cdev_event_response {
+struct fw_cdev_event_response2 {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
+ __u32 request_tstamp;
+ __u32 response_tstamp;
+ __u32 padding;
__u32 data[];
};
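The new 16-bit time stamps (here and in the other *2/*3 events below) carry the three low-order bits of the second count in their upper 3 bits and the 13-bit cycle count in their lower bits. A hypothetical user-space decode, not part of this patch:

static void fw_tstamp_decode(unsigned int tstamp,
			     unsigned int *sec_low3, unsigned int *cycle)
{
	*sec_low3 = (tstamp >> 13) & 0x7;	/* low 3 bits of the second field */
	*cycle = tstamp & 0x1fff;		/* cycle field, 0..7999 */
}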
@@ -159,6 +200,41 @@ struct fw_cdev_event_request {
* @length: Data length, i.e. the request's payload size in bytes
* @data: Incoming data, if any
*
+ * This event is sent instead of &fw_cdev_event_request3 if the kernel or the client implements
+ * ABI version <= 5. It lacks the time stamp field found in &fw_cdev_event_request3.
+ */
+struct fw_cdev_event_request2 {
+ __u64 closure;
+ __u32 type;
+ __u32 tcode;
+ __u64 offset;
+ __u32 source_node_id;
+ __u32 destination_node_id;
+ __u32 card;
+ __u32 generation;
+ __u32 handle;
+ __u32 length;
+ __u32 data[];
+};
+
+/**
+ * struct fw_cdev_event_request3 - Sent on incoming request to an address region
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST3
+ * @tcode: Transaction code of the incoming request
+ * @offset: The offset into the 48-bit per-node address space
+ * @source_node_id: Sender node ID
+ * @destination_node_id: Destination node ID
+ * @card: The index of the card from which the request came
+ * @generation: Bus generation in which the request is valid
+ * @handle: Reference to the kernel-side pending request
+ * @length: Data length, i.e. the request's payload size in bytes
+ * @tstamp: The time stamp of the isochronous cycle at which the request arrived.
+ * @padding: Padding to keep the structure size a multiple of 8 bytes on all
+ * architectures, since the System V ABI for i386 aligns 8-byte objects to
+ * only 4 bytes.
+ * @data: Incoming data, if any
+ *
* This event is sent when the stack receives an incoming request to an address
* region registered using the %FW_CDEV_IOC_ALLOCATE ioctl. The request is
* guaranteed to be completely contained in the specified region. Userspace is
@@ -191,10 +267,14 @@ struct fw_cdev_event_request {
* sent.
*
* If the client subsequently needs to initiate requests to the sender node of
- * an &fw_cdev_event_request2, it needs to use a device file with matching
+ * an &fw_cdev_event_request3, it needs to use a device file with matching
* card index, node ID, and generation for outbound requests.
+ *
+ * @tstamp is the isochronous cycle at which the request arrived. It is an unsigned 16 bit integer
+ * value; the higher 3 bits express the three low order bits of the second field and the remaining
+ * 13 bits express the cycle field, in the format of the CYCLE_TIME register.
*/
-struct fw_cdev_event_request2 {
+struct fw_cdev_event_request3 {
__u64 closure;
__u32 type;
__u32 tcode;
@@ -205,6 +285,8 @@ struct fw_cdev_event_request2 {
__u32 generation;
__u32 handle;
__u32 length;
+ __u32 tstamp;
+ __u32 padding;
__u32 data[];
};
@@ -341,20 +423,59 @@ struct fw_cdev_event_iso_resource {
* @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED
* @rcode: %RCODE_..., indicates success or failure of transmission
* @length: Data length in bytes
+ * @data: Incoming data for %FW_CDEV_IOC_RECEIVE_PHY_PACKETS. For %FW_CDEV_IOC_SEND_PHY_PACKET,
+ *	   the field carries the same data as in the request, thus its length is 8 bytes.
+ *
+ * This event is sent instead of &fw_cdev_event_phy_packet2 if the kernel or
+ * the client implements ABI version <= 5. It lacks the time stamp field found in
+ * &fw_cdev_event_phy_packet2.
+ */
+struct fw_cdev_event_phy_packet {
+ __u64 closure;
+ __u32 type;
+ __u32 rcode;
+ __u32 length;
+ __u32 data[];
+};
+
+/**
+ * struct fw_cdev_event_phy_packet2 - A PHY packet was transmitted or received with time stamp.
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET
+ * or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl
+ * @type: %FW_CDEV_EVENT_PHY_PACKET_SENT2 or %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
+ * @rcode: %RCODE_..., indicates success or failure of transmission
+ * @length: Data length in bytes
+ * @tstamp: For %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, the time stamp of the isochronous cycle at
+ *	     which the packet arrived. For %FW_CDEV_EVENT_PHY_PACKET_SENT2 and a non-ping packet,
+ *	     the time stamp of the isochronous cycle at which the packet was sent. For a ping
+ *	     packet, the tick count for the round-trip time measured by the 1394 OHCI controller.
* @data: Incoming data
*
- * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty,
- * except in case of a ping packet: Then, @length is 4, and @data[0] is the
- * ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE.
+ * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT2, @length is 8 and @data consists of the two PHY
+ * packet quadlets that were sent, in host byte order.
*
- * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data
- * consists of the two PHY packet quadlets, in host byte order.
+ * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, @length is 8 and @data consists of the two PHY
+ * packet quadlets, in host byte order.
+ *
+ * For %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, @tstamp is the isochronous cycle at which the packet
+ * arrived. It is an unsigned 16 bit integer value; the higher 3 bits express the three low order
+ * bits of the second field and the remaining 13 bits express the cycle field, in the format of
+ * the CYCLE_TIME register.
+ *
+ * For %FW_CDEV_EVENT_PHY_PACKET_SENT2, the meaning of @tstamp depends on whether the packet was
+ * sent for ping or not. If it was not a ping, @tstamp is the isochronous cycle at which the packet
+ * was sent, in the same format as described above for %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2. If it
+ * was a ping, @tstamp is the tick count for the round-trip time measured by the 1394 OHCI
+ * controller with 42.195 MHz resolution.
*/
-struct fw_cdev_event_phy_packet {
+struct fw_cdev_event_phy_packet2 {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
+ __u32 tstamp;
__u32 data[];
};
@@ -375,6 +496,11 @@ struct fw_cdev_event_phy_packet {
* %FW_CDEV_EVENT_PHY_PACKET_SENT or
* %FW_CDEV_EVENT_PHY_PACKET_RECEIVED
*
+ * @request3: Valid if @common.type == %FW_CDEV_EVENT_REQUEST3
+ * @response2: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE2
+ * @phy_packet2: Valid if @common.type == %FW_CDEV_EVENT_PHY_PACKET_SENT2 or
+ * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
+ *
* Convenience union for userspace use. Events could be read(2) into an
* appropriately aligned char buffer and then cast to this union for further
* processing. Note that for a request, response or iso_interrupt event,
@@ -393,6 +519,9 @@ union fw_cdev_event {
struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */
struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */
struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */
+ struct fw_cdev_event_request3 request3; /* added in 6.5 */
+ struct fw_cdev_event_response2 response2; /* added in 6.5 */
+ struct fw_cdev_event_phy_packet2 phy_packet2; /* added in 6.5 */
};
/* available since kernel version 2.6.22 */
@@ -457,6 +586,11 @@ union fw_cdev_event {
* 5 (3.4) - send %FW_CDEV_EVENT_ISO_INTERRUPT events when needed to
* avoid dropping data
* - added %FW_CDEV_IOC_FLUSH_ISO
+ * 6 (6.5) - added new events for subactions of asynchronous transactions, with time stamps
+ * - %FW_CDEV_EVENT_REQUEST3
+ * - %FW_CDEV_EVENT_RESPONSE2
+ * - %FW_CDEV_EVENT_PHY_PACKET_SENT2
+ * - %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
*/
/**
@@ -502,11 +636,11 @@ struct fw_cdev_get_info {
* @data: Userspace pointer to payload
* @generation: The bus generation where packet is valid
*
- * Send a request to the device. This ioctl implements all outgoing requests.
- * Both quadlet and block request specify the payload as a pointer to the data
- * in the @data field. Once the transaction completes, the kernel writes an
- * &fw_cdev_event_response event back. The @closure field is passed back to
- * user space in the response event.
+ * Send a request to the device. This ioctl implements all outgoing requests. Both quadlet and
+ * block request specify the payload as a pointer to the data in the @data field. Once the
+ * transaction completes, the kernel writes either an &fw_cdev_event_response event or an
+ * &fw_cdev_event_response2 event back. The @closure field is passed back to user space in the
+ * response event.
*/
struct fw_cdev_send_request {
__u32 tcode;
@@ -989,10 +1123,9 @@ struct fw_cdev_allocate_iso_resource {
* @generation: The bus generation where packet is valid
* @speed: Speed to transmit at
*
- * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
- * to every device which is listening to the specified channel. The kernel
- * writes an &fw_cdev_event_response event which indicates success or failure of
- * the transmission.
+ * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet to every device
+ * which is listening to the specified channel. The kernel writes either an &fw_cdev_event_response
+ * event or an &fw_cdev_event_response2 event, which indicates success or failure of the transmission.
*/
struct fw_cdev_send_stream_packet {
__u32 length;
@@ -1011,8 +1144,8 @@ struct fw_cdev_send_stream_packet {
* @data: First and second quadlet of the PHY packet
* @generation: The bus generation where packet is valid
*
- * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes
- * on the same card as this device. After transmission, an
+ * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes on the same card as this
+ * device. After transmission, either %FW_CDEV_EVENT_PHY_PACKET_SENT event or
+ * %FW_CDEV_EVENT_PHY_PACKET_SENT2 event is generated.
*
* The payload @data\[\] shall be specified in host byte order. Usually,
@@ -1031,8 +1164,9 @@ struct fw_cdev_send_phy_packet {
* struct fw_cdev_receive_phy_packets - start reception of PHY packets
* @closure: Passed back to userspace in phy packet events
*
- * This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to
- * incoming PHY packets from any node on the same bus as the device.
+ * This ioctl activates issuing of either %FW_CDEV_EVENT_PHY_PACKET_RECEIVED or
+ * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2 due to incoming PHY packets from any node on the same bus
+ * as the device.
*
* The ioctl is only permitted on device files which represent a local node.
*/
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index f222d263bc55..08720c7bd92f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -244,8 +244,10 @@ enum io_uring_op {
* sqe->uring_cmd_flags
* IORING_URING_CMD_FIXED use registered buffer; pass this flag
* along with setting sqe->buf_index.
+ * IORING_URING_CMD_POLLED driver use only
*/
#define IORING_URING_CMD_FIXED (1U << 0)
+#define IORING_URING_CMD_POLLED (1U << 31)
/*
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 737318b1c1d9..f089ab290978 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1190,6 +1190,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
#define KVM_CAP_COUNTER_OFFSET 227
+#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
+#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1442,6 +1444,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
KVM_DEV_TYPE_ARM_PV_TIME,
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
+ KVM_DEV_TYPE_RISCV_AIA,
+#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
KVM_DEV_TYPE_MAX,
};
@@ -1613,7 +1617,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
/*
- * vcpu version available with KVM_ENABLE_CAP
+ * vcpu version available with KVM_CAP_ENABLE_CAP
* vm version available with KVM_CAP_ENABLE_CAP_VM
*/
#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
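
For illustration only (not part of the patch): a hedged sketch of how a VMM might probe and enable
the new arm64 eager page-split capability added above. The exact argument layout (split chunk size
carried in args[0]) is an assumption here; the authoritative description is the KVM API
documentation that accompanies %KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE.

/* Hypothetical userspace example, not part of the kernel patch. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_eager_split(int vm_fd, uint64_t chunk_size)
{
	struct kvm_enable_cap cap;

	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE) <= 0)
		return -1;	/* capability not offered by this kernel */

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE;
	cap.args[0] = chunk_size;	/* assumed: split granularity in bytes */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}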
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 3ddadaea849f..1c80b1d6bbaf 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -140,8 +140,8 @@ struct media_device_info {
#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002)
/* Entity flags */
-#define MEDIA_ENT_FL_DEFAULT (1 << 0)
-#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
+#define MEDIA_ENT_FL_DEFAULT (1U << 0)
+#define MEDIA_ENT_FL_CONNECTOR (1U << 1)
/* OR with the entity id value to find the next entity */
#define MEDIA_ENT_ID_FLAG_NEXT (1U << 31)
@@ -205,9 +205,9 @@ struct media_entity_desc {
};
};
-#define MEDIA_PAD_FL_SINK (1 << 0)
-#define MEDIA_PAD_FL_SOURCE (1 << 1)
-#define MEDIA_PAD_FL_MUST_CONNECT (1 << 2)
+#define MEDIA_PAD_FL_SINK (1U << 0)
+#define MEDIA_PAD_FL_SOURCE (1U << 1)
+#define MEDIA_PAD_FL_MUST_CONNECT (1U << 2)
struct media_pad_desc {
__u32 entity; /* entity ID */
@@ -216,14 +216,14 @@ struct media_pad_desc {
__u32 reserved[2];
};
-#define MEDIA_LNK_FL_ENABLED (1 << 0)
-#define MEDIA_LNK_FL_IMMUTABLE (1 << 1)
-#define MEDIA_LNK_FL_DYNAMIC (1 << 2)
+#define MEDIA_LNK_FL_ENABLED (1U << 0)
+#define MEDIA_LNK_FL_IMMUTABLE (1U << 1)
+#define MEDIA_LNK_FL_DYNAMIC (1U << 2)
#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28)
-# define MEDIA_LNK_FL_DATA_LINK (0 << 28)
-# define MEDIA_LNK_FL_INTERFACE_LINK (1 << 28)
-# define MEDIA_LNK_FL_ANCILLARY_LINK (2 << 28)
+# define MEDIA_LNK_FL_DATA_LINK (0U << 28)
+# define MEDIA_LNK_FL_INTERFACE_LINK (1U << 28)
+# define MEDIA_LNK_FL_ANCILLARY_LINK (2U << 28)
struct media_link_desc {
struct media_pad_desc source;
@@ -293,7 +293,7 @@ struct media_links_enum {
* struct media_device_info.
*/
#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \
- ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+ ((media_version) >= ((4U << 16) | (19U << 8) | 0U))
struct media_v2_entity {
__u32 id;
@@ -328,7 +328,7 @@ struct media_v2_interface {
* struct media_device_info.
*/
#define MEDIA_V2_PAD_HAS_INDEX(media_version) \
- ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+ ((media_version) >= ((4U << 16) | (19U << 8) | 0U))
struct media_v2_pad {
__u32 id;
@@ -432,7 +432,7 @@ struct media_v2_topology {
#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
/* Obsolete symbol for media_version, no longer used in the kernel */
-#define MEDIA_API_VERSION ((0 << 16) | (1 << 8) | 0)
+#define MEDIA_API_VERSION ((0U << 16) | (1U << 8) | 0U)
#endif
diff --git a/include/uapi/linux/tps6594_pfsm.h b/include/uapi/linux/tps6594_pfsm.h
new file mode 100644
index 000000000000..c69569e0a7a2
--- /dev/null
+++ b/include/uapi/linux/tps6594_pfsm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace ABI for TPS6594 PMIC Pre-configurable Finite State Machine
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#ifndef __TPS6594_PFSM_H
+#define __TPS6594_PFSM_H
+
+#include <linux/const.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct pmic_state_opt - PMIC state options
+ * @gpio_retention: if enabled, power rails associated with GPIO retention remain active
+ * @ddr_retention: if enabled, power rails associated with DDR retention remain active
+ * @mcu_only_startup_dest: if enabled, startup destination state is MCU_ONLY
+ */
+struct pmic_state_opt {
+ __u8 gpio_retention;
+ __u8 ddr_retention;
+ __u8 mcu_only_startup_dest;
+};
+
+/* Commands */
+#define PMIC_BASE 'P'
+
+#define PMIC_GOTO_STANDBY _IO(PMIC_BASE, 0)
+#define PMIC_GOTO_LP_STANDBY _IO(PMIC_BASE, 1)
+#define PMIC_UPDATE_PGM _IO(PMIC_BASE, 2)
+#define PMIC_SET_ACTIVE_STATE _IO(PMIC_BASE, 3)
+#define PMIC_SET_MCU_ONLY_STATE _IOW(PMIC_BASE, 4, struct pmic_state_opt)
+#define PMIC_SET_RETENTION_STATE _IOW(PMIC_BASE, 5, struct pmic_state_opt)
+
+#endif /* __TPS6594_PFSM_H */
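
For illustration only (not part of the patch): a minimal usage sketch for the new PFSM ioctls
defined in this header. The character device path below is a placeholder; the actual node name
depends on how the tps6594-pfsm driver registers its device.

/* Hypothetical userspace example, not part of the kernel patch. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tps6594_pfsm.h>

int main(void)
{
	struct pmic_state_opt opt = {
		.gpio_retention = 1,	/* keep GPIO retention rails active */
		.ddr_retention = 1,	/* keep DDR retention rails active */
		.mcu_only_startup_dest = 0,
	};
	int fd = open("/dev/pfsm0", O_RDWR);	/* placeholder device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, PMIC_SET_RETENTION_STATE, &opt) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}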
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index b17e3a21b15f..62d318377379 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -376,7 +376,10 @@ struct usb_string_descriptor {
__u8 bLength;
__u8 bDescriptorType;
- __le16 wData[1]; /* UTF-16LE encoded */
+ union {
+ __le16 legacy_padding;
+ __DECLARE_FLEX_ARRAY(__le16, wData); /* UTF-16LE encoded */
+ };
} __attribute__ ((packed));
/* note that "string" zero is special, it holds language codes that
@@ -981,7 +984,11 @@ struct usb_ssp_cap_descriptor {
#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8)
#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12)
__le16 wReserved;
- __le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */
+ union {
+ __le32 legacy_padding;
+ /* list of sublink speed attrib entries */
+ __DECLARE_FLEX_ARRAY(__le32, bmSublinkSpeedAttr);
+ };
#define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */
#define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */
#define USB_SSP_SUBLINK_SPEED_LSE_BPS 0
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 5e80daa4ffe0..c3604a0a3e30 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -804,6 +804,88 @@ enum v4l2_mpeg_video_frame_skip_mode {
#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY (V4L2_CID_CODEC_BASE + 653)
#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_BASE + 654)
+#define V4L2_CID_MPEG_VIDEO_AV1_PROFILE (V4L2_CID_CODEC_BASE + 655)
+/**
+ * enum v4l2_mpeg_video_av1_profile - AV1 profiles
+ *
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN: compliant decoders must be able to decode
+ * streams with seq_profile equal to 0.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH: compliant decoders must be able to decode
+ * streams with seq_profile less than or equal to 1.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL: compliant decoders must be able to
+ * decode streams with seq_profile less than or equal to 2.
+ *
+ * Conveys the highest profile a decoder can work with.
+ */
+enum v4l2_mpeg_video_av1_profile {
+ V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN = 0,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH = 1,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL = 2,
+};
+
+#define V4L2_CID_MPEG_VIDEO_AV1_LEVEL (V4L2_CID_CODEC_BASE + 656)
+/**
+ * enum v4l2_mpeg_video_av1_level - AV1 levels
+ *
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_0: Level 2.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_1: Level 2.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_2: Level 2.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_3: Level 2.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_0: Level 3.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_1: Level 3.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_2: Level 3.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_3: Level 3.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_0: Level 4.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_1: Level 4.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_2: Level 4.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_3: Level 4.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_0: Level 5.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_1: Level 5.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_2: Level 5.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_3: Level 5.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_0: Level 6.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_1: Level 6.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_2: Level 6.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_3: Level 6.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_0: Level 7.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_1: Level 7.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_2: Level 7.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_3: Level 7.3.
+ *
+ * Conveys the highest level a decoder can work with.
+ */
+enum v4l2_mpeg_video_av1_level {
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_0 = 0,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_1 = 1,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_2 = 2,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_3 = 3,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_0 = 4,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_1 = 5,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_2 = 6,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_3 = 7,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_0 = 8,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_1 = 9,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_2 = 10,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_3 = 11,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_0 = 12,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_1 = 13,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_2 = 14,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_3 = 15,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_0 = 16,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_1 = 17,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_2 = 18,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_3 = 19,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_0 = 20,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_1 = 21,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_2 = 22,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_3 = 23
+};
+
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
#define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000)
#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0)
@@ -2385,6 +2467,9 @@ struct v4l2_ctrl_hevc_slice_params {
* @poc_st_curr_after: provides the index of the short term after references
* in DPB array
* @poc_lt_curr: provides the index of the long term references in DPB array
+ * @num_delta_pocs_of_ref_rps_idx: same as the derived value NumDeltaPocs[RefRpsIdx],
+ * can be used to parse the RPS data in slice headers
+ * instead of skipping it with @short_term_ref_pic_set_size.
* @reserved: padding field. Should be zeroed by applications.
* @dpb: the decoded picture buffer, for meta-data about reference frames
* @flags: see V4L2_HEVC_DECODE_PARAM_FLAG_{}
@@ -2400,7 +2485,8 @@ struct v4l2_ctrl_hevc_decode_params {
__u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __u8 reserved[4];
+ __u8 num_delta_pocs_of_ref_rps_idx;
+ __u8 reserved[3];
struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u64 flags;
};
@@ -2754,6 +2840,645 @@ struct v4l2_ctrl_vp9_compressed_hdr {
struct v4l2_vp9_mv_probs mv;
};
+/* Stateless AV1 controls */
+
+#define V4L2_AV1_TOTAL_REFS_PER_FRAME 8
+#define V4L2_AV1_CDEF_MAX 8
+#define V4L2_AV1_NUM_PLANES_MAX 3 /* 1 if monochrome, 3 otherwise */
+#define V4L2_AV1_MAX_SEGMENTS 8
+#define V4L2_AV1_MAX_OPERATING_POINTS (1 << 5) /* 5 bits to encode */
+#define V4L2_AV1_REFS_PER_FRAME 7
+#define V4L2_AV1_MAX_NUM_Y_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_MAX_NUM_CB_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_MAX_NUM_CR_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_AR_COEFFS_SIZE 25 /* (2 * 3 * (3 + 1)) + 1 */
+#define V4L2_AV1_MAX_NUM_PLANES 3
+#define V4L2_AV1_MAX_TILE_COLS 64
+#define V4L2_AV1_MAX_TILE_ROWS 64
+#define V4L2_AV1_MAX_TILE_COUNT 512
+
+#define V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE 0x00000001
+#define V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK 0x00000002
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA 0x00000004
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER 0x00000008
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND 0x00000010
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND 0x00000020
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION 0x00000040
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER 0x00000080
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT 0x00000100
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP 0x00000200
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS 0x00000400
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES 0x00000800
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF 0x00001000
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION 0x00002000
+#define V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME 0x00004000
+#define V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE 0x00008000
+#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X 0x00010000
+#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y 0x00020000
+#define V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT 0x00040000
+#define V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q 0x00080000
+
+#define V4L2_CID_STATELESS_AV1_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE + 500)
+/**
+ * struct v4l2_ctrl_av1_sequence - AV1 Sequence
+ *
+ * Represents an AV1 Sequence OBU. See section 5.5 "Sequence header OBU syntax"
+ * for more details.
+ *
+ * @flags: See V4L2_AV1_SEQUENCE_FLAG_{}.
+ * @seq_profile: specifies the features that can be used in the coded video
+ * sequence.
+ * @order_hint_bits: specifies the number of bits used for the order_hint field
+ * at each frame.
+ * @bit_depth: the bitdepth to use for the sequence as described in section
+ * 5.5.2 "Color config syntax".
+ * @reserved: padding field. Should be zeroed by applications.
+ * @max_frame_width_minus_1: specifies the maximum frame width minus 1 for the
+ * frames represented by this sequence header.
+ * @max_frame_height_minus_1: specifies the maximum frame height minus 1 for the
+ * frames represented by this sequence header.
+ */
+struct v4l2_ctrl_av1_sequence {
+ __u32 flags;
+ __u8 seq_profile;
+ __u8 order_hint_bits;
+ __u8 bit_depth;
+ __u8 reserved;
+ __u16 max_frame_width_minus_1;
+ __u16 max_frame_height_minus_1;
+};
+
+#define V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY (V4L2_CID_CODEC_STATELESS_BASE + 501)
+/**
+ * struct v4l2_ctrl_av1_tile_group_entry - AV1 Tile Group entry
+ *
+ * Represents a single AV1 tile inside an AV1 Tile Group. Note that MiRowStart,
+ * MiRowEnd, MiColStart and MiColEnd can be retrieved from struct
+ * v4l2_av1_tile_info in struct v4l2_ctrl_av1_frame using tile_row and
+ * tile_col. See section 6.10.1 "General tile group OBU semantics" for more
+ * details.
+ *
+ * @tile_offset: offset from the OBU data, i.e. where the coded tile data
+ * actually starts.
+ * @tile_size: specifies the size in bytes of the coded tile. Equivalent to
+ * "TileSize" in the AV1 Specification.
+ * @tile_row: specifies the row of the current tile. Equivalent to "TileRow" in
+ * the AV1 Specification.
+ * @tile_col: specifies the col of the current tile. Equivalent to "TileCol" in
+ * the AV1 Specification.
+ */
+struct v4l2_ctrl_av1_tile_group_entry {
+ __u32 tile_offset;
+ __u32 tile_size;
+ __u32 tile_row;
+ __u32 tile_col;
+};
+
+/**
+ * enum v4l2_av1_warp_model - AV1 Warp Model as described in section 3
+ * "Symbols and abbreviated terms" of the AV1 Specification.
+ *
+ * @V4L2_AV1_WARP_MODEL_IDENTITY: Warp model is just an identity transform.
+ * @V4L2_AV1_WARP_MODEL_TRANSLATION: Warp model is a pure translation.
+ * @V4L2_AV1_WARP_MODEL_ROTZOOM: Warp model is a rotation + symmetric zoom +
+ * translation.
+ * @V4L2_AV1_WARP_MODEL_AFFINE: Warp model is a general affine transform.
+ */
+enum v4l2_av1_warp_model {
+ V4L2_AV1_WARP_MODEL_IDENTITY = 0,
+ V4L2_AV1_WARP_MODEL_TRANSLATION = 1,
+ V4L2_AV1_WARP_MODEL_ROTZOOM = 2,
+ V4L2_AV1_WARP_MODEL_AFFINE = 3,
+};
+
+/**
+ * enum v4l2_av1_reference_frame - AV1 reference frames
+ *
+ * @V4L2_AV1_REF_INTRA_FRAME: Intra Frame Reference
+ * @V4L2_AV1_REF_LAST_FRAME: Last Reference Frame
+ * @V4L2_AV1_REF_LAST2_FRAME: Last2 Reference Frame
+ * @V4L2_AV1_REF_LAST3_FRAME: Last3 Reference Frame
+ * @V4L2_AV1_REF_GOLDEN_FRAME: Golden Reference Frame
+ * @V4L2_AV1_REF_BWDREF_FRAME: BWD Reference Frame
+ * @V4L2_AV1_REF_ALTREF2_FRAME: Alternative2 Reference Frame
+ * @V4L2_AV1_REF_ALTREF_FRAME: Alternative Reference Frame
+ */
+enum v4l2_av1_reference_frame {
+ V4L2_AV1_REF_INTRA_FRAME = 0,
+ V4L2_AV1_REF_LAST_FRAME = 1,
+ V4L2_AV1_REF_LAST2_FRAME = 2,
+ V4L2_AV1_REF_LAST3_FRAME = 3,
+ V4L2_AV1_REF_GOLDEN_FRAME = 4,
+ V4L2_AV1_REF_BWDREF_FRAME = 5,
+ V4L2_AV1_REF_ALTREF2_FRAME = 6,
+ V4L2_AV1_REF_ALTREF_FRAME = 7,
+};
+
+#define V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) (1 << (ref))
+
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_GLOBAL 0x1
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_ROT_ZOOM 0x2
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_TRANSLATION 0x4
+/**
+ * struct v4l2_av1_global_motion - AV1 Global Motion parameters as described in
+ * section 6.8.17 "Global motion params semantics" of the AV1 specification.
+ *
+ * @flags: A bitfield containing the flags per reference frame. See
+ * V4L2_AV1_GLOBAL_MOTION_FLAG_{}
+ * @type: The type of global motion transform used.
+ * @params: this field has the same meaning as "gm_params" in the AV1
+ * specification.
+ * @invalid: bitfield indicating whether the global motion params are invalid
+ * for a given reference frame. See section 7.11.3.6 Setup shear process and
+ * the variable "warpValid". Use V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) to
+ * create a suitable mask.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+
+struct v4l2_av1_global_motion {
+ __u8 flags[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ enum v4l2_av1_warp_model type[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s32 params[V4L2_AV1_TOTAL_REFS_PER_FRAME][6];
+ __u8 invalid;
+ __u8 reserved[3];
+};
+
+/**
+ * enum v4l2_av1_frame_restoration_type - AV1 Frame Restoration Type
+ * @V4L2_AV1_FRAME_RESTORE_NONE: no filtering is applied.
+ * @V4L2_AV1_FRAME_RESTORE_WIENER: Wiener filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SGRPROJ: self guided filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SWITCHABLE: restoration filter is switchable.
+ */
+enum v4l2_av1_frame_restoration_type {
+ V4L2_AV1_FRAME_RESTORE_NONE = 0,
+ V4L2_AV1_FRAME_RESTORE_WIENER = 1,
+ V4L2_AV1_FRAME_RESTORE_SGRPROJ = 2,
+ V4L2_AV1_FRAME_RESTORE_SWITCHABLE = 3,
+};
+
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR 0x1
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_CHROMA_LR 0x2
+
+/**
+ * struct v4l2_av1_loop_restoration - AV1 Loop Restoration as described in
+ * section 6.10.15 "Loop restoration params semantics" of the AV1 specification.
+ *
+ * @flags: See V4L2_AV1_LOOP_RESTORATION_FLAG_{}.
+ * @lr_unit_shift: specifies if the luma restoration size should be halved.
+ * @lr_uv_shift: specifies if the chroma size should be half the luma size.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @frame_restoration_type: specifies the type of restoration used for each
+ * plane. See enum v4l2_av1_frame_restoration_type.
+ * @loop_restoration_size: specifies the size of loop restoration units in units
+ * of samples in the current plane.
+ */
+struct v4l2_av1_loop_restoration {
+ __u8 flags;
+ __u8 lr_unit_shift;
+ __u8 lr_uv_shift;
+ __u8 reserved;
+ enum v4l2_av1_frame_restoration_type frame_restoration_type[V4L2_AV1_NUM_PLANES_MAX];
+ __u32 loop_restoration_size[V4L2_AV1_MAX_NUM_PLANES];
+};
+
+/**
+ * struct v4l2_av1_cdef - AV1 CDEF params semantics as described in section
+ * 6.10.14 "CDEF params semantics" of the AV1 specification
+ *
+ * @damping_minus_3: controls the amount of damping in the deringing filter.
+ * @bits: specifies the number of bits needed to specify which CDEF filter to
+ * apply.
+ * @y_pri_strength: specifies the strength of the primary filter.
+ * @y_sec_strength: specifies the strength of the secondary filter.
+ * @uv_pri_strength: specifies the strength of the primary filter.
+ * @uv_sec_strength: specifies the strength of the secondary filter.
+ */
+struct v4l2_av1_cdef {
+ __u8 damping_minus_3;
+ __u8 bits;
+ __u8 y_pri_strength[V4L2_AV1_CDEF_MAX];
+ __u8 y_sec_strength[V4L2_AV1_CDEF_MAX];
+ __u8 uv_pri_strength[V4L2_AV1_CDEF_MAX];
+ __u8 uv_sec_strength[V4L2_AV1_CDEF_MAX];
+};
+
+#define V4L2_AV1_SEGMENTATION_FLAG_ENABLED 0x1
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP 0x2
+#define V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE 0x4
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_DATA 0x8
+#define V4L2_AV1_SEGMENTATION_FLAG_SEG_ID_PRE_SKIP 0x10
+
+/**
+ * enum v4l2_av1_segment_feature - AV1 segment features as described in section
+ * 3 "Symbols and abbreviated terms" of the AV1 specification.
+ *
+ * @V4L2_AV1_SEG_LVL_ALT_Q: Index for quantizer segment feature.
+ * @V4L2_AV1_SEG_LVL_ALT_LF_Y_V: Index for vertical luma loop filter segment
+ * feature.
+ * @V4L2_AV1_SEG_LVL_REF_FRAME: Index for reference frame segment feature.
+ * @V4L2_AV1_SEG_LVL_REF_SKIP: Index for skip segment feature.
+ * @V4L2_AV1_SEG_LVL_REF_GLOBALMV: Index for global mv feature.
+ * @V4L2_AV1_SEG_LVL_MAX: Number of segment features.
+ */
+enum v4l2_av1_segment_feature {
+ V4L2_AV1_SEG_LVL_ALT_Q = 0,
+ V4L2_AV1_SEG_LVL_ALT_LF_Y_V = 1,
+ V4L2_AV1_SEG_LVL_REF_FRAME = 5,
+ V4L2_AV1_SEG_LVL_REF_SKIP = 6,
+ V4L2_AV1_SEG_LVL_REF_GLOBALMV = 7,
+ V4L2_AV1_SEG_LVL_MAX = 8
+};
+
+#define V4L2_AV1_SEGMENT_FEATURE_ENABLED(id) (1 << (id))
+
+/**
+ * struct v4l2_av1_segmentation - AV1 Segmentation params as defined in section
+ * 6.8.13 "Segmentation params semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_SEGMENTATION_FLAG_{}.
+ * @last_active_seg_id: indicates the highest numbered segment id that has some
+ * enabled feature. This is used when decoding the segment id to only decode
+ * choices corresponding to used segments.
+ * @feature_enabled: bitmask defining which features are enabled in each
+ * segment. Use V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask.
+ * @feature_data: data attached to each feature. Data entry is only valid if the
+ * feature is enabled
+ */
+struct v4l2_av1_segmentation {
+ __u8 flags;
+ __u8 last_active_seg_id;
+ __u8 feature_enabled[V4L2_AV1_MAX_SEGMENTS];
+ __s16 feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX];
+};
+
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT 0x4
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI 0x8
+
+/**
+ * struct v4l2_av1_loop_filter - AV1 Loop filter params as defined in section
+ * 6.8.10 "Loop filter semantics" and 6.8.16 "Loop filter delta parameters
+ * semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_LOOP_FILTER_FLAG_{}
+ * @level: an array containing loop filter strength values. Different loop
+ * filter strength values from the array are used depending on the image plane
+ * being filtered, and the edge direction (vertical or horizontal) being
+ * filtered.
+ * @sharpness: indicates the sharpness level. The loop_filter_level and
+ * loop_filter_sharpness together determine when a block edge is filtered, and
+ * by how much the filtering can change the sample values. The loop filter
+ * process is described in section 7.14 of the AV1 specification.
+ * @ref_deltas: contains the adjustment needed for the filter level based on the
+ * chosen reference frame. If this syntax element is not present, it maintains
+ * its previous value.
+ * @mode_deltas: contains the adjustment needed for the filter level based on
+ * the chosen mode. If this syntax element is not present, it maintains its
+ * previous value.
+ * @delta_lf_res: specifies the left shift which should be applied to decoded
+ * loop filter delta values.
+ */
+struct v4l2_av1_loop_filter {
+ __u8 flags;
+ __u8 level[4];
+ __u8 sharpness;
+ __s8 ref_deltas[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s8 mode_deltas[2];
+ __u8 delta_lf_res;
+};
+
+#define V4L2_AV1_QUANTIZATION_FLAG_DIFF_UV_DELTA 0x1
+#define V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX 0x2
+#define V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT 0x4
+
+/**
+ * struct v4l2_av1_quantization - AV1 Quantization params as defined in section
+ * 6.8.11 "Quantization params semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_QUANTIZATION_FLAG_{}
+ * @base_q_idx: indicates the base frame qindex. This is used for Y AC
+ * coefficients and as the base value for the other quantizers.
+ * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx.
+ * @delta_q_u_dc: indicates the U DC quantizer relative to base_q_idx.
+ * @delta_q_u_ac: indicates the U AC quantizer relative to base_q_idx.
+ * @delta_q_v_dc: indicates the V DC quantizer relative to base_q_idx.
+ * @delta_q_v_ac: indicates the V AC quantizer relative to base_q_idx.
+ * @qm_y: specifies the level in the quantizer matrix that should be used for
+ * luma plane decoding.
+ * @qm_u: specifies the level in the quantizer matrix that should be used for
+ * chroma U plane decoding.
+ * @qm_v: specifies the level in the quantizer matrix that should be used for
+ * chroma V plane decoding.
+ * @delta_q_res: specifies the left shift which should be applied to decoded
+ * quantizer index delta values.
+ */
+struct v4l2_av1_quantization {
+ __u8 flags;
+ __u8 base_q_idx;
+ __s8 delta_q_y_dc;
+ __s8 delta_q_u_dc;
+ __s8 delta_q_u_ac;
+ __s8 delta_q_v_dc;
+ __s8 delta_q_v_ac;
+ __u8 qm_y;
+ __u8 qm_u;
+ __u8 qm_v;
+ __u8 delta_q_res;
+};
+
+#define V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING 0x1
+
+/**
+ * struct v4l2_av1_tile_info - AV1 Tile info as defined in section 6.8.14 "Tile
+ * info semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_TILE_INFO_FLAG_{}
+ * @context_update_tile_id: specifies which tile to use for the CDF update.
+ * @tile_rows: specifies the number of tiles down the frame.
+ * @tile_cols: specifies the number of tiles across the frame.
+ * @mi_col_starts: an array specifying the start column (in units of 4x4 luma
+ * samples) for each tile across the image.
+ * @mi_row_starts: an array specifying the start row (in units of 4x4 luma
+ * samples) for each tile down the image.
+ * @width_in_sbs_minus_1: specifies the width of a tile minus 1 in units of
+ * superblocks.
+ * @height_in_sbs_minus_1: specifies the height of a tile minus 1 in units of
+ * superblocks.
+ * @tile_size_bytes: specifies the number of bytes needed to code each tile
+ * size.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_av1_tile_info {
+ __u8 flags;
+ __u8 context_update_tile_id;
+ __u8 tile_cols;
+ __u8 tile_rows;
+ __u32 mi_col_starts[V4L2_AV1_MAX_TILE_COLS + 1];
+ __u32 mi_row_starts[V4L2_AV1_MAX_TILE_ROWS + 1];
+ __u32 width_in_sbs_minus_1[V4L2_AV1_MAX_TILE_COLS];
+ __u32 height_in_sbs_minus_1[V4L2_AV1_MAX_TILE_ROWS];
+ __u8 tile_size_bytes;
+ __u8 reserved[3];
+};
+
+/**
+ * enum v4l2_av1_frame_type - AV1 Frame Type
+ *
+ * @V4L2_AV1_KEY_FRAME: Key frame
+ * @V4L2_AV1_INTER_FRAME: Inter frame
+ * @V4L2_AV1_INTRA_ONLY_FRAME: Intra-only frame
+ * @V4L2_AV1_SWITCH_FRAME: Switch frame
+ */
+enum v4l2_av1_frame_type {
+ V4L2_AV1_KEY_FRAME = 0,
+ V4L2_AV1_INTER_FRAME = 1,
+ V4L2_AV1_INTRA_ONLY_FRAME = 2,
+ V4L2_AV1_SWITCH_FRAME = 3
+};
+
+/**
+ * enum v4l2_av1_interpolation_filter - AV1 interpolation filter types
+ *
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP: eight tap filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH: eight tap smooth filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP: eight tap sharp filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_BILINEAR: bilinear filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE: filter selection is signaled at
+ * the block level
+ *
+ * See section 6.8.9 "Interpolation filter semantics" of the AV1 specification
+ * for more details.
+ */
+enum v4l2_av1_interpolation_filter {
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP = 0,
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH = 1,
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP = 2,
+ V4L2_AV1_INTERPOLATION_FILTER_BILINEAR = 3,
+ V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE = 4,
+};
+
+/**
+ * enum v4l2_av1_tx_mode - AV1 Tx mode as described in section 6.8.21 "TX mode
+ * semantics" of the AV1 specification.
+ * @V4L2_AV1_TX_MODE_ONLY_4X4: the inverse transform will use only 4x4
+ * transforms
+ * @V4L2_AV1_TX_MODE_LARGEST: the inverse transform will use the largest
+ * transform size that fits inside the block
+ * @V4L2_AV1_TX_MODE_SELECT: the choice of transform size is specified
+ * explicitly for each block.
+ */
+enum v4l2_av1_tx_mode {
+ V4L2_AV1_TX_MODE_ONLY_4X4 = 0,
+ V4L2_AV1_TX_MODE_LARGEST = 1,
+ V4L2_AV1_TX_MODE_SELECT = 2
+};
+
+#define V4L2_AV1_FRAME_FLAG_SHOW_FRAME 0x00000001
+#define V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME 0x00000002
+#define V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE 0x00000004
+#define V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE 0x00000008
+#define V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS 0x00000010
+#define V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV 0x00000020
+#define V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC 0x00000040
+#define V4L2_AV1_FRAME_FLAG_USE_SUPERRES 0x00000080
+#define V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV 0x00000100
+#define V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE 0x00000200
+#define V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS 0x00000400
+#define V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF 0x00000800
+#define V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION 0x00001000
+#define V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT 0x00002000
+#define V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET 0x00004000
+#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED 0x00008000
+#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT 0x00010000
+#define V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE 0x00020000
+#define V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT 0x00040000
+#define V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING 0x00080000
+
+#define V4L2_CID_STATELESS_AV1_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 502)
+/**
+ * struct v4l2_ctrl_av1_frame - Represents an AV1 Frame Header OBU.
+ *
+ * @tile_info: tile info
+ * @quantization: quantization params
+ * @segmentation: segmentation params
+ * @superres_denom: the denominator for the upscaling ratio.
+ * @loop_filter: loop filter params
+ * @cdef: cdef params
+ * @skip_mode_frame: specifies the frames to use for compound prediction when
+ * skip_mode is equal to 1.
+ * @primary_ref_frame: specifies which reference frame contains the CDF values
+ * and other state that should be loaded at the start of the frame.
+ * @loop_restoration: loop restoration params
+ * @global_motion: global motion params
+ * @flags: see V4L2_AV1_FRAME_FLAG_{}
+ * @frame_type: specifies the AV1 frame type
+ * @order_hint: specifies OrderHintBits least significant bits of the expected
+ * output order for this frame.
+ * @upscaled_width: the upscaled width.
+ * @interpolation_filter: specifies the filter selection used for performing
+ * inter prediction.
+ * @tx_mode: specifies how the transform size is determined.
+ * @frame_width_minus_1: add 1 to get the frame's width.
+ * @frame_height_minus_1: add 1 to get the frame's height
+ * @render_width_minus_1: add 1 to get the render width of the frame in luma
+ * samples.
+ * @render_height_minus_1: add 1 to get the render height of the frame in luma
+ * samples.
+ * @current_frame_id: specifies the frame id number for the current frame. Frame
+ * id numbers are additional information that do not affect the decoding
+ * process, but provide decoders with a way of detecting missing reference
+ * frames so that appropriate action can be taken.
+ * @buffer_removal_time: specifies the frame removal time in units of DecCT clock
+ * ticks counted from the removal time of the last random access point for
+ * operating point opNum.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @order_hints: specifies the expected output order hint for each reference
+ * frame. This field corresponds to the OrderHints variable from the
+ * specification (section 5.9.2 "Uncompressed header syntax"). As such, this is
+ * only used for non-intra frames and ignored otherwise. order_hints[0] is
+ * always ignored.
+ * @reference_frame_ts: the V4L2 timestamp of the reference frame slots.
+ * @ref_frame_idx: used to index into @reference_frame_ts when decoding
+ * inter-frames. The meaning of this array is the same as in the specification.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer. Use
+ * v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @refresh_frame_flags: contains a bitmask that specifies which reference frame
+ * slots will be updated with the current frame after it is decoded.
+ */
+struct v4l2_ctrl_av1_frame {
+ struct v4l2_av1_tile_info tile_info;
+ struct v4l2_av1_quantization quantization;
+ __u8 superres_denom;
+ struct v4l2_av1_segmentation segmentation;
+ struct v4l2_av1_loop_filter loop_filter;
+ struct v4l2_av1_cdef cdef;
+ __u8 skip_mode_frame[2];
+ __u8 primary_ref_frame;
+ struct v4l2_av1_loop_restoration loop_restoration;
+ struct v4l2_av1_global_motion global_motion;
+ __u32 flags;
+ enum v4l2_av1_frame_type frame_type;
+ __u32 order_hint;
+ __u32 upscaled_width;
+ enum v4l2_av1_interpolation_filter interpolation_filter;
+ enum v4l2_av1_tx_mode tx_mode;
+ __u32 frame_width_minus_1;
+ __u32 frame_height_minus_1;
+ __u16 render_width_minus_1;
+ __u16 render_height_minus_1;
+
+ __u32 current_frame_id;
+ __u32 buffer_removal_time[V4L2_AV1_MAX_OPERATING_POINTS];
+ __u8 reserved[4];
+ __u32 order_hints[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __u64 reference_frame_ts[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s8 ref_frame_idx[V4L2_AV1_REFS_PER_FRAME];
+ __u8 refresh_frame_flags;
+};
+
+#define V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN 0x1
+#define V4L2_AV1_FILM_GRAIN_FLAG_UPDATE_GRAIN 0x2
+#define V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA 0x4
+#define V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP 0x8
+#define V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE 0x10
+
+#define V4L2_CID_STATELESS_AV1_FILM_GRAIN (V4L2_CID_CODEC_STATELESS_BASE + 505)
+/**
+ * struct v4l2_ctrl_av1_film_grain - AV1 Film Grain parameters.
+ *
+ * Film grain parameters as specified by section 6.8.20 of the AV1 Specification.
+ *
+ * @flags: see V4L2_AV1_FILM_GRAIN_{}.
+ * @cr_mult: represents a multiplier for the cr component used in derivation of
+ * the input index to the cr component scaling function.
+ * @grain_seed: specifies the starting value for the pseudo-random numbers used
+ * during film grain synthesis.
+ * @film_grain_params_ref_idx: indicates which reference frame contains the
+ * film grain parameters to be used for this frame.
+ * @num_y_points: specifies the number of points for the piece-wise linear
+ * scaling function of the luma component.
+ * @point_y_value: represents the x (luma value) coordinate for the i-th point
+ * of the piecewise linear scaling function for luma component. The values are
+ * signaled on the scale of 0..255. In case of 10 bit video, these values
+ * correspond to luma values divided by 4. In case of 12 bit video, these values
+ * correspond to luma values divided by 16.
+ * @point_y_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for luma component.
+ * @num_cb_points: specifies the number of points for the piece-wise linear
+ * scaling function of the cb component.
+ * @point_cb_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cb component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cb_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cb component.
+ * @num_cr_points: specifies the number of points for the piece-wise
+ * linear scaling function of the cr component.
+ * @point_cr_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cr component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cr_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cr component.
+ * @grain_scaling_minus_8: represents the shift – 8 applied to the values of the
+ * chroma component. The grain_scaling_minus_8 can take values of 0..3 and
+ * determines the range and quantization step of the standard deviation of film
+ * grain.
+ * @ar_coeff_lag: specifies the number of auto-regressive coefficients for luma
+ * and chroma.
+ * @ar_coeffs_y_plus_128: specifies auto-regressive coefficients used for the Y
+ * plane.
+ * @ar_coeffs_cb_plus_128: specifies auto-regressive coefficients used for the U
+ * plane.
+ * @ar_coeffs_cr_plus_128: specifies auto-regressive coefficients used for the V
+ * plane.
+ * @ar_coeff_shift_minus_6: specifies the range of the auto-regressive
+ * coefficients. Values of 0, 1, 2, and 3 correspond to the ranges for
+ * auto-regressive coefficients of [-2, 2), [-1, 1), [-0.5, 0.5) and [-0.25,
+ * 0.25) respectively.
+ * @grain_scale_shift: specifies how much the Gaussian random numbers should be
+ * scaled down during the grain synthesis process.
+ * @cb_mult: represents a multiplier for the cb component used in derivation of
+ * the input index to the cb component scaling function.
+ * @cb_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cb component scaling function.
+ * @cr_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cr component scaling function.
+ * @cb_offset: represents an offset used in derivation of the input index to the
+ * cb component scaling function.
+ * @cr_offset: represents an offset used in derivation of the input index to the
+ * cr component scaling function.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_av1_film_grain {
+ __u8 flags;
+ __u8 cr_mult;
+ __u16 grain_seed;
+ __u8 film_grain_params_ref_idx;
+ __u8 num_y_points;
+ __u8 point_y_value[V4L2_AV1_MAX_NUM_Y_POINTS];
+ __u8 point_y_scaling[V4L2_AV1_MAX_NUM_Y_POINTS];
+ __u8 num_cb_points;
+ __u8 point_cb_value[V4L2_AV1_MAX_NUM_CB_POINTS];
+ __u8 point_cb_scaling[V4L2_AV1_MAX_NUM_CB_POINTS];
+ __u8 num_cr_points;
+ __u8 point_cr_value[V4L2_AV1_MAX_NUM_CR_POINTS];
+ __u8 point_cr_scaling[V4L2_AV1_MAX_NUM_CR_POINTS];
+ __u8 grain_scaling_minus_8;
+ __u8 ar_coeff_lag;
+ __u8 ar_coeffs_y_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeffs_cb_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeffs_cr_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeff_shift_minus_6;
+ __u8 grain_scale_shift;
+ __u8 cb_mult;
+ __u8 cb_luma_mult;
+ __u8 cr_luma_mult;
+ __u16 cb_offset;
+ __u16 cr_offset;
+ __u8 reserved[4];
+};
+
/* MPEG-compression definitions kept for backwards compatibility */
#ifndef __KERNEL__
#define V4L2_CTRL_CLASS_MPEG V4L2_CTRL_CLASS_CODEC
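
For illustration only (not part of the patch): a hedged sketch of how a stateless AV1 decoder
client might hand the parsed sequence and frame OBUs to a driver through the new controls, using
the standard Request API. Request allocation and buffer queueing are omitted; req_fd is assumed to
be a request obtained from MEDIA_IOC_REQUEST_ALLOC, and the p_av1_* pointer members are the ones
added to struct v4l2_ext_control later in this patch.

/* Hypothetical userspace example, not part of the kernel patch. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_av1_controls(int video_fd, int req_fd,
			    struct v4l2_ctrl_av1_sequence *seq,
			    struct v4l2_ctrl_av1_frame *frame)
{
	struct v4l2_ext_control ctrls[2];
	struct v4l2_ext_controls arg;

	memset(ctrls, 0, sizeof(ctrls));
	ctrls[0].id = V4L2_CID_STATELESS_AV1_SEQUENCE;
	ctrls[0].size = sizeof(*seq);
	ctrls[0].p_av1_sequence = seq;
	ctrls[1].id = V4L2_CID_STATELESS_AV1_FRAME;
	ctrls[1].size = sizeof(*frame);
	ctrls[1].p_av1_frame = frame;

	memset(&arg, 0, sizeof(arg));
	arg.which = V4L2_CTRL_WHICH_REQUEST_VAL;
	arg.request_fd = req_fd;
	arg.count = 2;
	arg.controls = ctrls;
	return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &arg);
}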
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 92e1b700b51c..f5c48b61ab62 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -45,6 +45,25 @@
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
/* Specify an eventfd file descriptor to signal on log write. */
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
+/* By default, a device gets one vhost_worker that its virtqueues share. This
+ * command allows the owner of the device to create an additional vhost_worker
+ * for the device. It can later be bound to 1 or more of its virtqueues using
+ * the VHOST_ATTACH_VRING_WORKER command.
+ *
+ * This must be called after VHOST_SET_OWNER and the caller must be the owner
+ * of the device. The new thread will inherit caller's cgroups and namespaces,
+ * and will share the caller's memory space. The new thread will also be
+ * counted against the caller's RLIMIT_NPROC value.
+ *
+ * The worker's ID used in other commands will be returned in
+ * vhost_worker_state.
+ */
+#define VHOST_NEW_WORKER _IOR(VHOST_VIRTIO, 0x8, struct vhost_worker_state)
+/* Free a worker created with VHOST_NEW_WORKER if it's not attached to any
+ * virtqueue. If userspace is not able to call this for workers it has created,
+ * the kernel will free all the device's workers when the device is closed.
+ */
+#define VHOST_FREE_WORKER _IOW(VHOST_VIRTIO, 0x9, struct vhost_worker_state)
/* Ring setup. */
/* Set number of descriptors in ring. This parameter can not
@@ -70,6 +89,18 @@
#define VHOST_VRING_BIG_ENDIAN 1
#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
+/* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's
+ * virtqueues.
+ *
+ * This will replace the virtqueue's existing worker. If the replaced worker
+ * is no longer attached to any virtqueues, it can be freed with
+ * VHOST_FREE_WORKER.
+ */
+#define VHOST_ATTACH_VRING_WORKER _IOW(VHOST_VIRTIO, 0x15, \
+ struct vhost_vring_worker)
+/* Return the vring worker's ID */
+#define VHOST_GET_VRING_WORKER _IOWR(VHOST_VIRTIO, 0x16, \
+ struct vhost_vring_worker)
/* The following ioctls use eventfd file descriptors to signal and poll
* for events. */
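
For illustration only (not part of the patch): a short sketch of giving one virtqueue its own
vhost worker with the two new ioctls. It assumes vhost_fd is a vhost device file on which the
current thread has already called VHOST_SET_OWNER; struct vhost_worker_state and struct
vhost_vring_worker are the types added to vhost_types.h below.

/* Hypothetical userspace example, not part of the kernel patch. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int move_vring_to_new_worker(int vhost_fd, unsigned int vring_index)
{
	struct vhost_worker_state state;
	struct vhost_vring_worker vring_worker;

	memset(&state, 0, sizeof(state));
	if (ioctl(vhost_fd, VHOST_NEW_WORKER, &state) < 0)
		return -1;	/* kernel fills state.worker_id on success */

	memset(&vring_worker, 0, sizeof(vring_worker));
	vring_worker.index = vring_index;
	vring_worker.worker_id = state.worker_id;
	return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &vring_worker);
}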
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index c5690a8992d8..d3aad12ad1fa 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -47,6 +47,22 @@ struct vhost_vring_addr {
__u64 log_guest_addr;
};
+struct vhost_worker_state {
+ /*
+ * For VHOST_NEW_WORKER the kernel will return the new vhost_worker id.
+ * For VHOST_FREE_WORKER this must be set to the id of the vhost_worker
+ * to free.
+ */
+ unsigned int worker_id;
+};
+
+struct vhost_vring_worker {
+ /* vring index */
+ unsigned int index;
+ /* The id of the vhost_worker returned from VHOST_NEW_WORKER */
+ unsigned int worker_id;
+};
+
/* no alignment requirement */
struct vhost_iotlb_msg {
__u64 iova;
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index aee75eb9e686..3af6a82d0cad 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -672,6 +672,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV12_4L4 v4l2_fourcc('V', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 4x4 tiles */
#define V4L2_PIX_FMT_NV12_16L16 v4l2_fourcc('H', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
#define V4L2_PIX_FMT_NV12_32L32 v4l2_fourcc('S', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 32x32 tiles */
+#define V4L2_PIX_FMT_NV15_4L4 v4l2_fourcc('V', 'T', '1', '5') /* 15 Y/CbCr 4:2:0 10-bit 4x4 tiles */
#define V4L2_PIX_FMT_P010_4L4 v4l2_fourcc('T', '0', '1', '0') /* 12 Y/CbCr 4:2:0 10-bit 4x4 macroblocks */
#define V4L2_PIX_FMT_NV12_8L128 v4l2_fourcc('A', 'T', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
#define V4L2_PIX_FMT_NV12_10BE_8L128 v4l2_fourcc_be('A', 'X', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
@@ -758,6 +759,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */
#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
+#define V4L2_PIX_FMT_AV1_FRAME v4l2_fourcc('A', 'V', '1', 'F') /* AV1 parsed frame */
#define V4L2_PIX_FMT_SPK v4l2_fourcc('S', 'P', 'K', '0') /* Sorenson Spark */
#define V4L2_PIX_FMT_RV30 v4l2_fourcc('R', 'V', '3', '0') /* RealVideo 8 */
#define V4L2_PIX_FMT_RV40 v4l2_fourcc('R', 'V', '4', '0') /* RealVideo 9 & 10 */
@@ -1720,7 +1722,7 @@ struct v4l2_input {
__u8 name[32]; /* Label */
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
- __u32 tuner; /* enum v4l2_tuner_type */
+ __u32 tuner; /* Tuner index */
v4l2_std_id std;
__u32 status;
__u32 capabilities;
@@ -1807,8 +1809,8 @@ struct v4l2_ext_control {
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
- __u32 __user *p_s32;
- __u32 __user *p_s64;
+ __s32 __user *p_s32;
+ __s64 __user *p_s64;
struct v4l2_area __user *p_area;
struct v4l2_ctrl_h264_sps __user *p_h264_sps;
struct v4l2_ctrl_h264_pps *p_h264_pps;
@@ -1828,6 +1830,10 @@ struct v4l2_ext_control {
struct v4l2_ctrl_hevc_slice_params __user *p_hevc_slice_params;
struct v4l2_ctrl_hevc_scaling_matrix __user *p_hevc_scaling_matrix;
struct v4l2_ctrl_hevc_decode_params __user *p_hevc_decode_params;
+ struct v4l2_ctrl_av1_sequence __user *p_av1_sequence;
+ struct v4l2_ctrl_av1_tile_group_entry __user *p_av1_tile_group_entry;
+ struct v4l2_ctrl_av1_frame __user *p_av1_frame;
+ struct v4l2_ctrl_av1_film_grain __user *p_av1_film_grain;
void __user *ptr;
};
} __attribute__ ((packed));
@@ -1901,6 +1907,11 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272,
V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273,
V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274,
+
+ V4L2_CTRL_TYPE_AV1_SEQUENCE = 0x280,
+ V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY = 0x281,
+ V4L2_CTRL_TYPE_AV1_FRAME = 0x282,
+ V4L2_CTRL_TYPE_AV1_FILM_GRAIN = 0x283,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index 08967b3f19c8..3c2ad5fae17f 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -835,7 +835,7 @@ struct iw_encode_ext {
* individual keys */
__u16 alg; /* IW_ENCODE_ALG_* */
__u16 key_len;
- __u8 key[0];
+ __u8 key[];
};
/* SIOCSIWMLME data */
diff --git a/include/video/sticore.h b/include/video/sticore.h
index fbb78d7e7565..945ad60463a1 100644
--- a/include/video/sticore.h
+++ b/include/video/sticore.h
@@ -39,7 +39,6 @@ struct fb_info;
#define STI_WAIT 1
#define STI_PTR(p) ( virt_to_phys(p) )
-#define PTR_STI(p) ( phys_to_virt((unsigned long)p) )
#define sti_onscreen_x(sti) (sti->glob_cfg->onscreen_x)
#define sti_onscreen_y(sti) (sti->glob_cfg->onscreen_y)
@@ -78,8 +77,8 @@ struct sti_glob_cfg_ext {
u8 friendly_boot; /* in friendly boot mode */
s16 power; /* power calculation (in Watts) */
s32 freq_ref; /* frequency reference */
- u32 sti_mem_addr; /* pointer to global sti memory (size=sti_mem_request) */
- u32 future_ptr; /* pointer to future data */
+ u32 *sti_mem_addr; /* pointer to global sti memory (size=sti_mem_request) */
+ u32 *future_ptr; /* pointer to future data */
};
struct sti_glob_cfg {
@@ -90,10 +89,10 @@ struct sti_glob_cfg {
s16 offscreen_y; /* offset height in pixels */
s16 total_x; /* frame buffer width in pixels */
s16 total_y; /* frame buffer height in pixels */
- u32 region_ptrs[STI_REGION_MAX]; /* region pointers */
+ u32 *region_ptrs[STI_REGION_MAX]; /* region pointers */
s32 reent_lvl; /* storage for reentry level value */
- u32 save_addr; /* where to save or restore reentrant state */
- u32 ext_ptr; /* pointer to extended glob_cfg data structure */
+ u32 *save_addr; /* where to save or restore reentrant state */
+ u32 *ext_ptr; /* pointer to extended glob_cfg data structure */
};
@@ -119,26 +118,26 @@ struct sti_init_flags {
u32 caller_kernel : 1; /* set only by kernel for each call */
u32 caller_other : 1; /* set only by non-[BR/K] caller */
u32 pad : 14; /* pad to word boundary */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
struct sti_init_inptr_ext {
u8 config_mon_type; /* configure to monitor type */
u8 pad[1]; /* pad to word boundary */
u16 inflight_data; /* inflight data possible on PCI */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
struct sti_init_inptr {
s32 text_planes; /* number of planes to use for text */
- u32 ext_ptr; /* pointer to extended init_graph inptr data structure*/
+ u32 *ext_ptr; /* pointer to extended init_graph inptr data structure*/
};
struct sti_init_outptr {
s32 errno; /* error number on failure */
s32 text_planes; /* number of planes used for text */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
@@ -148,17 +147,17 @@ struct sti_init_outptr {
struct sti_conf_flags {
u32 wait : 1; /* should routine idle wait or not */
u32 pad : 31; /* pad to word boundary */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
struct sti_conf_inptr {
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
struct sti_conf_outptr_ext {
u32 crt_config[3]; /* hardware specific X11/OGL information */
u32 crt_hdw[3];
- u32 future_ptr;
+ u32 *future_ptr;
};
struct sti_conf_outptr {
@@ -174,7 +173,7 @@ struct sti_conf_outptr {
s32 planes; /* number of fb planes in system */
u8 dev_name[STI_DEV_NAME_LENGTH]; /* null terminated product name */
u32 attributes; /* flags denoting attributes */
- u32 ext_ptr; /* pointer to future data */
+ u32 *ext_ptr; /* pointer to future data */
};
struct sti_rom {
@@ -258,25 +257,25 @@ struct sti_cooked_rom {
/* STI font printing function structs */
struct sti_font_inptr {
- u32 font_start_addr; /* address of font start */
+ u32 *font_start_addr; /* address of font start */
s16 index; /* index into font table of character */
u8 fg_color; /* foreground color of character */
u8 bg_color; /* background color of character */
s16 dest_x; /* X location of character upper left */
s16 dest_y; /* Y location of character upper left */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
struct sti_font_flags {
u32 wait : 1; /* should routine idle wait or not */
u32 non_text : 1; /* font unpack/move in non_text planes =1, text =0 */
u32 pad : 30; /* pad to word boundary */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
struct sti_font_outptr {
s32 errno; /* error number on failure */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
/* STI blockmove structs */
@@ -287,7 +286,7 @@ struct sti_blkmv_flags {
u32 clear : 1; /* clear during move? */
u32 non_text : 1; /* block move in non_text planes =1, text =0 */
u32 pad : 28; /* pad to word boundary */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
struct sti_blkmv_inptr {
@@ -299,12 +298,12 @@ struct sti_blkmv_inptr {
s16 dest_y; /* dest upper left pixel y location */
s16 width; /* block width in pixels */
s16 height; /* block height in pixels */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
struct sti_blkmv_outptr {
s32 errno; /* error number on failure */
- u32 future_ptr; /* pointer to future data */
+ u32 *future_ptr; /* pointer to future data */
};
@@ -351,6 +350,7 @@ struct sti_struct {
unsigned long block_move;
unsigned long init_graph;
unsigned long inq_conf;
+ int do_call64; /* call 64-bit code */
/* all following fields are initialized by the generic routines */
int text_planes;
diff --git a/init/Makefile b/init/Makefile
index 26de459006c4..ec557ada3c12 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -60,3 +60,4 @@ include/generated/utsversion.h: FORCE
$(obj)/version-timestamp.o: include/generated/utsversion.h
CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
KASAN_SANITIZE_version-timestamp.o := n
+GCOV_PROFILE_version-timestamp.o := n
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 1b53a2ab0a27..e8096d502a7c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -149,7 +149,6 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
static void io_queue_sqe(struct io_kiocb *req);
static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
-static __cold void io_fallback_tw(struct io_uring_task *tctx);
struct kmem_cache *req_cachep;
@@ -1238,6 +1237,34 @@ static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
return cmpxchg(&head->first, old, new);
}
+static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+{
+ struct llist_node *node = llist_del_all(&tctx->task_list);
+ struct io_ring_ctx *last_ctx = NULL;
+ struct io_kiocb *req;
+
+ while (node) {
+ req = container_of(node, struct io_kiocb, io_task_work.node);
+ node = node->next;
+ if (sync && last_ctx != req->ctx) {
+ if (last_ctx) {
+ flush_delayed_work(&last_ctx->fallback_work);
+ percpu_ref_put(&last_ctx->refs);
+ }
+ last_ctx = req->ctx;
+ percpu_ref_get(&last_ctx->refs);
+ }
+ if (llist_add(&req->io_task_work.node,
+ &req->ctx->fallback_llist))
+ schedule_delayed_work(&req->ctx->fallback_work, 1);
+ }
+
+ if (last_ctx) {
+ flush_delayed_work(&last_ctx->fallback_work);
+ percpu_ref_put(&last_ctx->refs);
+ }
+}
+
void tctx_task_work(struct callback_head *cb)
{
struct io_tw_state ts = {};
@@ -1250,7 +1277,7 @@ void tctx_task_work(struct callback_head *cb)
unsigned int count = 0;
if (unlikely(current->flags & PF_EXITING)) {
- io_fallback_tw(tctx);
+ io_fallback_tw(tctx, true);
return;
}
@@ -1279,20 +1306,6 @@ void tctx_task_work(struct callback_head *cb)
trace_io_uring_task_work_run(tctx, count, loops);
}
-static __cold void io_fallback_tw(struct io_uring_task *tctx)
-{
- struct llist_node *node = llist_del_all(&tctx->task_list);
- struct io_kiocb *req;
-
- while (node) {
- req = container_of(node, struct io_kiocb, io_task_work.node);
- node = node->next;
- if (llist_add(&req->io_task_work.node,
- &req->ctx->fallback_llist))
- schedule_delayed_work(&req->ctx->fallback_work, 1);
- }
-}
-
static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -1359,7 +1372,7 @@ static void io_req_normal_work_add(struct io_kiocb *req)
if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
return;
- io_fallback_tw(tctx);
+ io_fallback_tw(tctx, false);
}
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
@@ -3109,6 +3122,8 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
if (ctx->rings)
io_kill_timeouts(ctx, NULL, true);
+ flush_delayed_work(&ctx->fallback_work);
+
INIT_WORK(&ctx->exit_work, io_ring_exit_work);
/*
* Use system_unbound_wq to avoid spawning tons of event kworkers
diff --git a/io_uring/net.c b/io_uring/net.c
index d7e6efe89f48..eb1f51ddcb23 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -631,7 +631,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
unsigned int cflags;
cflags = io_put_kbuf(req, issue_flags);
- if (msg->msg_inq && msg->msg_inq != -1U)
+ if (msg->msg_inq && msg->msg_inq != -1)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
@@ -646,7 +646,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
io_recv_prep_retry(req);
/* Known not-empty or unknown state, retry */
if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
- msg->msg_inq == -1U)
+ msg->msg_inq == -1)
return false;
if (issue_flags & IO_URING_F_MULTISHOT)
*ret = IOU_ISSUE_SKIP_COMPLETE;
@@ -805,7 +805,7 @@ retry_multishot:
flags |= MSG_DONTWAIT;
kmsg->msg.msg_get_inq = 1;
- kmsg->msg.msg_inq = -1U;
+ kmsg->msg.msg_inq = -1;
if (req->flags & REQ_F_APOLL_MULTISHOT) {
ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
&mshot_finished);
@@ -903,7 +903,7 @@ retry_multishot:
if (unlikely(ret))
goto out_free;
- msg.msg_inq = -1U;
+ msg.msg_inq = -1;
msg.msg_flags = 0;
flags = sr->msg_flags;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 29fe21099298..817204d53372 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -7891,10 +7891,8 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
pr_err("missing vmlinux BTF, cannot register kfuncs\n");
return -ENOENT;
}
- if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
- pr_err("missing module BTF, cannot register kfuncs\n");
- return -ENOENT;
- }
+ if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+ pr_warn("missing module BTF, cannot register kfuncs\n");
return 0;
}
if (IS_ERR(btf))
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 5c7e9ba7cd6b..813cb6cf72d6 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -131,6 +131,7 @@ char kdb_getchar(void)
int escape_delay = 0;
get_char_func *f, *f_prev = NULL;
int key;
+ static bool last_char_was_cr;
for (f = &kdb_poll_funcs[0]; ; ++f) {
if (*f == NULL) {
@@ -150,6 +151,18 @@ char kdb_getchar(void)
}
/*
+ * The caller expects that newlines are either CR or LF. However
+ * some terminals send _both_ CR and LF. Avoid having to handle
+ * this in the caller by stripping the LF if we saw a CR right
+ * before.
+ */
+ if (last_char_was_cr && key == '\n') {
+ last_char_was_cr = false;
+ continue;
+ }
+ last_char_was_cr = (key == '\r');
+
+ /*
* When the first character is received (or we get a change
* input source) we set ourselves up to handle an escape
* sequences (just in case).
@@ -244,7 +257,8 @@ poll_again:
*cp = tmp;
}
break;
- case 13: /* enter */
+ case 10: /* linefeed */
+ case 13: /* carriage return */
*lastchar++ = '\n';
*lastchar++ = '\0';
if (!KDB_STATE(KGDB_TRANS)) {
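The kdb_getchar() hunk above drops an LF that arrives immediately after a CR, so terminals that send both on Enter still produce a single newline for the caller. A standalone sketch of the same filtering, with getchar() standing in for the kdb polling functions:

#include <stdio.h>

/* Return the next character, swallowing an LF that directly follows a CR. */
static int get_filtered_char(void)
{
	static int last_was_cr;

	for (;;) {
		int c = getchar();

		if (last_was_cr && c == '\n') {	/* CR+LF pair: drop the LF */
			last_was_cr = 0;
			continue;
		}
		last_was_cr = (c == '\r');
		return c;
	}
}

int main(void)
{
	int c;

	/* Echo input, treating a lone CR like a newline (cf. the case 10/13 hunk). */
	while ((c = get_filtered_char()) != EOF)
		putchar(c == '\r' ? '\n' : c);
	return 0;
}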
diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c
index f87c750d3eb3..3c2987f46f6e 100644
--- a/kernel/debug/kdb/kdb_keyboard.c
+++ b/kernel/debug/kdb/kdb_keyboard.c
@@ -13,6 +13,8 @@
#include <linux/ctype.h>
#include <linux/io.h>
+#include "kdb_private.h"
+
/* Keyboard Controller Registers on normal PCs. */
#define KBD_STATUS_REG 0x64 /* Status register (R) */
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 1f8c519a5f81..548fd4059bf9 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -194,7 +194,6 @@ extern char kdb_task_state_char (const struct task_struct *);
extern bool kdb_task_state(const struct task_struct *p, const char *mask);
extern void kdb_ps_suppressed(void);
extern void kdb_ps1(const struct task_struct *p);
-extern void kdb_send_sig(struct task_struct *p, int sig);
extern char kdb_getchar(void);
extern char *kdb_getstr(char *, size_t, const char *);
extern void kdb_gdb_state_pass(char *buf);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3060427f6c9e..78ae7b6f90fd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11391,6 +11391,7 @@ static int pmu_dev_alloc(struct pmu *pmu)
dev_set_drvdata(pmu->dev, pmu);
pmu->dev->bus = &pmu_bus;
+ pmu->dev->parent = pmu->parent;
pmu->dev->release = pmu_dev_release;
ret = dev_set_name(pmu->dev, "%s", pmu->name);
diff --git a/kernel/fork.c b/kernel/fork.c
index b85814e614a5..d2e12b6d2b18 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -686,6 +686,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
for_each_vma(old_vmi, mpnt) {
struct file *file;
+ vma_start_write(mpnt);
if (mpnt->vm_flags & VM_DONTCOPY) {
vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
continue;
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index 1ef9a87511f5..6d443ea22bb7 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -83,12 +83,9 @@ find $cpio_dir -type f -print0 |
xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
# Create archive and try to normalize metadata for reproducibility.
-# For compatibility with older versions of tar, files are fed to tar
-# pre-sorted, as --sort=name might not be available.
-find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
- tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
- --owner=0 --group=0 --numeric-owner --no-recursion \
- -I $XZ -cf $tarfile -C $cpio_dir/ -T - > /dev/null
+tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+ --owner=0 --group=0 --sort=name --numeric-owner \
+ -I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
echo $headers_md5 > kernel/kheaders.md5
echo "$this_file_md5" >> kernel/kheaders.md5
diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index dc7b0160c480..c8b7b4dcf782 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -32,6 +32,18 @@
/* Maximum number of characters written by module_flags() */
#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
+struct kernel_symbol {
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ int value_offset;
+ int name_offset;
+ int namespace_offset;
+#else
+ unsigned long value;
+ const char *name;
+ const char *namespace;
+#endif
+};
+
extern struct mutex module_mutex;
extern struct list_head modules;
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 834de86ebe35..59b1d067e528 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -3092,7 +3092,7 @@ static bool idempotent(struct idempotent *u, const void *cookie)
* remove everybody - which includes ourselves - fill in the return
* value, and then complete the operation.
*/
-static void idempotent_complete(struct idempotent *u, int ret)
+static int idempotent_complete(struct idempotent *u, int ret)
{
const void *cookie = u->cookie;
int hash = hash_ptr(cookie, IDEM_HASH_BITS);
@@ -3109,27 +3109,18 @@ static void idempotent_complete(struct idempotent *u, int ret)
complete(&pos->complete);
}
spin_unlock(&idem_lock);
+ return ret;
}
static int init_module_from_file(struct file *f, const char __user * uargs, int flags)
{
- struct idempotent idem;
struct load_info info = { };
void *buf = NULL;
- int len, ret;
-
- if (!f || !(f->f_mode & FMODE_READ))
- return -EBADF;
-
- if (idempotent(&idem, file_inode(f))) {
- wait_for_completion(&idem.complete);
- return idem.ret;
- }
+ int len;
len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE);
if (len < 0) {
mod_stat_inc(&failed_kreads);
- mod_stat_add_long(len, &invalid_kread_bytes);
return len;
}
@@ -3146,9 +3137,25 @@ static int init_module_from_file(struct file *f, const char __user * uargs, int
info.len = len;
}
- ret = load_module(&info, uargs, flags);
- idempotent_complete(&idem, ret);
- return ret;
+ return load_module(&info, uargs, flags);
+}
+
+static int idempotent_init_module(struct file *f, const char __user * uargs, int flags)
+{
+ struct idempotent idem;
+
+ if (!f || !(f->f_mode & FMODE_READ))
+ return -EBADF;
+
+ /* Is somebody else already doing this operation? */

+ if (idempotent(&idem, file_inode(f))) {
+ wait_for_completion(&idem.complete);
+ return idem.ret;
+ }
+
+ /* Otherwise, we'll do it and complete others */
+ return idempotent_complete(&idem,
+ init_module_from_file(f, uargs, flags));
}
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
@@ -3168,7 +3175,7 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
return -EINVAL;
f = fdget(fd);
- err = init_module_from_file(f.file, uargs, flags);
+ err = idempotent_init_module(f.file, uargs, flags);
fdput(f);
return err;
}
diff --git a/kernel/pid.c b/kernel/pid.c
index 8bce3aebc949..6a1d23a11026 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -657,7 +657,7 @@ void __init pid_idr_init(void)
idr_init(&init_pid_ns.idr);
init_pid_ns.pid_cachep = kmem_cache_create("pid",
- struct_size((struct pid *)NULL, numbers, 1),
+ struct_size_t(struct pid, numbers, 1),
__alignof__(struct pid),
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
NULL);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 70a929784a5d..0bf44afe04dd 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -48,7 +48,7 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
return kc;
snprintf(name, sizeof(name), "pid_%u", level + 1);
- len = struct_size((struct pid *)NULL, numbers, level + 1);
+ len = struct_size_t(struct pid, numbers, level + 1);
mutex_lock(&pid_caches_mutex);
/* Name collision forces to do allocation under mutex. */
if (!*pkc)
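struct_size_t() lets these call sites size a struct with a trailing flexible array from the type alone, instead of the (struct pid *)NULL cast that struct_size() required. A rough userspace equivalent of the arithmetic (the kernel helper additionally saturates on overflow, which this macro does not):

#include <stddef.h>
#include <stdio.h>

struct rec {
	int level;
	unsigned long numbers[];	/* flexible array member */
};

/* Roughly what struct_size_t(struct rec, numbers, n) evaluates to. */
#define REC_SIZE(n)	(sizeof(struct rec) + (n) * sizeof(unsigned long))

int main(void)
{
	printf("bytes needed for 1 entry:   %zu\n", REC_SIZE(1));
	printf("bytes needed for 3 entries: %zu\n", REC_SIZE(3));
	return 0;
}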
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 04bfb1e4d377..781de7cc6a4e 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -51,99 +51,35 @@ COND_SYSCALL_COMPAT(io_pgetevents);
COND_SYSCALL(io_uring_setup);
COND_SYSCALL(io_uring_enter);
COND_SYSCALL(io_uring_register);
-
-/* fs/xattr.c */
-
-/* fs/dcache.c */
-
-/* fs/cookies.c */
COND_SYSCALL(lookup_dcookie);
COND_SYSCALL_COMPAT(lookup_dcookie);
-
-/* fs/eventfd.c */
COND_SYSCALL(eventfd2);
-
-/* fs/eventfd.c */
COND_SYSCALL(epoll_create1);
COND_SYSCALL(epoll_ctl);
COND_SYSCALL(epoll_pwait);
COND_SYSCALL_COMPAT(epoll_pwait);
COND_SYSCALL(epoll_pwait2);
COND_SYSCALL_COMPAT(epoll_pwait2);
-
-/* fs/fcntl.c */
-
-/* fs/inotify_user.c */
COND_SYSCALL(inotify_init1);
COND_SYSCALL(inotify_add_watch);
COND_SYSCALL(inotify_rm_watch);
-
-/* fs/ioctl.c */
-
-/* fs/ioprio.c */
COND_SYSCALL(ioprio_set);
COND_SYSCALL(ioprio_get);
-
-/* fs/locks.c */
COND_SYSCALL(flock);
-
-/* fs/namei.c */
-
-/* fs/namespace.c */
-
-/* fs/nfsctl.c */
-
-/* fs/open.c */
-
-/* fs/pipe.c */
-
-/* fs/quota.c */
COND_SYSCALL(quotactl);
COND_SYSCALL(quotactl_fd);
-
-/* fs/readdir.c */
-
-/* fs/read_write.c */
-
-/* fs/sendfile.c */
-
-/* fs/select.c */
-
-/* fs/signalfd.c */
COND_SYSCALL(signalfd4);
COND_SYSCALL_COMPAT(signalfd4);
-
-/* fs/splice.c */
-
-/* fs/stat.c */
-
-/* fs/sync.c */
-
-/* fs/timerfd.c */
COND_SYSCALL(timerfd_create);
COND_SYSCALL(timerfd_settime);
COND_SYSCALL(timerfd_settime32);
COND_SYSCALL(timerfd_gettime);
COND_SYSCALL(timerfd_gettime32);
-
-/* fs/utimes.c */
-
-/* kernel/acct.c */
COND_SYSCALL(acct);
-
-/* kernel/capability.c */
COND_SYSCALL(capget);
COND_SYSCALL(capset);
-
-/* kernel/exec_domain.c */
-
-/* kernel/exit.c */
-
-/* kernel/fork.c */
/* __ARCH_WANT_SYS_CLONE3 */
COND_SYSCALL(clone3);
-
-/* kernel/futex/syscalls.c */
COND_SYSCALL(futex);
COND_SYSCALL(futex_time32);
COND_SYSCALL(set_robust_list);
@@ -151,29 +87,11 @@ COND_SYSCALL_COMPAT(set_robust_list);
COND_SYSCALL(get_robust_list);
COND_SYSCALL_COMPAT(get_robust_list);
COND_SYSCALL(futex_waitv);
-
-/* kernel/hrtimer.c */
-
-/* kernel/itimer.c */
-
-/* kernel/kexec.c */
COND_SYSCALL(kexec_load);
COND_SYSCALL_COMPAT(kexec_load);
-
-/* kernel/module.c */
COND_SYSCALL(init_module);
COND_SYSCALL(delete_module);
-
-/* kernel/posix-timers.c */
-
-/* kernel/printk.c */
COND_SYSCALL(syslog);
-
-/* kernel/ptrace.c */
-
-/* kernel/sched/core.c */
-
-/* kernel/sys.c */
COND_SYSCALL(setregid);
COND_SYSCALL(setgid);
COND_SYSCALL(setreuid);
@@ -186,12 +104,6 @@ COND_SYSCALL(setfsuid);
COND_SYSCALL(setfsgid);
COND_SYSCALL(setgroups);
COND_SYSCALL(getgroups);
-
-/* kernel/time.c */
-
-/* kernel/timer.c */
-
-/* ipc/mqueue.c */
COND_SYSCALL(mq_open);
COND_SYSCALL_COMPAT(mq_open);
COND_SYSCALL(mq_unlink);
@@ -203,8 +115,6 @@ COND_SYSCALL(mq_notify);
COND_SYSCALL_COMPAT(mq_notify);
COND_SYSCALL(mq_getsetattr);
COND_SYSCALL_COMPAT(mq_getsetattr);
-
-/* ipc/msg.c */
COND_SYSCALL(msgget);
COND_SYSCALL(old_msgctl);
COND_SYSCALL(msgctl);
@@ -214,8 +124,6 @@ COND_SYSCALL(msgrcv);
COND_SYSCALL_COMPAT(msgrcv);
COND_SYSCALL(msgsnd);
COND_SYSCALL_COMPAT(msgsnd);
-
-/* ipc/sem.c */
COND_SYSCALL(semget);
COND_SYSCALL(old_semctl);
COND_SYSCALL(semctl);
@@ -224,8 +132,6 @@ COND_SYSCALL_COMPAT(old_semctl);
COND_SYSCALL(semtimedop);
COND_SYSCALL(semtimedop_time32);
COND_SYSCALL(semop);
-
-/* ipc/shm.c */
COND_SYSCALL(shmget);
COND_SYSCALL(old_shmctl);
COND_SYSCALL(shmctl);
@@ -234,8 +140,6 @@ COND_SYSCALL_COMPAT(old_shmctl);
COND_SYSCALL(shmat);
COND_SYSCALL_COMPAT(shmat);
COND_SYSCALL(shmdt);
-
-/* net/socket.c */
COND_SYSCALL(socket);
COND_SYSCALL(socketpair);
COND_SYSCALL(bind);
@@ -256,30 +160,18 @@ COND_SYSCALL(sendmsg);
COND_SYSCALL_COMPAT(sendmsg);
COND_SYSCALL(recvmsg);
COND_SYSCALL_COMPAT(recvmsg);
-
-/* mm/filemap.c */
-
-/* mm/nommu.c, also with MMU */
COND_SYSCALL(mremap);
-
-/* security/keys/keyctl.c */
COND_SYSCALL(add_key);
COND_SYSCALL(request_key);
COND_SYSCALL(keyctl);
COND_SYSCALL_COMPAT(keyctl);
-
-/* security/landlock/syscalls.c */
COND_SYSCALL(landlock_create_ruleset);
COND_SYSCALL(landlock_add_rule);
COND_SYSCALL(landlock_restrict_self);
-
-/* arch/example/kernel/sys_example.c */
-
-/* mm/fadvise.c */
COND_SYSCALL(fadvise64_64);
COND_SYSCALL_COMPAT(fadvise64_64);
-/* mm/, CONFIG_MMU only */
+/* CONFIG_MMU only */
COND_SYSCALL(swapon);
COND_SYSCALL(swapoff);
COND_SYSCALL(mprotect);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b04f52e7cd28..4529e264cb86 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8146,7 +8146,7 @@ static const struct file_operations tracing_err_log_fops = {
.open = tracing_err_log_open,
.write = tracing_err_log_write,
.read = seq_read,
- .llseek = seq_lseek,
+ .llseek = tracing_lseek,
.release = tracing_err_log_release,
};
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 5fe525f1b8cc..7ccc7a8e155b 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -31,7 +31,7 @@ trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
/* Common ftrace options */
xbc_node_for_each_array_value(node, "options", anode, p) {
- if (strscpy(buf, p, ARRAY_SIZE(buf)) == -E2BIG) {
+ if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) {
pr_err("String is too long: %s\n", p);
continue;
}
@@ -87,7 +87,7 @@ trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
const char *p;
xbc_node_for_each_array_value(node, "events", anode, p) {
- if (strscpy(buf, p, ARRAY_SIZE(buf)) == -E2BIG) {
+ if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) {
pr_err("String is too long: %s\n", p);
continue;
}
@@ -486,7 +486,7 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
p = xbc_node_find_value(enode, "filter", NULL);
if (p && *p != '\0') {
- if (strscpy(buf, p, ARRAY_SIZE(buf)) == -E2BIG)
+ if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0)
pr_err("filter string is too long: %s\n", p);
else if (apply_event_filter(file, buf) < 0)
pr_err("Failed to apply filter: %s\n", buf);
@@ -494,7 +494,7 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
xbc_node_for_each_array_value(enode, "actions", anode, p) {
- if (strscpy(buf, p, ARRAY_SIZE(buf)) == -E2BIG)
+ if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0)
pr_err("action string is too long: %s\n", p);
else if (trigger_process_regex(file, buf) < 0)
pr_err("Failed to apply an action: %s\n", p);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 781f061ec0fa..fbc89baf7de6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2739,7 +2739,7 @@ config STACKINIT_KUNIT_TEST
config FORTIFY_KUNIT_TEST
tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT
+ depends on KUNIT && FORTIFY_SOURCE
default KUNIT_ALL_TESTS
help
Builds unit tests for checking internals of FORTIFY_SOURCE as used
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 1c81413c51f8..ddb31015e38a 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -1495,7 +1495,7 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
EXPORT_SYMBOL(bitmap_to_arr32);
#endif
-#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+#if BITS_PER_LONG == 32
/**
* bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
* @bitmap: array of unsigned longs, the destination bitmap
diff --git a/lib/cpumask.c b/lib/cpumask.c
index e7258836b60b..de356f16773a 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(cpumask_local_spread);
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
/**
- * Returns an arbitrary cpu within srcp1 & srcp2.
+ * cpumask_any_and_distribute - Return an arbitrary cpu within srcp1 & srcp2.
*
* Iterated calls using the same srcp1 and srcp2 will be distributed within
* their intersection.
diff --git a/lib/dhry_1.c b/lib/dhry_1.c
index 83247106824c..08edbbb19f57 100644
--- a/lib/dhry_1.c
+++ b/lib/dhry_1.c
@@ -139,8 +139,15 @@ int dhry(int n)
/* Initializations */
- Next_Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_KERNEL);
- Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_KERNEL);
+ Next_Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
+ if (!Next_Ptr_Glob)
+ return -ENOMEM;
+
+ Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
+ if (!Ptr_Glob) {
+ kfree(Next_Ptr_Glob);
+ return -ENOMEM;
+ }
Ptr_Glob->Ptr_Comp = Next_Ptr_Glob;
Ptr_Glob->Discr = Ident_1;
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 524132f33cf0..c8c33cbaae9e 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -25,11 +25,6 @@ static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static char array_unknown[] = "compiler thinks I might change";
-/* Handle being built without CONFIG_FORTIFY_SOURCE */
-#ifndef __compiletime_strlen
-# define __compiletime_strlen __builtin_strlen
-#endif
-
static void known_sizes_test(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
@@ -312,14 +307,6 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
-static int fortify_test_init(struct kunit *test)
-{
- if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
- kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");
-
- return 0;
-}
-
static struct kunit_case fortify_test_cases[] = {
KUNIT_CASE(known_sizes_test),
KUNIT_CASE(control_flow_split_test),
@@ -336,7 +323,6 @@ static struct kunit_case fortify_test_cases[] = {
static struct kunit_suite fortify_test_suite = {
.name = "fortify",
- .init = fortify_test_init,
.test_cases = fortify_test_cases,
};
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index e97d7060329e..e86231a44c3d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -1237,7 +1237,7 @@ static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
if (is_vmalloc_or_module_addr((void *)kaddr))
page = vmalloc_to_page((void *)kaddr);
else
- page = virt_to_page(kaddr);
+ page = virt_to_page((void *)kaddr);
sg_set_page(sg, page, len, off);
sgtable->nents++;
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index a8005ad3bd58..187f5b2db4cf 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -470,6 +470,7 @@ static void __init test_bitmap_parselist(void)
if (err != ptest.errno) {
pr_err("parselist: %d: input is %s, errno is %d, expected %d\n",
i, ptest.in, err, ptest.errno);
+ failed_tests++;
continue;
}
@@ -478,6 +479,7 @@ static void __init test_bitmap_parselist(void)
pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
i, ptest.in, bmap[0],
*ptest.expected);
+ failed_tests++;
continue;
}
@@ -511,11 +513,13 @@ static void __init test_bitmap_printlist(void)
if (ret != slen + 1) {
pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen);
+ failed_tests++;
goto out;
}
if (strncmp(buf, expected, slen)) {
pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected);
+ failed_tests++;
goto out;
}
@@ -583,6 +587,7 @@ static void __init test_bitmap_parse(void)
if (err != test.errno) {
pr_err("parse: %d: input is %s, errno is %d, expected %d\n",
i, test.in, err, test.errno);
+ failed_tests++;
continue;
}
@@ -591,6 +596,7 @@ static void __init test_bitmap_parse(void)
pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
i, test.in, bmap[0],
*test.expected);
+ failed_tests++;
continue;
}
@@ -615,10 +621,12 @@ static void __init test_bitmap_arr32(void)
next_bit = find_next_bit(bmap2,
round_up(nbits, BITS_PER_LONG), nbits);
- if (next_bit < round_up(nbits, BITS_PER_LONG))
+ if (next_bit < round_up(nbits, BITS_PER_LONG)) {
pr_err("bitmap_copy_arr32(nbits == %d:"
" tail is not safely cleared: %d\n",
nbits, next_bit);
+ failed_tests++;
+ }
if (nbits < EXP1_IN_BITS - 32)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
@@ -641,15 +649,19 @@ static void __init test_bitmap_arr64(void)
expect_eq_bitmap(bmap2, exp1, nbits);
next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits);
- if (next_bit < round_up(nbits, BITS_PER_LONG))
+ if (next_bit < round_up(nbits, BITS_PER_LONG)) {
pr_err("bitmap_copy_arr64(nbits == %d:"
" tail is not safely cleared: %d\n", nbits, next_bit);
+ failed_tests++;
+ }
if ((nbits % 64) &&
- (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0)))
+ (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0))) {
pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n",
nbits, arr[(nbits - 1) / 64],
GENMASK_ULL((nbits - 1) % 64, 0));
+ failed_tests++;
+ }
if (nbits < EXP1_IN_BITS - 64)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 1d7d480b8eeb..add4699fc6cd 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -214,7 +214,7 @@ static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
*dst = kstrndup(name, count, gfp);
if (!*dst)
- return -ENOSPC;
+ return -ENOMEM;
return count;
}
@@ -671,7 +671,7 @@ static ssize_t trigger_request_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s'\n", name);
@@ -719,7 +719,7 @@ static ssize_t trigger_request_platform_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("inserting test platform fw '%s'\n", name);
efi_embedded_fw.name = name;
@@ -772,7 +772,7 @@ static ssize_t trigger_async_request_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s'\n", name);
@@ -817,7 +817,7 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s' using custom fallback mechanism\n", name);
@@ -868,7 +868,7 @@ static int test_fw_run_batch_request(void *data)
test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
if (!test_buf)
- return -ENOSPC;
+ return -ENOMEM;
if (test_fw_config->partial)
req->rc = request_partial_firmware_into_buf
diff --git a/mm/gup.c b/mm/gup.c
index ef29641671c7..76d222ccc3ff 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1091,6 +1091,45 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
return 0;
}
+/*
+ * This is "vma_lookup()", but with a warning if we would have
+ * historically expanded the stack in the GUP code.
+ */
+static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
+ unsigned long addr)
+{
+#ifdef CONFIG_STACK_GROWSUP
+ return vma_lookup(mm, addr);
+#else
+ static volatile unsigned long next_warn;
+ struct vm_area_struct *vma;
+ unsigned long now, next;
+
+ vma = find_vma(mm, addr);
+ if (!vma || (addr >= vma->vm_start))
+ return vma;
+
+ /* Only warn for half-way relevant accesses */
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return NULL;
+ if (vma->vm_start - addr > 65536)
+ return NULL;
+
+ /* Let's not warn more than once an hour.. */
+ now = jiffies; next = next_warn;
+ if (next && time_before(now, next))
+ return NULL;
+ next_warn = now + 60*60*HZ;
+
+ /* Let people know things may have changed. */
+ pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
+ current->comm, task_pid_nr(current),
+ vma->vm_start, vma->vm_end, addr);
+ dump_stack();
+ return NULL;
+#endif
+}
+
/**
* __get_user_pages() - pin user pages in memory
* @mm: mm_struct of target mm
@@ -1168,11 +1207,7 @@ static long __get_user_pages(struct mm_struct *mm,
/* first iteration or cross vma bound */
if (!vma || start >= vma->vm_end) {
- vma = find_vma(mm, start);
- if (vma && (start < vma->vm_start)) {
- WARN_ON_ONCE(vma->vm_flags & VM_GROWSDOWN);
- vma = NULL;
- }
+ vma = gup_vma_lookup(mm, start);
if (!vma && in_gate_area(mm, start)) {
ret = get_gate_page(mm, start & PAGE_MASK,
gup_flags, &vma,
@@ -1337,13 +1372,9 @@ int fixup_user_fault(struct mm_struct *mm,
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
retry:
- vma = find_vma(mm, address);
+ vma = gup_vma_lookup(mm, address);
if (!vma)
return -EFAULT;
- if (address < vma->vm_start ) {
- WARN_ON_ONCE(vma->vm_flags & VM_GROWSDOWN);
- return -EFAULT;
- }
if (!vma_permits_fault(vma, fault_flags))
return -EFAULT;
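gup_vma_lookup() above throttles its warning by remembering, in a static variable, the earliest time the next message may go out (about an hour later in jiffies). A simplified userspace analogue of that rate limiting, using time() instead of jiffies:

#include <stdio.h>
#include <time.h>

/* Print a message at most once per hour; calls inside the window are dropped. */
static void warn_throttled(const char *msg)
{
	static time_t next_warn;
	time_t now = time(NULL);

	if (next_warn && now < next_warn)
		return;
	next_warn = now + 60 * 60;
	fprintf(stderr, "%s\n", msg);
}

int main(void)
{
	warn_throttled("first warning goes out");
	warn_throttled("second call within the hour is suppressed");
	return 0;
}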
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bce28cca73a1..64a3239b6407 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7246,7 +7246,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pte = (pte_t *)pmd_alloc(mm, pud, addr);
}
}
- BUG_ON(pte && pte_present(ptep_get(pte)) && !pte_huge(ptep_get(pte)));
+
+ if (pte) {
+ pte_t pteval = ptep_get_lockless(pte);
+
+ BUG_ON(pte_present(pteval) && !pte_huge(pteval));
+ }
return pte;
}
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 5b4c97baa656..4d837ab83f08 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -130,9 +130,10 @@ static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
if (unlikely(ret)) {
const void *last_byte = addr + size - 1;
s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
+ s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;
if (unlikely(ret != (unsigned long)last_shadow ||
- (((long)last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
+ last_accessible_byte >= *last_shadow))
return true;
}
return false;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index b799f11e45dc..2e973b36fe07 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -466,18 +466,6 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
- /*
- * Explicitly initialize the memory with the precise object size to
- * avoid overwriting the slab redzone. This disables initialization in
- * the arch code and may thus lead to performance penalty. This penalty
- * does not affect production builds, as slab redzones are not enabled
- * there.
- */
- if (__slub_debug_enabled() &&
- init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
- init = false;
- memzero_explicit((void *)addr, size);
- }
size = round_up(size, KASAN_GRANULE_SIZE);
hw_set_mem_tag_range((void *)addr, size, tag, init);
diff --git a/mm/memory.c b/mm/memory.c
index 0ae594703021..01f39e8144ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3951,6 +3951,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
}
/*
+ * Some architectures may have to restore extra metadata to the page
+ * when reading from swap. This metadata may be indexed by swap entry
+ * so this must be called before swap_free().
+ */
+ arch_swap_restore(entry, folio);
+
+ /*
* Remove the swap entry and conditionally try to free up the swapcache.
* We're already holding a reference on the page but haven't mapped it
* yet.
diff --git a/mm/mmap.c b/mm/mmap.c
index 3e5793ebbaae..3eda23c9ebe7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -193,8 +193,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
struct mm_struct *mm = current->mm;
struct vm_area_struct *brkvma, *next = NULL;
unsigned long min_brk;
- bool populate;
- bool downgraded = false;
+ bool populate = false;
LIST_HEAD(uf);
struct vma_iterator vmi;
@@ -236,13 +235,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto success;
}
- /*
- * Always allow shrinking brk.
- * do_vma_munmap() may downgrade mmap_lock to read.
- */
+ /* Always allow shrinking brk. */
if (brk <= mm->brk) {
- int ret;
-
/* Search one past newbrk */
vma_iter_init(&vmi, mm, newbrk);
brkvma = vma_find(&vmi, oldbrk);
@@ -250,19 +244,14 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out; /* mapping intersects with an existing non-brk vma. */
/*
* mm->brk must be protected by write mmap_lock.
- * do_vma_munmap() may downgrade the lock, so update it
+ * do_vma_munmap() will drop the lock on success, so update it
* before calling do_vma_munmap().
*/
mm->brk = brk;
- ret = do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true);
- if (ret == 1) {
- downgraded = true;
- goto success;
- } else if (!ret)
- goto success;
-
- mm->brk = origbrk;
- goto out;
+ if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
+ goto out;
+
+ goto success_unlocked;
}
if (check_brk_limits(oldbrk, newbrk - oldbrk))
@@ -283,19 +272,19 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out;
mm->brk = brk;
+ if (mm->def_flags & VM_LOCKED)
+ populate = true;
success:
- populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
- if (downgraded)
- mmap_read_unlock(mm);
- else
- mmap_write_unlock(mm);
+ mmap_write_unlock(mm);
+success_unlocked:
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
return brk;
out:
+ mm->brk = origbrk;
mmap_write_unlock(mm);
return origbrk;
}
@@ -1988,6 +1977,8 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
return -ENOMEM;
}
+ /* Lock the VMA before expanding to prevent concurrent page faults */
+ vma_start_write(vma);
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_lock in read mode. We need the
@@ -2075,6 +2066,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
return -ENOMEM;
}
+ /* Lock the VMA before expanding to prevent concurrent page faults */
+ vma_start_write(vma);
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_lock in read mode. We need the
@@ -2428,14 +2421,16 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
* @start: The aligned start address to munmap.
* @end: The aligned end address to munmap.
* @uf: The userfaultfd list_head
- * @downgrade: Set to true to attempt a write downgrade of the mmap_lock
+ * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
+ * success.
*
- * If @downgrade is true, check return code for potential release of the lock.
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
*/
static int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
- unsigned long end, struct list_head *uf, bool downgrade)
+ unsigned long end, struct list_head *uf, bool unlock)
{
struct vm_area_struct *prev, *next = NULL;
struct maple_tree mt_detach;
@@ -2551,33 +2546,23 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
/* Point of no return */
mm->locked_vm -= locked_vm;
mm->map_count -= count;
- /*
- * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
- * VM_GROWSUP VMA. Such VMAs can change their size under
- * down_read(mmap_lock) and collide with the VMA we are about to unmap.
- */
- if (downgrade) {
- if (next && (next->vm_flags & VM_GROWSDOWN))
- downgrade = false;
- else if (prev && (prev->vm_flags & VM_GROWSUP))
- downgrade = false;
- else
- mmap_write_downgrade(mm);
- }
+ if (unlock)
+ mmap_write_downgrade(mm);
/*
* We can free page tables without write-locking mmap_lock because VMAs
* were isolated before we downgraded mmap_lock.
*/
- unmap_region(mm, &mt_detach, vma, prev, next, start, end, !downgrade);
+ unmap_region(mm, &mt_detach, vma, prev, next, start, end, !unlock);
/* Statistics and freeing VMAs */
mas_set(&mas_detach, start);
remove_mt(mm, &mas_detach);
__mt_destroy(&mt_detach);
-
-
validate_mm(mm);
- return downgrade ? 1 : 0;
+ if (unlock)
+ mmap_read_unlock(mm);
+
+ return 0;
clear_tree_failed:
userfaultfd_error:
@@ -2590,6 +2575,7 @@ end_split_failed:
__mt_destroy(&mt_detach);
start_split_failed:
map_count_exceeded:
+ validate_mm(mm);
return error;
}
@@ -2600,18 +2586,18 @@ map_count_exceeded:
* @start: The start address to munmap
* @len: The length of the range to munmap
* @uf: The userfaultfd list_head
- * @downgrade: set to true if the user wants to attempt to write_downgrade the
- * mmap_lock
+ * @unlock: set to true if the user wants to drop the mmap_lock on success
*
* This function takes a @mas that is either pointing to the previous VMA or set
* to MA_START and sets it up to remove the mapping(s). The @len will be
 * aligned and any arch_unmap work will be performed.
*
- * Returns: -EINVAL on failure, 1 on success and unlock, 0 otherwise.
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
*/
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
- bool downgrade)
+ bool unlock)
{
unsigned long end;
struct vm_area_struct *vma;
@@ -2628,10 +2614,13 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
/* Find the first overlapping VMA */
vma = vma_find(vmi, end);
- if (!vma)
+ if (!vma) {
+ if (unlock)
+ mmap_write_unlock(mm);
return 0;
+ }
- return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
+ return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}
/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
@@ -2639,6 +2628,8 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
* @start: The start address to munmap
* @len: The length to be munmapped.
* @uf: The userfaultfd list_head
+ *
+ * Return: 0 on success, error otherwise.
*/
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
@@ -2818,6 +2809,8 @@ cannot_expand:
if (vma_iter_prealloc(&vmi))
goto close_and_free_vma;
+ /* Lock the VMA since it is modified after insertion into VMA tree */
+ vma_start_write(vma);
if (vma->vm_file)
i_mmap_lock_write(vma->vm_file->f_mapping);
@@ -2899,7 +2892,7 @@ unacct_error:
return error;
}
-static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
+static int __vm_munmap(unsigned long start, size_t len, bool unlock)
{
int ret;
struct mm_struct *mm = current->mm;
@@ -2909,16 +2902,8 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = do_vmi_munmap(&vmi, mm, start, len, &uf, downgrade);
- /*
- * Returning 1 indicates mmap_lock is downgraded.
- * But 1 is not legal return value of vm_munmap() and munmap(), reset
- * it to 0 before return.
- */
- if (ret == 1) {
- mmap_read_unlock(mm);
- ret = 0;
- } else
+ ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
+ if (ret || !unlock)
mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
@@ -3028,23 +3013,22 @@ out:
* @start: the start of the address to unmap
* @end: The end of the address to unmap
* @uf: The userfaultfd list_head
- * @downgrade: Attempt to downgrade or not
+ * @unlock: Drop the lock on success
*
- * Returns: 0 on success and not downgraded, 1 on success and downgraded.
* unmaps a VMA mapping when the vma iterator is already in position.
* Does not handle alignment.
+ *
+ * Return: 0 on success and drops the lock if so directed, error on failure and
+ * will still hold the lock.
*/
int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf, bool downgrade)
+ unsigned long start, unsigned long end, struct list_head *uf,
+ bool unlock)
{
struct mm_struct *mm = vma->vm_mm;
- int ret;
arch_unmap(mm, start, end);
- ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
- validate_mm(mm);
- return ret;
+ return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}
/*
diff --git a/mm/mremap.c b/mm/mremap.c
index fe6b722ae633..11e06e4ab33b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -715,7 +715,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
}
vma_iter_init(&vmi, mm, old_addr);
- if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
+ if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) {
/* OOM: unable to split vma, just get accounts right */
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
vm_acct_memory(old_len >> PAGE_SHIFT);
@@ -913,7 +913,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
bool locked = false;
- bool downgraded = false;
struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
LIST_HEAD(uf_unmap_early);
LIST_HEAD(uf_unmap);
@@ -999,24 +998,23 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
* do_vmi_munmap does all the needed commit accounting, and
- * downgrades mmap_lock to read if so directed.
+ * unlocks the mmap_lock if so directed.
*/
if (old_len >= new_len) {
- int retval;
VMA_ITERATOR(vmi, mm, addr + new_len);
- retval = do_vmi_munmap(&vmi, mm, addr + new_len,
- old_len - new_len, &uf_unmap, true);
- /* Returning 1 indicates mmap_lock is downgraded to read. */
- if (retval == 1) {
- downgraded = true;
- } else if (retval < 0 && old_len != new_len) {
- ret = retval;
+ if (old_len == new_len) {
+ ret = addr;
goto out;
}
+ ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
+ &uf_unmap, true);
+ if (ret)
+ goto out;
+
ret = addr;
- goto out;
+ goto out_unlocked;
}
/*
@@ -1101,12 +1099,10 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
out:
if (offset_in_page(ret))
locked = false;
- if (downgraded)
- mmap_read_unlock(current->mm);
- else
- mmap_write_unlock(current->mm);
+ mmap_write_unlock(current->mm);
if (locked && new_len > old_len)
mm_populate(new_addr + old_len, new_len - old_len);
+out_unlocked:
userfaultfd_unmap_complete(mm, &uf_unmap_early);
mremap_userfaultfd_complete(&uf, addr, ret, old_len);
userfaultfd_unmap_complete(mm, &uf_unmap);
diff --git a/mm/nommu.c b/mm/nommu.c
index fdc392735ec6..c072a660ec2c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -637,8 +637,13 @@ EXPORT_SYMBOL(find_vma);
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long addr, struct pt_regs *regs)
{
+ struct vm_area_struct *vma;
+
mmap_read_lock(mm);
- return vma_lookup(mm, addr);
+ vma = vma_lookup(mm, addr);
+ if (!vma)
+ mmap_read_unlock(mm);
+ return vma;
}
/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1d17fb1ec863..d3f42009bb70 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2434,6 +2434,7 @@ int write_cache_pages(struct address_space *mapping,
for (i = 0; i < nr_folios; i++) {
struct folio *folio = fbatch.folios[i];
+ unsigned long nr;
done_index = folio->index;
@@ -2471,6 +2472,7 @@ continue_unlock:
trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
error = writepage(folio, wbc, data);
+ nr = folio_nr_pages(folio);
if (unlikely(error)) {
/*
* Handle errors according to the type of
@@ -2489,8 +2491,7 @@ continue_unlock:
error = 0;
} else if (wbc->sync_mode != WB_SYNC_ALL) {
ret = error;
- done_index = folio->index +
- folio_nr_pages(folio);
+ done_index = folio->index + nr;
done = 1;
break;
}
@@ -2504,7 +2505,8 @@ continue_unlock:
* keep going until we have written all the pages
* we tagged for writeback prior to entering this loop.
*/
- if (--wbc->nr_to_write <= 0 &&
+ wbc->nr_to_write -= nr;
+ if (wbc->nr_to_write <= 0 &&
wbc->sync_mode == WB_SYNC_NONE) {
done = 1;
break;
diff --git a/mm/slab.h b/mm/slab.h
index 6a5633b25eb5..9c0e09d0f81f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -723,6 +723,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
unsigned int orig_size)
{
unsigned int zero_size = s->object_size;
+ bool kasan_init = init;
size_t i;
flags &= gfp_allowed_mask;
@@ -740,6 +741,17 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
zero_size = orig_size;
/*
+ * When slub_debug is enabled, avoid memory initialization integrated
+ * into KASAN and instead zero out the memory via the memset below with
+ * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
+ * cause false-positive reports. This does not lead to a performance
+ * penalty on production builds, as slub_debug is not intended to be
+ * enabled there.
+ */
+ if (__slub_debug_enabled())
+ kasan_init = false;
+
+ /*
* As memory initialization might be integrated into KASAN,
* kasan_slab_alloc and initialization memset must be
* kept together to avoid discrepancies in behavior.
@@ -747,8 +759,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
* As p[i] might get tagged, memset and kmemleak hook come after KASAN.
*/
for (i = 0; i < size; i++) {
- p[i] = kasan_slab_alloc(s, p[i], flags, init);
- if (p[i] && init && !kasan_has_integrated_init())
+ p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
+ if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
memset(p[i], 0, zero_size);
kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, flags);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 1ef952bda97d..056f9516e46d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -775,6 +775,11 @@ static void le_conn_timeout(struct work_struct *work)
hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
+struct iso_cig_params {
+ struct hci_cp_le_set_cig_params cp;
+ struct hci_cis_params cis[0x1f];
+};
+
struct iso_list_data {
union {
u8 cig;
@@ -786,10 +791,7 @@ struct iso_list_data {
u16 sync_handle;
};
int count;
- struct {
- struct hci_cp_le_set_cig_params cp;
- struct hci_cis_params cis[0x11];
- } pdu;
+ struct iso_cig_params pdu;
};
static void bis_list(struct hci_conn *conn, void *data)
@@ -1764,10 +1766,33 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
+static void set_cig_params_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct iso_cig_params *pdu = data;
+
+ bt_dev_dbg(hdev, "");
+
+ if (err)
+ bt_dev_err(hdev, "Unable to set CIG parameters: %d", err);
+
+ kfree(pdu);
+}
+
+static int set_cig_params_sync(struct hci_dev *hdev, void *data)
+{
+ struct iso_cig_params *pdu = data;
+ u32 plen;
+
+ plen = sizeof(pdu->cp) + pdu->cp.num_cis * sizeof(pdu->cis[0]);
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS, plen, pdu,
+ HCI_CMD_TIMEOUT);
+}
+
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
struct hci_dev *hdev = conn->hdev;
struct iso_list_data data;
+ struct iso_cig_params *pdu;
memset(&data, 0, sizeof(data));
@@ -1837,11 +1862,15 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
return false;
- if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
- sizeof(data.pdu.cp) +
- (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
- &data.pdu) < 0)
+ pdu = kmemdup(&data.pdu, sizeof(*pdu), GFP_KERNEL);
+ if (!pdu)
+ return false;
+
+ if (hci_cmd_sync_queue(hdev, set_cig_params_sync, pdu,
+ set_cig_params_complete) < 0) {
+ kfree(pdu);
return false;
+ }
return true;
}
@@ -2044,10 +2073,10 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
flags |= MGMT_ADV_FLAG_SEC_2M;
/* Align intervals */
- interval = qos->bcast.out.interval / 1250;
+ interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
if (qos->bcast.bis)
- sync_interval = qos->bcast.sync_interval * 1600;
+ sync_interval = interval * 4;
err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
conn->le_per_adv_data, flags, interval,
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 09ba6d8987ee..95816a938cea 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3812,7 +3812,8 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
- if (!cp || rp->num_handles != cp->num_cis || rp->cig_id != cp->cig_id) {
+ if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
+ rp->cig_id != cp->cig_id)) {
bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
status = HCI_ERROR_UNSPECIFIED;
}
@@ -6316,23 +6317,18 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
return;
}
- /* When receiving non-connectable or scannable undirected
- * advertising reports, this means that the remote device is
- * not connectable and then clearly indicate this in the
- * device found event.
- *
- * When receiving a scan response, then there is no way to
+ /* When receiving a scan response, then there is no way to
* know if the remote device is connectable or not. However
* since scan responses are merged with a previously seen
* advertising report, the flags field from that report
* will be used.
*
- * In the really unlikely case that a controller get confused
- * and just sends a scan response event, then it is marked as
- * not connectable as well.
+ * In the unlikely case that a controller just sends a scan
+ * response event that doesn't match the pending report, then
+ * it is marked as a standalone SCAN_RSP.
*/
if (type == LE_ADV_SCAN_RSP)
- flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
+ flags = MGMT_DEV_FOUND_SCAN_RSP;
/* If there's nothing pending either store the data from this
* event or send an immediate device found event if the data
@@ -6790,6 +6786,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
{
struct hci_evt_le_cis_established *ev = data;
struct hci_conn *conn;
+ struct bt_iso_qos *qos;
u16 handle = __le16_to_cpu(ev->handle);
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@@ -6811,21 +6808,39 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
goto unlock;
}
- if (conn->role == HCI_ROLE_SLAVE) {
- __le32 interval;
-
- memset(&interval, 0, sizeof(interval));
-
- memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
- conn->iso_qos.ucast.in.interval = le32_to_cpu(interval);
- memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
- conn->iso_qos.ucast.out.interval = le32_to_cpu(interval);
- conn->iso_qos.ucast.in.latency = le16_to_cpu(ev->interval);
- conn->iso_qos.ucast.out.latency = le16_to_cpu(ev->interval);
- conn->iso_qos.ucast.in.sdu = le16_to_cpu(ev->c_mtu);
- conn->iso_qos.ucast.out.sdu = le16_to_cpu(ev->p_mtu);
- conn->iso_qos.ucast.in.phy = ev->c_phy;
- conn->iso_qos.ucast.out.phy = ev->p_phy;
+ qos = &conn->iso_qos;
+
+ /* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
+ qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
+ qos->ucast.out.interval = qos->ucast.in.interval;
+
+ switch (conn->role) {
+ case HCI_ROLE_SLAVE:
+ /* Convert Transport Latency (us) to Latency (msec) */
+ qos->ucast.in.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
+ 1000);
+ qos->ucast.out.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 1000);
+ qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
+ qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
+ qos->ucast.in.phy = ev->c_phy;
+ qos->ucast.out.phy = ev->p_phy;
+ break;
+ case HCI_ROLE_MASTER:
+ /* Convert Transport Latency (us) to Latency (msec) */
+ qos->ucast.out.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
+ 1000);
+ qos->ucast.in.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 1000);
+ qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
+ qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
+ qos->ucast.out.phy = ev->c_phy;
+ qos->ucast.in.phy = ev->p_phy;
+ break;
}
if (!ev->status) {
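The CIS-established handler now derives the SDU interval from the ISO Interval field, which counts 1.25 ms slots, and converts the 24-bit transport latencies from microseconds to milliseconds with round-to-nearest. The same arithmetic as a small sketch (helper names here are illustrative, not Bluetooth core APIs):

#include <stdio.h>

/* ISO Interval is in units of 1.25 ms, i.e. 1250 us per slot. */
static unsigned int iso_interval_to_us(unsigned int slots)
{
	return slots * 1250;
}

/* Transport latency arrives in us; report it in ms, rounded to nearest. */
static unsigned int transport_latency_to_ms(unsigned int latency_us)
{
	return (latency_us + 500) / 1000;	/* like DIV_ROUND_CLOSEST(x, 1000) */
}

int main(void)
{
	printf("24 slots -> %u us SDU interval\n", iso_interval_to_us(24));
	printf("10500 us -> %u ms latency\n", transport_latency_to_ms(10500));
	return 0;
}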
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 804cde43b4e0..8561616abbe5 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -4623,26 +4623,18 @@ static int hci_dev_setup_sync(struct hci_dev *hdev)
* BD_ADDR invalid before creating the HCI device or in
* its setup callback.
*/
- invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
-
+ invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
+ test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
if (!ret) {
- if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
- if (!bacmp(&hdev->public_addr, BDADDR_ANY))
- hci_dev_get_bd_addr_from_property(hdev);
-
- if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
- hdev->set_bdaddr) {
- ret = hdev->set_bdaddr(hdev,
- &hdev->public_addr);
-
- /* If setting of the BD_ADDR from the device
- * property succeeds, then treat the address
- * as valid even if the invalid BD_ADDR
- * quirk indicates otherwise.
- */
- if (!ret)
- invalid_bdaddr = false;
- }
+ if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
+ !bacmp(&hdev->public_addr, BDADDR_ANY))
+ hci_dev_get_bd_addr_from_property(hdev);
+
+ if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
+ hdev->set_bdaddr) {
+ ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
+ if (!ret)
+ invalid_bdaddr = false;
}
}
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 2934d7f4d564..15b33579007c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -6,7 +6,9 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-static struct class *bt_class;
+static const struct class bt_class = {
+ .name = "bluetooth",
+};
static void bt_link_release(struct device *dev)
{
@@ -36,7 +38,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
BT_DBG("conn %p", conn);
conn->dev.type = &bt_link;
- conn->dev.class = bt_class;
+ conn->dev.class = &bt_class;
conn->dev.parent = &hdev->dev;
device_initialize(&conn->dev);
@@ -104,7 +106,7 @@ void hci_init_sysfs(struct hci_dev *hdev)
struct device *dev = &hdev->dev;
dev->type = &bt_host;
- dev->class = bt_class;
+ dev->class = &bt_class;
__module_get(THIS_MODULE);
device_initialize(dev);
@@ -112,12 +114,10 @@ void hci_init_sysfs(struct hci_dev *hdev)
int __init bt_sysfs_init(void)
{
- bt_class = class_create("bluetooth");
-
- return PTR_ERR_OR_ZERO(bt_class);
+ return class_register(&bt_class);
}
void bt_sysfs_cleanup(void)
{
- class_destroy(bt_class);
+ class_unregister(&bt_class);
}
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 34d55a85d8f6..0e6cc57b3911 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -704,7 +704,7 @@ static struct bt_iso_qos default_qos = {
.bcast = {
.big = BT_ISO_QOS_BIG_UNSET,
.bis = BT_ISO_QOS_BIS_UNSET,
- .sync_interval = 0x00,
+ .sync_factor = 0x01,
.packing = 0x00,
.framing = 0x00,
.in = DEFAULT_IO_QOS,
@@ -1213,7 +1213,7 @@ static bool check_ucast_qos(struct bt_iso_qos *qos)
static bool check_bcast_qos(struct bt_iso_qos *qos)
{
- if (qos->bcast.sync_interval > 0x07)
+ if (qos->bcast.sync_factor == 0x00)
return false;
if (qos->bcast.packing > 0x01)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index c5e8798e297c..17ca13e8c044 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -6374,9 +6374,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
if (!chan)
goto done;
+ chan = l2cap_chan_hold_unless_zero(chan);
+ if (!chan)
+ goto done;
+
l2cap_chan_lock(chan);
l2cap_chan_del(chan, ECONNREFUSED);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
done:
mutex_unlock(&conn->chan_lock);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index eebe256104bc..947ca580bb9a 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -46,6 +46,7 @@ static const struct proto_ops l2cap_sock_ops;
static void l2cap_sock_init(struct sock *sk, struct sock *parent);
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
int proto, gfp_t prio, int kern);
+static void l2cap_sock_cleanup_listen(struct sock *parent);
bool l2cap_is_socket(struct socket *sock)
{
@@ -1415,6 +1416,7 @@ static int l2cap_sock_release(struct socket *sock)
if (!sk)
return 0;
+ l2cap_sock_cleanup_listen(sk);
bt_sock_unlink(&l2cap_sk_list, sk);
err = l2cap_sock_shutdown(sock, SHUT_RDWR);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 3f04b40f6056..2450690f98cf 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -166,8 +166,9 @@ void br_manage_promisc(struct net_bridge *br)
* This lets us disable promiscuous mode and write
* this config to hw.
*/
- if (br->auto_cnt == 0 ||
- (br->auto_cnt == 1 && br_auto_port(p)))
+ if ((p->dev->priv_flags & IFF_UNICAST_FLT) &&
+ (br->auto_cnt == 0 ||
+ (br->auto_cnt == 1 && br_auto_port(p))))
br_port_clear_promisc(p);
else
br_port_set_promisc(p);
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index a5f3b73da417..ade3eeb2f3e6 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -58,11 +58,8 @@
#define SJA1110_TX_TRAILER_LEN 4
#define SJA1110_MAX_PADDING_LEN 15
-#define SJA1105_HWTS_RX_EN 0
-
struct sja1105_tagger_private {
struct sja1105_tagger_data data; /* Must be first */
- unsigned long state;
/* Protects concurrent access to the meta state machine
* from taggers running on multiple ports on SMP systems
*/
@@ -118,8 +115,8 @@ static void sja1105_meta_unpack(const struct sk_buff *skb,
* a unified unpacking command for both device series.
*/
packing(buf, &meta->tstamp, 31, 0, 4, UNPACK, 0);
- packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
- packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
+ packing(buf + 4, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
+ packing(buf + 5, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
}
@@ -392,10 +389,6 @@ static struct sk_buff
priv = sja1105_tagger_private(ds);
- if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
- /* Do normal processing. */
- return skb;
-
spin_lock(&priv->meta_lock);
/* Was this a link-local frame instead of the meta
* that we were expecting?
@@ -431,12 +424,6 @@ static struct sk_buff
priv = sja1105_tagger_private(ds);
- /* Drop the meta frame if we're not in the right state
- * to process it.
- */
- if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
- return NULL;
-
spin_lock(&priv->meta_lock);
stampable_skb = priv->stampable_skb;
@@ -472,30 +459,6 @@ static struct sk_buff
return skb;
}
-static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds)
-{
- struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
-
- return test_bit(SJA1105_HWTS_RX_EN, &priv->state);
-}
-
-static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on)
-{
- struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
-
- if (on)
- set_bit(SJA1105_HWTS_RX_EN, &priv->state);
- else
- clear_bit(SJA1105_HWTS_RX_EN, &priv->state);
-
- /* Initialize the meta state machine to a known state */
- if (!priv->stampable_skb)
- return;
-
- kfree_skb(priv->stampable_skb);
- priv->stampable_skb = NULL;
-}
-
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
u16 tpid = ntohs(eth_hdr(skb)->h_proto);
@@ -545,33 +508,53 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
is_link_local = sja1105_is_link_local(skb);
is_meta = sja1105_is_meta_frame(skb);
- if (sja1105_skb_has_tag_8021q(skb)) {
- /* Normal traffic path. */
- sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
- } else if (is_link_local) {
+ if (is_link_local) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
* the incl_srcpt options.
*/
source_port = hdr->h_dest[3];
switch_id = hdr->h_dest[4];
- /* Clear the DMAC bytes that were mangled by the switch */
- hdr->h_dest[3] = 0;
- hdr->h_dest[4] = 0;
} else if (is_meta) {
sja1105_meta_unpack(skb, &meta);
source_port = meta.source_port;
switch_id = meta.switch_id;
- } else {
+ }
+
+ /* Normal data plane traffic and link-local frames are tagged with
+ * a tag_8021q VLAN which we have to strip
+ */
+ if (sja1105_skb_has_tag_8021q(skb)) {
+ int tmp_source_port = -1, tmp_switch_id = -1;
+
+ sja1105_vlan_rcv(skb, &tmp_source_port, &tmp_switch_id, &vbid,
+ &vid);
+ /* Preserve the source information from the INCL_SRCPT option,
+ * if available. This allows us to not overwrite a valid source
+ * port and switch ID with zeroes when receiving link-local
+ * frames from a VLAN-unaware bridged port (non-zero vbid) or a
+ * VLAN-aware bridged port (non-zero vid). Furthermore, the
+	 * tag_8021q source port information is only trustworthy when the
+ * vbid is 0 (precise port). Otherwise, tmp_source_port and
+ * tmp_switch_id will be zeroes.
+ */
+ if (vbid == 0 && source_port == -1)
+ source_port = tmp_source_port;
+ if (vbid == 0 && switch_id == -1)
+ switch_id = tmp_switch_id;
+ } else if (source_port == -1 && switch_id == -1) {
+ /* Packets with no source information have no chance of
+ * getting accepted, drop them straight away.
+ */
return NULL;
}
- if (vbid >= 1)
+ if (source_port != -1 && switch_id != -1)
+ skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
+ else if (vbid >= 1)
skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
- else if (source_port == -1 || switch_id == -1)
- skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
else
- skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
+ skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
@@ -762,7 +745,6 @@ static void sja1105_disconnect(struct dsa_switch *ds)
static int sja1105_connect(struct dsa_switch *ds)
{
- struct sja1105_tagger_data *tagger_data;
struct sja1105_tagger_private *priv;
struct kthread_worker *xmit_worker;
int err;
@@ -782,10 +764,6 @@ static int sja1105_connect(struct dsa_switch *ds)
}
priv->xmit_worker = xmit_worker;
- /* Export functions for switch driver use */
- tagger_data = &priv->data;
- tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
- tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
ds->tagger_data = priv;
return 0;
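
For clarity, the receive-path precedence introduced above boils down to: precise source information (from a link-local or meta frame, or from tag_8021q when vbid is 0) wins; otherwise a non-zero vbid steers the frame to a VLAN-unaware bridge, and failing that the designated bridge port for the frame's VLAN is used. A condensed, illustrative-only restatement of that routing order; the helper name and strings below are not the kernel's:

#include <stdio.h>

/* Illustrative only: condenses the routing order at the end of
 * sja1105_rcv() above. source_port/switch_id are -1 when the frame
 * carried no precise source information.
 */
static const char *rx_route(int source_port, int switch_id, int vbid)
{
	if (source_port != -1 && switch_id != -1)
		return "precise port (link-local, meta, or tag_8021q with vbid 0)";
	if (vbid >= 1)
		return "imprecise: any port of bridge vbid";
	return "imprecise: designated port for the frame's VLAN";
}

int main(void)
{
	printf("%s\n", rx_route(2, 0, 0));	/* e.g. a meta frame */
	printf("%s\n", rx_route(-1, -1, 3));	/* VLAN-unaware bridged port */
	printf("%s\n", rx_route(-1, -1, 0));	/* VLAN-aware bridged port */
	return 0;
}
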
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6f072095211e..57c8af1859c1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3590,8 +3590,11 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
u32 *last_oow_ack_time)
{
- if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
+ /* Paired with the WRITE_ONCE() in this function. */
+ u32 val = READ_ONCE(*last_oow_ack_time);
+
+ if (val) {
+ s32 elapsed = (s32)(tcp_jiffies32 - val);
if (0 <= elapsed &&
elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
@@ -3600,7 +3603,10 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
}
}
- *last_oow_ack_time = tcp_jiffies32;
+ /* Paired with the prior READ_ONCE() and with itself,
+ * as we might be lockless.
+ */
+ WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32);
return false; /* not rate-limited: go ahead, send dupack now! */
}
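
The annotations above mark last_oow_ack_time as a field that may be read and written concurrently without a lock, so each access has to be a single, tear-free load or store. A rough userspace analogue of the same pattern, using C11 relaxed atomics in place of READ_ONCE()/WRITE_ONCE(); the timestamp arithmetic is simplified and the values are hypothetical:

#include <stdatomic.h>
#include <stdio.h>

/* Relaxed atomics give whole-value loads/stores without extra ordering,
 * which is all the rate limiter above relies on.
 */
static _Atomic unsigned int last_oow_ack_time;

static int oow_rate_limited(unsigned int now, unsigned int min_interval)
{
	unsigned int val = atomic_load_explicit(&last_oow_ack_time,
						memory_order_relaxed);

	if (val && (int)(now - val) >= 0 && now - val < min_interval)
		return 1;	/* rate-limited: suppress this response */

	atomic_store_explicit(&last_oow_ack_time, now, memory_order_relaxed);
	return 0;	/* not rate-limited */
}

int main(void)
{
	printf("%d\n", oow_rate_limited(100, 50));	/* 0: first one passes */
	printf("%d\n", oow_rate_limited(120, 50));	/* 1: within the window */
	return 0;
}
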
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 6de8d0ad5497..2dc732147e85 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -282,7 +282,7 @@ static void tpt_trig_timer(struct timer_list *t)
}
}
- led_trigger_blink(&local->tpt_led, &on, &off);
+ led_trigger_blink(&local->tpt_led, on, off);
}
const char *
diff --git a/net/mac80211/led.h b/net/mac80211/led.h
index b71a1428d883..d25f13346b82 100644
--- a/net/mac80211/led.h
+++ b/net/mac80211/led.h
@@ -13,22 +13,18 @@
static inline void ieee80211_led_rx(struct ieee80211_local *local)
{
#ifdef CONFIG_MAC80211_LEDS
- unsigned long led_delay = MAC80211_BLINK_DELAY;
-
if (!atomic_read(&local->rx_led_active))
return;
- led_trigger_blink_oneshot(&local->rx_led, &led_delay, &led_delay, 0);
+ led_trigger_blink_oneshot(&local->rx_led, MAC80211_BLINK_DELAY, MAC80211_BLINK_DELAY, 0);
#endif
}
static inline void ieee80211_led_tx(struct ieee80211_local *local)
{
#ifdef CONFIG_MAC80211_LEDS
- unsigned long led_delay = MAC80211_BLINK_DELAY;
-
if (!atomic_read(&local->tx_led_active))
return;
- led_trigger_blink_oneshot(&local->tx_led, &led_delay, &led_delay, 0);
+ led_trigger_blink_oneshot(&local->tx_led, MAC80211_BLINK_DELAY, MAC80211_BLINK_DELAY, 0);
#endif
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e892673deb73..3613489eb6e3 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2909,10 +2909,10 @@ static void mptcp_check_listen_stop(struct sock *sk)
return;
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+ tcp_set_state(ssk, TCP_CLOSE);
mptcp_subflow_queue_clean(sk, ssk);
inet_csk_listen_stop(ssk);
mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
- tcp_set_state(ssk, TCP_CLOSE);
release_sock(ssk);
}
@@ -3703,6 +3703,11 @@ static int mptcp_listen(struct socket *sock, int backlog)
pr_debug("msk=%p", msk);
lock_sock(sk);
+
+ err = -EINVAL;
+ if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
+ goto unlock;
+
ssock = __mptcp_nmpc_socket(msk);
if (IS_ERR(ssock)) {
err = PTR_ERR(ssock);
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 66b0f941d8fb..36c9720ad8d6 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -43,7 +43,6 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_led_info *ledinfo = par->targinfo;
struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
- unsigned long led_delay = XT_LED_BLINK_DELAY;
/*
* If "always blink" is enabled, and there's still some time until the
@@ -52,7 +51,7 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par)
if ((ledinfo->delay > 0) && ledinfo->always_blink &&
timer_pending(&ledinternal->timer))
led_trigger_blink_oneshot(&ledinternal->netfilter_led_trigger,
- &led_delay, &led_delay, 1);
+ XT_LED_BLINK_DELAY, XT_LED_BLINK_DELAY, 1);
else
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 5d96ffebd40f..598d6e299152 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -21,6 +21,7 @@
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>
#include <net/tc_wrapper.h>
+#include <net/ip.h>
#include <linux/netfilter_ipv4/ip_tables.h>
@@ -48,7 +49,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
par.entryinfo = &e;
par.target = target;
par.targinfo = t->data;
- par.hook_mask = hook;
+ par.hook_mask = 1 << hook;
par.family = NFPROTO_IPV4;
ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
@@ -85,7 +86,8 @@ static void tcf_ipt_release(struct tc_action *a)
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
[TCA_IPT_TABLE] = { .type = NLA_STRING, .len = IFNAMSIZ },
- [TCA_IPT_HOOK] = { .type = NLA_U32 },
+ [TCA_IPT_HOOK] = NLA_POLICY_RANGE(NLA_U32, NF_INET_PRE_ROUTING,
+ NF_INET_NUMHOOKS),
[TCA_IPT_INDEX] = { .type = NLA_U32 },
[TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },
};
@@ -158,15 +160,27 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
return -EEXIST;
}
}
+
+ err = -EINVAL;
hook = nla_get_u32(tb[TCA_IPT_HOOK]);
+ switch (hook) {
+ case NF_INET_PRE_ROUTING:
+ break;
+ case NF_INET_POST_ROUTING:
+ break;
+ default:
+ goto err1;
+ }
+
+ if (tb[TCA_IPT_TABLE]) {
+ /* mangle only for now */
+ if (nla_strcmp(tb[TCA_IPT_TABLE], "mangle"))
+ goto err1;
+ }
- err = -ENOMEM;
- tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
+ tname = kstrdup("mangle", GFP_KERNEL);
if (unlikely(!tname))
goto err1;
- if (tb[TCA_IPT_TABLE] == NULL ||
- nla_strscpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
- strcpy(tname, "mangle");
t = kmemdup(td, td->u.target_size, GFP_KERNEL);
if (unlikely(!t))
@@ -217,10 +231,31 @@ static int tcf_xt_init(struct net *net, struct nlattr *nla,
a, &act_xt_ops, tp, flags);
}
+static bool tcf_ipt_act_check(struct sk_buff *skb)
+{
+ const struct iphdr *iph;
+ unsigned int nhoff, len;
+
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return false;
+
+ nhoff = skb_network_offset(skb);
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return false;
+
+ len = skb_ip_totlen(skb);
+ if (skb->len < nhoff + len || len < (iph->ihl * 4u))
+ return false;
+
+ return pskb_may_pull(skb, iph->ihl * 4u);
+}
+
TC_INDIRECT_SCOPE int tcf_ipt_act(struct sk_buff *skb,
const struct tc_action *a,
struct tcf_result *res)
{
+ char saved_cb[sizeof_field(struct sk_buff, cb)];
int ret = 0, result = 0;
struct tcf_ipt *ipt = to_ipt(a);
struct xt_action_param par;
@@ -231,9 +266,24 @@ TC_INDIRECT_SCOPE int tcf_ipt_act(struct sk_buff *skb,
.pf = NFPROTO_IPV4,
};
+ if (skb_protocol(skb, false) != htons(ETH_P_IP))
+ return TC_ACT_UNSPEC;
+
if (skb_unclone(skb, GFP_ATOMIC))
return TC_ACT_UNSPEC;
+ if (!tcf_ipt_act_check(skb))
+ return TC_ACT_UNSPEC;
+
+ if (state.hook == NF_INET_POST_ROUTING) {
+ if (!skb_dst(skb))
+ return TC_ACT_UNSPEC;
+
+ state.out = skb->dev;
+ }
+
+ memcpy(saved_cb, skb->cb, sizeof(saved_cb));
+
spin_lock(&ipt->tcf_lock);
tcf_lastuse_update(&ipt->tcf_tm);
@@ -246,6 +296,9 @@ TC_INDIRECT_SCOPE int tcf_ipt_act(struct sk_buff *skb,
par.state = &state;
par.target = ipt->tcfi_t->u.kernel.target;
par.targinfo = ipt->tcfi_t->data;
+
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
ret = par.target->target(skb, &par);
switch (ret) {
@@ -266,6 +319,9 @@ TC_INDIRECT_SCOPE int tcf_ipt_act(struct sk_buff *skb,
break;
}
spin_unlock(&ipt->tcf_lock);
+
+ memcpy(skb->cb, saved_cb, sizeof(skb->cb));
+
return result;
}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index b562fc2bb5b1..1ef8fcfa9997 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -29,6 +29,7 @@ static struct tc_action_ops act_pedit_ops;
static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
[TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) },
+ [TCA_PEDIT_PARMS_EX] = { .len = sizeof(struct tc_pedit) },
[TCA_PEDIT_KEYS_EX] = { .type = NLA_NESTED },
};
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6554a357fe33..9388d98aebc0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -364,9 +364,9 @@ static void sctp_auto_asconf_init(struct sctp_sock *sp)
struct net *net = sock_net(&sp->inet.sk);
if (net->sctp.default_auto_asconf) {
- spin_lock(&net->sctp.addr_wq_lock);
+ spin_lock_bh(&net->sctp.addr_wq_lock);
list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
- spin_unlock(&net->sctp.addr_wq_lock);
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
sp->do_auto_asconf = 1;
}
}
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 1c8de397d6ad..f89c10fe7e6a 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_SUNRPC_GSS) += auth_gss/
obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma/
sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
- auth.o auth_null.o auth_unix.o \
+ auth.o auth_null.o auth_tls.o auth_unix.o \
svc.o svcsock.o svcauth.o svcauth_unix.o \
addr.o rpcb_clnt.o timer.o xdr.o \
sunrpc_syms.o cache.o rpc_pipe.o sysfs.o \
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index fb75a883503f..2f16f9d17966 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -32,7 +32,7 @@ static unsigned int auth_hashbits = RPC_CREDCACHE_DEFAULT_HASHBITS;
static const struct rpc_authops __rcu *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
[RPC_AUTH_NULL] = (const struct rpc_authops __force __rcu *)&authnull_ops,
[RPC_AUTH_UNIX] = (const struct rpc_authops __force __rcu *)&authunix_ops,
- NULL, /* others can be loadable modules */
+ [RPC_AUTH_TLS] = (const struct rpc_authops __force __rcu *)&authtls_ops,
};
static LIST_HEAD(cred_unused);
diff --git a/net/sunrpc/auth_tls.c b/net/sunrpc/auth_tls.c
new file mode 100644
index 000000000000..de7678f8a23d
--- /dev/null
+++ b/net/sunrpc/auth_tls.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, 2022 Oracle. All rights reserved.
+ *
+ * The AUTH_TLS credential is used only to probe a remote peer
+ * for RPC-over-TLS support.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/sunrpc/clnt.h>
+
+static const char *starttls_token = "STARTTLS";
+static const size_t starttls_len = 8;
+
+static struct rpc_auth tls_auth;
+static struct rpc_cred tls_cred;
+
+static void tls_encode_probe(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ const void *obj)
+{
+}
+
+static int tls_decode_probe(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ void *obj)
+{
+ return 0;
+}
+
+static const struct rpc_procinfo rpcproc_tls_probe = {
+ .p_encode = tls_encode_probe,
+ .p_decode = tls_decode_probe,
+};
+
+static void rpc_tls_probe_call_prepare(struct rpc_task *task, void *data)
+{
+ task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
+ rpc_call_start(task);
+}
+
+static void rpc_tls_probe_call_done(struct rpc_task *task, void *data)
+{
+}
+
+static const struct rpc_call_ops rpc_tls_probe_ops = {
+ .rpc_call_prepare = rpc_tls_probe_call_prepare,
+ .rpc_call_done = rpc_tls_probe_call_done,
+};
+
+static int tls_probe(struct rpc_clnt *clnt)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &rpcproc_tls_probe,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clnt,
+ .rpc_message = &msg,
+ .rpc_op_cred = &tls_cred,
+ .callback_ops = &rpc_tls_probe_ops,
+ .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
+ };
+ struct rpc_task *task;
+ int status;
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = task->tk_status;
+ rpc_put_task(task);
+ return status;
+}
+
+static struct rpc_auth *tls_create(const struct rpc_auth_create_args *args,
+ struct rpc_clnt *clnt)
+{
+ refcount_inc(&tls_auth.au_count);
+ return &tls_auth;
+}
+
+static void tls_destroy(struct rpc_auth *auth)
+{
+}
+
+static struct rpc_cred *tls_lookup_cred(struct rpc_auth *auth,
+ struct auth_cred *acred, int flags)
+{
+ return get_rpccred(&tls_cred);
+}
+
+static void tls_destroy_cred(struct rpc_cred *cred)
+{
+}
+
+static int tls_match(struct auth_cred *acred, struct rpc_cred *cred, int taskflags)
+{
+ return 1;
+}
+
+static int tls_marshal(struct rpc_task *task, struct xdr_stream *xdr)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4 * XDR_UNIT);
+ if (!p)
+ return -EMSGSIZE;
+ /* Credential */
+ *p++ = rpc_auth_tls;
+ *p++ = xdr_zero;
+ /* Verifier */
+ *p++ = rpc_auth_null;
+ *p = xdr_zero;
+ return 0;
+}
+
+static int tls_refresh(struct rpc_task *task)
+{
+ set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);
+ return 0;
+}
+
+static int tls_validate(struct rpc_task *task, struct xdr_stream *xdr)
+{
+ __be32 *p;
+ void *str;
+
+ p = xdr_inline_decode(xdr, XDR_UNIT);
+ if (!p)
+ return -EIO;
+ if (*p != rpc_auth_null)
+ return -EIO;
+ if (xdr_stream_decode_opaque_inline(xdr, &str, starttls_len) != starttls_len)
+ return -EIO;
+ if (memcmp(str, starttls_token, starttls_len))
+ return -EIO;
+ return 0;
+}
+
+const struct rpc_authops authtls_ops = {
+ .owner = THIS_MODULE,
+ .au_flavor = RPC_AUTH_TLS,
+ .au_name = "NULL",
+ .create = tls_create,
+ .destroy = tls_destroy,
+ .lookup_cred = tls_lookup_cred,
+ .ping = tls_probe,
+};
+
+static struct rpc_auth tls_auth = {
+ .au_cslack = NUL_CALLSLACK,
+ .au_rslack = NUL_REPLYSLACK,
+ .au_verfsize = NUL_REPLYSLACK,
+ .au_ralign = NUL_REPLYSLACK,
+ .au_ops = &authtls_ops,
+ .au_flavor = RPC_AUTH_TLS,
+ .au_count = REFCOUNT_INIT(1),
+};
+
+static const struct rpc_credops tls_credops = {
+ .cr_name = "AUTH_TLS",
+ .crdestroy = tls_destroy_cred,
+ .crmatch = tls_match,
+ .crmarshal = tls_marshal,
+ .crwrap_req = rpcauth_wrap_req_encode,
+ .crrefresh = tls_refresh,
+ .crvalidate = tls_validate,
+ .crunwrap_resp = rpcauth_unwrap_resp_decode,
+};
+
+static struct rpc_cred tls_cred = {
+ .cr_lru = LIST_HEAD_INIT(tls_cred.cr_lru),
+ .cr_auth = &tls_auth,
+ .cr_ops = &tls_credops,
+ .cr_count = REFCOUNT_INIT(2),
+ .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE,
+};
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d2ee56634308..d7c697af3762 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -385,6 +385,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
if (!clnt)
goto out_err;
clnt->cl_parent = parent ? : clnt;
+ clnt->cl_xprtsec = args->xprtsec;
err = rpc_alloc_clid(clnt);
if (err)
@@ -434,7 +435,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
if (parent)
refcount_inc(&parent->cl_count);
- trace_rpc_clnt_new(clnt, xprt, program->name, args->servername);
+ trace_rpc_clnt_new(clnt, xprt, args);
return clnt;
out_no_path:
@@ -532,6 +533,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
.addrlen = args->addrsize,
.servername = args->servername,
.bc_xprt = args->bc_xprt,
+ .xprtsec = args->xprtsec,
};
char servername[48];
struct rpc_clnt *clnt;
@@ -565,8 +567,12 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
servername[0] = '\0';
switch (args->address->sa_family) {
case AF_LOCAL:
- snprintf(servername, sizeof(servername), "%s",
- sun->sun_path);
+ if (sun->sun_path[0])
+ snprintf(servername, sizeof(servername), "%s",
+ sun->sun_path);
+ else
+ snprintf(servername, sizeof(servername), "@%s",
+ sun->sun_path+1);
break;
case AF_INET:
snprintf(servername, sizeof(servername), "%pI4",
@@ -727,6 +733,7 @@ int rpc_switch_client_transport(struct rpc_clnt *clnt,
struct rpc_clnt *parent;
int err;
+ args->xprtsec = clnt->cl_xprtsec;
xprt = xprt_create_transport(args);
if (IS_ERR(xprt))
return PTR_ERR(xprt);
@@ -1717,6 +1724,11 @@ call_start(struct rpc_task *task)
trace_rpc_request(task);
+ if (task->tk_client->cl_shutdown) {
+ rpc_call_rpcerror(task, -EIO);
+ return;
+ }
+
/* Increment call count (version might not be valid for ping) */
if (clnt->cl_program->version[clnt->cl_vers])
clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
@@ -2826,6 +2838,9 @@ static int rpc_ping(struct rpc_clnt *clnt)
struct rpc_task *task;
int status;
+ if (clnt->cl_auth->au_ops->ping)
+ return clnt->cl_auth->au_ops->ping(clnt);
+
task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -3046,6 +3061,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
if (!xprtargs->ident)
xprtargs->ident = ident;
+ xprtargs->xprtsec = clnt->cl_xprtsec;
xprt = xprt_create_transport(xprtargs);
if (IS_ERR(xprt)) {
ret = PTR_ERR(xprt);
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 5a8e6d46809a..5988a5c5ff3f 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -36,6 +36,7 @@
#include "netns.h"
#define RPCBIND_SOCK_PATHNAME "/var/run/rpcbind.sock"
+#define RPCBIND_SOCK_ABSTRACT_NAME "\0/run/rpcbind.sock"
#define RPCBIND_PROGRAM (100000u)
#define RPCBIND_PORT (111u)
@@ -216,21 +217,22 @@ static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
sn->rpcb_users = 1;
}
+/* Evaluate to actual length of the `sockaddr_un' structure. */
+# define SUN_LEN(ptr) (offsetof(struct sockaddr_un, sun_path) \
+ + 1 + strlen((ptr)->sun_path + 1))
+
/*
* Returns zero on success, otherwise a negative errno value
* is returned.
*/
-static int rpcb_create_local_unix(struct net *net)
+static int rpcb_create_af_local(struct net *net,
+ const struct sockaddr_un *addr)
{
- static const struct sockaddr_un rpcb_localaddr_rpcbind = {
- .sun_family = AF_LOCAL,
- .sun_path = RPCBIND_SOCK_PATHNAME,
- };
struct rpc_create_args args = {
.net = net,
.protocol = XPRT_TRANSPORT_LOCAL,
- .address = (struct sockaddr *)&rpcb_localaddr_rpcbind,
- .addrsize = sizeof(rpcb_localaddr_rpcbind),
+ .address = (struct sockaddr *)addr,
+ .addrsize = SUN_LEN(addr),
.servername = "localhost",
.program = &rpcb_program,
.version = RPCBVERS_2,
@@ -269,6 +271,26 @@ out:
return result;
}
+static int rpcb_create_local_abstract(struct net *net)
+{
+ static const struct sockaddr_un rpcb_localaddr_abstract = {
+ .sun_family = AF_LOCAL,
+ .sun_path = RPCBIND_SOCK_ABSTRACT_NAME,
+ };
+
+ return rpcb_create_af_local(net, &rpcb_localaddr_abstract);
+}
+
+static int rpcb_create_local_unix(struct net *net)
+{
+ static const struct sockaddr_un rpcb_localaddr_unix = {
+ .sun_family = AF_LOCAL,
+ .sun_path = RPCBIND_SOCK_PATHNAME,
+ };
+
+ return rpcb_create_af_local(net, &rpcb_localaddr_unix);
+}
+
/*
* Returns zero on success, otherwise a negative errno value
* is returned.
@@ -332,7 +354,8 @@ int rpcb_create_local(struct net *net)
if (rpcb_get_local(net))
goto out;
- if (rpcb_create_local_unix(net) != 0)
+ if (rpcb_create_local_abstract(net) != 0 &&
+ rpcb_create_local_unix(net) != 0)
result = rpcb_create_local_net(net);
out:
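
The rpcbind client above now tries an abstract AF_LOCAL address first (note the leading NUL byte in RPCBIND_SOCK_ABSTRACT_NAME) and only falls back to the filesystem socket; SUN_LEN() is redefined to count the NUL byte plus the name that follows it. A minimal userspace sketch of constructing the same kind of address, using the socket name from the patch; the actual bind()/connect() and error handling are omitted:

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
	struct sockaddr_un addr = { .sun_family = AF_LOCAL };
	const char *name = "/run/rpcbind.sock";
	socklen_t len;

	/* Abstract namespace: sun_path starts with a NUL byte, then the name. */
	addr.sun_path[0] = '\0';
	memcpy(addr.sun_path + 1, name, strlen(name));

	/* Same computation as the patched SUN_LEN(): header + NUL + name bytes. */
	len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(name);

	printf("addrlen = %u\n", (unsigned int)len);
	/* bind()/connect() would take (struct sockaddr *)&addr and len here. */
	return 0;
}
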
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index 0d0db4e1064e..5c8ecdaaa985 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -239,6 +239,7 @@ static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj,
if (!xprt)
return 0;
if (!(xprt->xprt_class->ident == XPRT_TRANSPORT_TCP ||
+ xprt->xprt_class->ident == XPRT_TRANSPORT_TCP_TLS ||
xprt->xprt_class->ident == XPRT_TRANSPORT_RDMA)) {
xprt_put(xprt);
return -EOPNOTSUPP;
diff --git a/net/sunrpc/sysfs.h b/net/sunrpc/sysfs.h
index 6620cebd1037..d2dd77a0a0e9 100644
--- a/net/sunrpc/sysfs.h
+++ b/net/sunrpc/sysfs.h
@@ -5,13 +5,6 @@
#ifndef __SUNRPC_SYSFS_H
#define __SUNRPC_SYSFS_H
-struct rpc_sysfs_client {
- struct kobject kobject;
- struct net *net;
- struct rpc_clnt *clnt;
- struct rpc_xprt_switch *xprt_switch;
-};
-
struct rpc_sysfs_xprt_switch {
struct kobject kobject;
struct net *net;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 5f9030b81c9e..9f010369100a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -47,6 +47,9 @@
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>
+#include <net/tls.h>
+#include <net/handshake.h>
+
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uio.h>
@@ -96,6 +99,7 @@ static struct ctl_table_header *sunrpc_table_header;
static struct xprt_class xs_local_transport;
static struct xprt_class xs_udp_transport;
static struct xprt_class xs_tcp_transport;
+static struct xprt_class xs_tcp_tls_transport;
static struct xprt_class xs_bc_tcp_transport;
/*
@@ -187,6 +191,11 @@ static struct ctl_table xs_tunables_table[] = {
*/
#define XS_IDLE_DISC_TO (5U * 60 * HZ)
+/*
+ * TLS handshake timeout.
+ */
+#define XS_TLS_HANDSHAKE_TO (10U * HZ)
+
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# undef RPC_DEBUG_DATA
# define RPCDBG_FACILITY RPCDBG_TRANS
@@ -253,7 +262,12 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
switch (sap->sa_family) {
case AF_LOCAL:
sun = xs_addr_un(xprt);
- strscpy(buf, sun->sun_path, sizeof(buf));
+ if (sun->sun_path[0]) {
+ strscpy(buf, sun->sun_path, sizeof(buf));
+ } else {
+ buf[0] = '@';
+ strscpy(buf+1, sun->sun_path+1, sizeof(buf)-1);
+ }
xprt->address_strings[RPC_DISPLAY_ADDR] =
kstrdup(buf, GFP_KERNEL);
break;
@@ -342,13 +356,56 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
return want;
}
+static int
+xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+ struct cmsghdr *cmsg, int ret)
+{
+ if (cmsg->cmsg_level == SOL_TLS &&
+ cmsg->cmsg_type == TLS_GET_RECORD_TYPE) {
+ u8 content_type = *((u8 *)CMSG_DATA(cmsg));
+
+ switch (content_type) {
+ case TLS_RECORD_TYPE_DATA:
+ /* TLS sets EOR at the end of each application data
+ * record, even though there might be more frames
+ * waiting to be decrypted.
+ */
+ msg->msg_flags &= ~MSG_EOR;
+ break;
+ case TLS_RECORD_TYPE_ALERT:
+ ret = -ENOTCONN;
+ break;
+ default:
+ ret = -EAGAIN;
+ }
+ }
+ return ret;
+}
+
+static int
+xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags)
+{
+ union {
+ struct cmsghdr cmsg;
+ u8 buf[CMSG_SPACE(sizeof(u8))];
+ } u;
+ int ret;
+
+ msg->msg_control = &u;
+ msg->msg_controllen = sizeof(u);
+ ret = sock_recvmsg(sock, msg, flags);
+ if (msg->msg_controllen != sizeof(u))
+ ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret);
+ return ret;
+}
+
static ssize_t
xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
{
ssize_t ret;
if (seek != 0)
iov_iter_advance(&msg->msg_iter, seek);
- ret = sock_recvmsg(sock, msg, flags);
+ ret = xs_sock_recv_cmsg(sock, msg, flags);
return ret > 0 ? ret + seek : ret;
}
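
xs_sock_process_cmsg() above inspects the TLS_GET_RECORD_TYPE control message that kTLS attaches to decrypted records, turning alerts into -ENOTCONN and unexpected record types into -EAGAIN. The same control-message interface is visible from userspace; a minimal sketch of reading one record and its type from an already-established kTLS socket (socket setup and the TLS handshake are omitted, and the SOL_TLS fallback define is only needed with older userspace headers):

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* from include/linux/socket.h */
#endif

/* Read one record from a kTLS socket and report its TLS record type. */
ssize_t read_tls_record(int fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n;

	n = recvmsg(fd, &msg, 0);
	if (n <= 0)
		return n;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_TLS &&
		    cmsg->cmsg_type == TLS_GET_RECORD_TYPE) {
			unsigned char record_type = *CMSG_DATA(cmsg);

			/* 23 is application_data; anything else (e.g. an
			 * alert) needs special handling, as in the kernel
			 * hunk above.
			 */
			printf("record type %u, %zd bytes\n", record_type, n);
		}
	}
	return n;
}
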
@@ -374,7 +431,7 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
size_t count)
{
iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
- return sock_recvmsg(sock, msg, flags);
+ return xs_sock_recv_cmsg(sock, msg, flags);
}
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
@@ -695,6 +752,8 @@ static void xs_poll_check_readable(struct sock_xprt *transport)
{
clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
+ if (test_bit(XPRT_SOCK_IGNORE_RECV, &transport->sock_state))
+ return;
if (!xs_poll_socket_readable(transport))
return;
if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
@@ -1191,6 +1250,8 @@ static void xs_reset_transport(struct sock_xprt *transport)
if (atomic_read(&transport->xprt.swapper))
sk_clear_memalloc(sk);
+ tls_handshake_cancel(sk);
+
kernel_sock_shutdown(sock, SHUT_RDWR);
mutex_lock(&transport->recv_mutex);
@@ -1380,6 +1441,10 @@ static void xs_data_ready(struct sock *sk)
trace_xs_data_ready(xprt);
transport->old_data_ready(sk);
+
+ if (test_bit(XPRT_SOCK_IGNORE_RECV, &transport->sock_state))
+ return;
+
/* Any data means we had a useful conversation, so
* then we don't need to delay the next reconnect
*/
@@ -2360,6 +2425,267 @@ out_unlock:
current_restore_flags(pflags, PF_MEMALLOC);
}
+/*
+ * Transfer the connected socket to @upper_transport, then mark that
+ * xprt CONNECTED.
+ */
+static int xs_tcp_tls_finish_connecting(struct rpc_xprt *lower_xprt,
+ struct sock_xprt *upper_transport)
+{
+ struct sock_xprt *lower_transport =
+ container_of(lower_xprt, struct sock_xprt, xprt);
+ struct rpc_xprt *upper_xprt = &upper_transport->xprt;
+
+ if (!upper_transport->inet) {
+ struct socket *sock = lower_transport->sock;
+ struct sock *sk = sock->sk;
+
+		/* Avoid temporary addresses; they are bad for long-lived
+ * connections such as NFS mounts.
+ * RFC4941, section 3.6 suggests that:
+ * Individual applications, which have specific
+ * knowledge about the normal duration of connections,
+ * MAY override this as appropriate.
+ */
+ if (xs_addr(upper_xprt)->sa_family == PF_INET6)
+ ip6_sock_set_addr_preferences(sk, IPV6_PREFER_SRC_PUBLIC);
+
+ xs_tcp_set_socket_timeouts(upper_xprt, sock);
+ tcp_sock_set_nodelay(sk);
+
+ lock_sock(sk);
+
+ /* @sk is already connected, so it now has the RPC callbacks.
+ * Reach into @lower_transport to save the original ones.
+ */
+ upper_transport->old_data_ready = lower_transport->old_data_ready;
+ upper_transport->old_state_change = lower_transport->old_state_change;
+ upper_transport->old_write_space = lower_transport->old_write_space;
+ upper_transport->old_error_report = lower_transport->old_error_report;
+ sk->sk_user_data = upper_xprt;
+
+ /* socket options */
+ sock_reset_flag(sk, SOCK_LINGER);
+
+ xprt_clear_connected(upper_xprt);
+
+ upper_transport->sock = sock;
+ upper_transport->inet = sk;
+ upper_transport->file = lower_transport->file;
+
+ release_sock(sk);
+
+ /* Reset lower_transport before shutting down its clnt */
+ mutex_lock(&lower_transport->recv_mutex);
+ lower_transport->inet = NULL;
+ lower_transport->sock = NULL;
+ lower_transport->file = NULL;
+
+ xprt_clear_connected(lower_xprt);
+ xs_sock_reset_connection_flags(lower_xprt);
+ xs_stream_reset_connect(lower_transport);
+ mutex_unlock(&lower_transport->recv_mutex);
+ }
+
+ if (!xprt_bound(upper_xprt))
+ return -ENOTCONN;
+
+ xs_set_memalloc(upper_xprt);
+
+ if (!xprt_test_and_set_connected(upper_xprt)) {
+ upper_xprt->connect_cookie++;
+ clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state);
+ xprt_clear_connecting(upper_xprt);
+
+ upper_xprt->stat.connect_count++;
+ upper_xprt->stat.connect_time += (long)jiffies -
+ upper_xprt->stat.connect_start;
+ xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING);
+ }
+ return 0;
+}
+
+/**
+ * xs_tls_handshake_done - TLS handshake completion handler
+ * @data: address of xprt to wake
+ * @status: status of handshake
+ * @peerid: serial number of key containing the remote's identity
+ *
+ */
+static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid)
+{
+ struct rpc_xprt *lower_xprt = data;
+ struct sock_xprt *lower_transport =
+ container_of(lower_xprt, struct sock_xprt, xprt);
+
+ lower_transport->xprt_err = status ? -EACCES : 0;
+ complete(&lower_transport->handshake_done);
+ xprt_put(lower_xprt);
+}
+
+static int xs_tls_handshake_sync(struct rpc_xprt *lower_xprt, struct xprtsec_parms *xprtsec)
+{
+ struct sock_xprt *lower_transport =
+ container_of(lower_xprt, struct sock_xprt, xprt);
+ struct tls_handshake_args args = {
+ .ta_sock = lower_transport->sock,
+ .ta_done = xs_tls_handshake_done,
+ .ta_data = xprt_get(lower_xprt),
+ .ta_peername = lower_xprt->servername,
+ };
+ struct sock *sk = lower_transport->inet;
+ int rc;
+
+ init_completion(&lower_transport->handshake_done);
+ set_bit(XPRT_SOCK_IGNORE_RECV, &lower_transport->sock_state);
+ lower_transport->xprt_err = -ETIMEDOUT;
+ switch (xprtsec->policy) {
+ case RPC_XPRTSEC_TLS_ANON:
+ rc = tls_client_hello_anon(&args, GFP_KERNEL);
+ if (rc)
+ goto out_put_xprt;
+ break;
+ case RPC_XPRTSEC_TLS_X509:
+ args.ta_my_cert = xprtsec->cert_serial;
+ args.ta_my_privkey = xprtsec->privkey_serial;
+ rc = tls_client_hello_x509(&args, GFP_KERNEL);
+ if (rc)
+ goto out_put_xprt;
+ break;
+ default:
+ rc = -EACCES;
+ goto out_put_xprt;
+ }
+
+ rc = wait_for_completion_interruptible_timeout(&lower_transport->handshake_done,
+ XS_TLS_HANDSHAKE_TO);
+ if (rc <= 0) {
+ if (!tls_handshake_cancel(sk)) {
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ goto out_put_xprt;
+ }
+ }
+
+ rc = lower_transport->xprt_err;
+
+out:
+ xs_stream_reset_connect(lower_transport);
+ clear_bit(XPRT_SOCK_IGNORE_RECV, &lower_transport->sock_state);
+ return rc;
+
+out_put_xprt:
+ xprt_put(lower_xprt);
+ goto out;
+}
+
+/**
+ * xs_tcp_tls_setup_socket - establish a TLS session on a TCP socket
+ * @work: queued work item
+ *
+ * Invoked by a work queue tasklet.
+ *
+ * For RPC-with-TLS, there is a two-stage connection process.
+ *
+ * The "upper-layer xprt" is visible to the RPC consumer. Once it has
+ * been marked connected, the consumer knows that a TCP connection and
+ * a TLS session have been established.
+ *
+ * A "lower-layer xprt", created in this function, handles the mechanics
+ * of connecting the TCP socket, performing the RPC_AUTH_TLS probe, and
+ * then driving the TLS handshake. Once all that is complete, the upper
+ * layer xprt is marked connected.
+ */
+static void xs_tcp_tls_setup_socket(struct work_struct *work)
+{
+ struct sock_xprt *upper_transport =
+ container_of(work, struct sock_xprt, connect_worker.work);
+ struct rpc_clnt *upper_clnt = upper_transport->clnt;
+ struct rpc_xprt *upper_xprt = &upper_transport->xprt;
+ struct rpc_create_args args = {
+ .net = upper_xprt->xprt_net,
+ .protocol = upper_xprt->prot,
+ .address = (struct sockaddr *)&upper_xprt->addr,
+ .addrsize = upper_xprt->addrlen,
+ .timeout = upper_clnt->cl_timeout,
+ .servername = upper_xprt->servername,
+ .program = upper_clnt->cl_program,
+ .prognumber = upper_clnt->cl_prog,
+ .version = upper_clnt->cl_vers,
+ .authflavor = RPC_AUTH_TLS,
+ .cred = upper_clnt->cl_cred,
+ .xprtsec = {
+ .policy = RPC_XPRTSEC_NONE,
+ },
+ };
+ unsigned int pflags = current->flags;
+ struct rpc_clnt *lower_clnt;
+ struct rpc_xprt *lower_xprt;
+ int status;
+
+ if (atomic_read(&upper_xprt->swapper))
+ current->flags |= PF_MEMALLOC;
+
+ xs_stream_start_connect(upper_transport);
+
+ /* This implicitly sends an RPC_AUTH_TLS probe */
+ lower_clnt = rpc_create(&args);
+ if (IS_ERR(lower_clnt)) {
+ trace_rpc_tls_unavailable(upper_clnt, upper_xprt);
+ clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state);
+ xprt_clear_connecting(upper_xprt);
+ xprt_wake_pending_tasks(upper_xprt, PTR_ERR(lower_clnt));
+ xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING);
+ goto out_unlock;
+ }
+
+ /* RPC_AUTH_TLS probe was successful. Try a TLS handshake on
+ * the lower xprt.
+ */
+ rcu_read_lock();
+ lower_xprt = rcu_dereference(lower_clnt->cl_xprt);
+ rcu_read_unlock();
+ status = xs_tls_handshake_sync(lower_xprt, &upper_xprt->xprtsec);
+ if (status) {
+ trace_rpc_tls_not_started(upper_clnt, upper_xprt);
+ goto out_close;
+ }
+
+ status = xs_tcp_tls_finish_connecting(lower_xprt, upper_transport);
+ if (status)
+ goto out_close;
+
+ trace_rpc_socket_connect(upper_xprt, upper_transport->sock, 0);
+ if (!xprt_test_and_set_connected(upper_xprt)) {
+ upper_xprt->connect_cookie++;
+ clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state);
+ xprt_clear_connecting(upper_xprt);
+
+ upper_xprt->stat.connect_count++;
+ upper_xprt->stat.connect_time += (long)jiffies -
+ upper_xprt->stat.connect_start;
+ xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING);
+ }
+ rpc_shutdown_client(lower_clnt);
+
+out_unlock:
+ current_restore_flags(pflags, PF_MEMALLOC);
+ upper_transport->clnt = NULL;
+ xprt_unlock_connect(upper_xprt, upper_transport);
+ return;
+
+out_close:
+ rpc_shutdown_client(lower_clnt);
+
+ /* xprt_force_disconnect() wakes tasks with a fixed tk_status code.
+ * Wake them first here to ensure they get our tk_status code.
+ */
+ xprt_wake_pending_tasks(upper_xprt, status);
+ xs_tcp_force_close(upper_xprt);
+ xprt_clear_connecting(upper_xprt);
+ goto out_unlock;
+}
+
/**
* xs_connect - connect a socket to a remote endpoint
* @xprt: pointer to transport structure
@@ -2391,6 +2717,7 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
} else
dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
+ transport->clnt = task->tk_client;
queue_delayed_work(xprtiod_workqueue,
&transport->connect_worker,
delay);
@@ -2858,7 +3185,7 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
switch (sun->sun_family) {
case AF_LOCAL:
- if (sun->sun_path[0] != '/') {
+ if (sun->sun_path[0] != '/' && sun->sun_path[0] != '\0') {
dprintk("RPC: bad AF_LOCAL address: %s\n",
sun->sun_path);
ret = ERR_PTR(-EINVAL);
@@ -3045,6 +3372,94 @@ out_err:
}
/**
+ * xs_setup_tcp_tls - Set up transport to use a TCP with TLS
+ * @args: rpc transport creation arguments
+ *
+ */
+static struct rpc_xprt *xs_setup_tcp_tls(struct xprt_create *args)
+{
+ struct sockaddr *addr = args->dstaddr;
+ struct rpc_xprt *xprt;
+ struct sock_xprt *transport;
+ struct rpc_xprt *ret;
+ unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
+
+ if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
+ max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
+
+ xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
+ max_slot_table_size);
+ if (IS_ERR(xprt))
+ return xprt;
+ transport = container_of(xprt, struct sock_xprt, xprt);
+
+ xprt->prot = IPPROTO_TCP;
+ xprt->xprt_class = &xs_tcp_transport;
+ xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
+
+ xprt->bind_timeout = XS_BIND_TO;
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+ xprt->ops = &xs_tcp_ops;
+ xprt->timeout = &xs_tcp_default_timeout;
+
+ xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
+ xprt->connect_timeout = xprt->timeout->to_initval *
+ (xprt->timeout->to_retries + 1);
+
+ INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
+ INIT_WORK(&transport->error_worker, xs_error_handle);
+
+ switch (args->xprtsec.policy) {
+ case RPC_XPRTSEC_TLS_ANON:
+ case RPC_XPRTSEC_TLS_X509:
+ xprt->xprtsec = args->xprtsec;
+ INIT_DELAYED_WORK(&transport->connect_worker,
+ xs_tcp_tls_setup_socket);
+ break;
+ default:
+ ret = ERR_PTR(-EACCES);
+ goto out_err;
+ }
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ if (((struct sockaddr_in *)addr)->sin_port != htons(0))
+ xprt_set_bound(xprt);
+
+ xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
+ break;
+ case AF_INET6:
+ if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
+ xprt_set_bound(xprt);
+
+ xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
+ break;
+ default:
+ ret = ERR_PTR(-EAFNOSUPPORT);
+ goto out_err;
+ }
+
+ if (xprt_bound(xprt))
+ dprintk("RPC: set up xprt to %s (port %s) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
+ else
+ dprintk("RPC: set up xprt to %s (autobind) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
+
+ if (try_module_get(THIS_MODULE))
+ return xprt;
+ ret = ERR_PTR(-EINVAL);
+out_err:
+ xs_xprt_free(xprt);
+ return ret;
+}
+
+/**
* xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
* @args: rpc transport creation arguments
*
@@ -3153,6 +3568,15 @@ static struct xprt_class xs_tcp_transport = {
.netid = { "tcp", "tcp6", "" },
};
+static struct xprt_class xs_tcp_tls_transport = {
+ .list = LIST_HEAD_INIT(xs_tcp_tls_transport.list),
+ .name = "tcp-with-tls",
+ .owner = THIS_MODULE,
+ .ident = XPRT_TRANSPORT_TCP_TLS,
+ .setup = xs_setup_tcp_tls,
+ .netid = { "tcp", "tcp6", "" },
+};
+
static struct xprt_class xs_bc_tcp_transport = {
.list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
.name = "tcp NFSv4.1 backchannel",
@@ -3174,6 +3598,7 @@ int init_socket_xprt(void)
xprt_register_transport(&xs_local_transport);
xprt_register_transport(&xs_udp_transport);
xprt_register_transport(&xs_tcp_transport);
+ xprt_register_transport(&xs_tcp_tls_transport);
xprt_register_transport(&xs_bc_tcp_transport);
return 0;
@@ -3193,6 +3618,7 @@ void cleanup_socket_xprt(void)
xprt_unregister_transport(&xs_local_transport);
xprt_unregister_transport(&xs_udp_transport);
xprt_unregister_transport(&xs_tcp_transport);
+ xprt_unregister_transport(&xs_tcp_tls_transport);
xprt_unregister_transport(&xs_bc_tcp_transport);
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 5a8c0dd250af..31dca4ecb2c5 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -886,6 +886,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
struct net_device *dev;
+ int bound_dev_if;
u32 flags, qid;
int err = 0;
@@ -899,6 +900,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
XDP_USE_NEED_WAKEUP))
return -EINVAL;
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
+ return -EINVAL;
+
rtnl_lock();
mutex_lock(&xs->mutex);
if (xs->state != XSK_READY) {
diff --git a/samples/Kconfig b/samples/Kconfig
index b2db430bd3ff..bf49ed0d7362 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -253,6 +253,13 @@ config SAMPLE_INTEL_MEI
help
Build a sample program to work with mei device.
+config SAMPLE_TPS6594_PFSM
+ bool "Build example program working with TPS6594 PFSM driver"
+ depends on HEADERS_INSTALL
+ depends on CC_CAN_LINK
+ help
+ Build a sample program to work with PFSM devices.
+
config SAMPLE_WATCHDOG
bool "watchdog sample"
depends on CC_CAN_LINK
diff --git a/samples/Makefile b/samples/Makefile
index 7727f1a0d6d1..0a551c2b33f4 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/
obj-y += vfio-mdev/
subdir-$(CONFIG_SAMPLE_VFS) += vfs
obj-$(CONFIG_SAMPLE_INTEL_MEI) += mei/
+obj-$(CONFIG_SAMPLE_TPS6594_PFSM) += pfsm/
subdir-$(CONFIG_SAMPLE_WATCHDOG) += watchdog
subdir-$(CONFIG_SAMPLE_WATCH_QUEUE) += watch_queue
obj-$(CONFIG_SAMPLE_KMEMLEAK) += kmemleak/
diff --git a/samples/pfsm/.gitignore b/samples/pfsm/.gitignore
new file mode 100644
index 000000000000..f350a030a060
--- /dev/null
+++ b/samples/pfsm/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+/pfsm-wakeup
diff --git a/samples/pfsm/Makefile b/samples/pfsm/Makefile
new file mode 100644
index 000000000000..213e8d9f5dbc
--- /dev/null
+++ b/samples/pfsm/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+userprogs-always-y += pfsm-wakeup
+
+userccflags += -I usr/include
diff --git a/samples/pfsm/pfsm-wakeup.c b/samples/pfsm/pfsm-wakeup.c
new file mode 100644
index 000000000000..299dd9e1f607
--- /dev/null
+++ b/samples/pfsm/pfsm-wakeup.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPS6594 PFSM userspace example
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ *
+ * This example shows how to use PFSMs from a userspace application,
+ * on the TI j721s2 platform. The PMIC is armed to be triggered by an RTC
+ * alarm to execute a state transition (RETENTION to ACTIVE).
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include <linux/rtc.h>
+#include <linux/tps6594_pfsm.h>
+
+#define ALARM_DELTA_SEC 30
+
+#define RTC_A "/dev/rtc0"
+
+#define PMIC_NB 3
+#define PMIC_A "/dev/pfsm-0-0x48"
+#define PMIC_B "/dev/pfsm-0-0x4c"
+#define PMIC_C "/dev/pfsm-2-0x58"
+
+static const char * const dev_pfsm[] = {PMIC_A, PMIC_B, PMIC_C};
+
+int main(int argc, char *argv[])
+{
+ int i, ret, fd_rtc, fd_pfsm[PMIC_NB] = { 0 };
+ struct rtc_time rtc_tm;
+ struct pmic_state_opt pmic_opt = { 0 };
+ unsigned long data;
+
+ fd_rtc = open(RTC_A, O_RDONLY);
+ if (fd_rtc < 0) {
+ perror("Failed to open RTC device.");
+ goto out;
+ }
+
+ for (i = 0 ; i < PMIC_NB ; i++) {
+ fd_pfsm[i] = open(dev_pfsm[i], O_RDWR);
+ if (fd_pfsm[i] < 0) {
+ perror("Failed to open PFSM device.");
+ goto out;
+ }
+ }
+
+ /* Read RTC date/time */
+ ret = ioctl(fd_rtc, RTC_RD_TIME, &rtc_tm);
+ if (ret < 0) {
+ perror("Failed to read RTC date/time.");
+ goto out;
+ }
+ printf("Current RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n",
+ rtc_tm.tm_mday, rtc_tm.tm_mon + 1, rtc_tm.tm_year + 1900,
+ rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec);
+
+ /* Set RTC alarm to ALARM_DELTA_SEC sec in the future, and check for rollover */
+ rtc_tm.tm_sec += ALARM_DELTA_SEC;
+ if (rtc_tm.tm_sec >= 60) {
+ rtc_tm.tm_sec %= 60;
+ rtc_tm.tm_min++;
+ }
+ if (rtc_tm.tm_min == 60) {
+ rtc_tm.tm_min = 0;
+ rtc_tm.tm_hour++;
+ }
+ if (rtc_tm.tm_hour == 24)
+ rtc_tm.tm_hour = 0;
+ ret = ioctl(fd_rtc, RTC_ALM_SET, &rtc_tm);
+ if (ret < 0) {
+ perror("Failed to set RTC alarm.");
+ goto out;
+ }
+
+ /* Enable alarm interrupts */
+ ret = ioctl(fd_rtc, RTC_AIE_ON, 0);
+ if (ret < 0) {
+ perror("Failed to enable alarm interrupts.");
+ goto out;
+ }
+ printf("Waiting %d seconds for alarm...\n", ALARM_DELTA_SEC);
+
+ /*
+ * Set RETENTION state with options for PMIC_C/B/A respectively.
+ * Since PMIC_A is master, it should be the last one to be configured.
+ */
+ pmic_opt.ddr_retention = 1;
+ for (i = PMIC_NB - 1 ; i >= 0 ; i--) {
+ printf("Set RETENTION state for PMIC_%d.\n", i);
+ sleep(1);
+ ret = ioctl(fd_pfsm[i], PMIC_SET_RETENTION_STATE, &pmic_opt);
+ if (ret < 0) {
+ perror("Failed to set RETENTION state.");
+ goto out_reset;
+ }
+ }
+
+ /* This blocks until the alarm ring causes an interrupt */
+ ret = read(fd_rtc, &data, sizeof(unsigned long));
+ if (ret < 0)
+ perror("Failed to get RTC alarm.");
+ else
+ puts("Alarm rang.\n");
+
+out_reset:
+ ioctl(fd_rtc, RTC_AIE_OFF, 0);
+
+ /* Set ACTIVE state for PMIC_A */
+ ioctl(fd_pfsm[0], PMIC_SET_ACTIVE_STATE, 0);
+
+out:
+ for (i = 0 ; i < PMIC_NB ; i++)
+ if (fd_pfsm[i])
+ close(fd_pfsm[i]);
+
+ if (fd_rtc)
+ close(fd_rtc);
+
+ return 0;
+}
diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
index dd4e53ae9b73..c08cefb8eb1f 100644
--- a/samples/pktgen/functions.sh
+++ b/samples/pktgen/functions.sh
@@ -108,12 +108,13 @@ function pgset() {
fi
}
-if [[ -z "$APPEND" ]]; then
- if [[ $EUID -eq 0 ]]; then
- # Cleanup pktgen setup on exit if thats not "append mode"
- trap 'pg_ctrl "reset"' EXIT
- fi
-fi
+function trap_exit()
+{
+    # Clean up pktgen setup on exit if that's not "append mode"
+ if [[ -z "$APPEND" ]] && [[ $EUID -eq 0 ]]; then
+ trap 'pg_ctrl "reset"' EXIT
+ fi
+}
## -- General shell tricks --
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
index 99ec0688b044..b4328db4a164 100755
--- a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
+++ b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
@@ -33,6 +33,10 @@ root_check_run_with_sudo "$@"
# Parameter parsing via include
source ${basedir}/parameters.sh
+
+# Trap EXIT first
+trap_exit
+
# Using invalid DST_MAC will cause the packets to get dropped in
# ip_rcv() which is part of the test
if [ -z "$DEST_IP" ]; then
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
index 04b0dd0c36d6..f2beb512c5cd 100755
--- a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
+++ b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
@@ -14,6 +14,10 @@ root_check_run_with_sudo "$@"
# Parameter parsing via include
source ${basedir}/parameters.sh
+
+# Trap EXIT first
+trap_exit
+
if [ -z "$DEST_IP" ]; then
[ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
fi
diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
index 09a92ea963f9..cdb9f497f87d 100755
--- a/samples/pktgen/pktgen_sample01_simple.sh
+++ b/samples/pktgen/pktgen_sample01_simple.sh
@@ -13,6 +13,10 @@ root_check_run_with_sudo "$@"
# - go look in parameters.sh to see which setting are avail
# - required param is the interface "-i" stored in $DEV
source ${basedir}/parameters.sh
+
+# Trap EXIT first
+trap_exit
+
#
# Set some default params, if they didn't get set
if [ -z "$DEST_IP" ]; then
diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh
index 7fa41c84c32f..93f33d7d0a81 100755
--- a/samples/pktgen/pktgen_sample02_multiqueue.sh
+++ b/samples/pktgen/pktgen_sample02_multiqueue.sh
@@ -14,6 +14,9 @@ root_check_run_with_sudo "$@"
# Required param: -i dev in $DEV
source ${basedir}/parameters.sh
+# Trap EXIT first
+trap_exit
+
[ -z "$COUNT" ] && COUNT="100000" # Zero means indefinitely
# Base Config
diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
index 8bf2fdffba16..8f8ed1ac46a0 100755
--- a/samples/pktgen/pktgen_sample03_burst_single_flow.sh
+++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
@@ -25,6 +25,10 @@ root_check_run_with_sudo "$@"
# Parameter parsing via include
source ${basedir}/parameters.sh
+
+# Trap EXIT first
+trap_exit
+
# Set some default params, if they didn't get set
if [ -z "$DEST_IP" ]; then
[ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
diff --git a/samples/pktgen/pktgen_sample04_many_flows.sh b/samples/pktgen/pktgen_sample04_many_flows.sh
index cff51f861506..65ed486ce4f1 100755
--- a/samples/pktgen/pktgen_sample04_many_flows.sh
+++ b/samples/pktgen/pktgen_sample04_many_flows.sh
@@ -12,6 +12,10 @@ root_check_run_with_sudo "$@"
# Parameter parsing via include
source ${basedir}/parameters.sh
+
+# Trap EXIT first
+trap_exit
+
# Set some default params, if they didn't get set
if [ -z "$DEST_IP" ]; then
[ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
diff --git a/samples/pktgen/pktgen_sample05_flow_per_thread.sh b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
index 3578d0aa4ac5..bcbc386b2284 100755
--- a/samples/pktgen/pktgen_sample05_flow_per_thread.sh
+++ b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
@@ -16,6 +16,10 @@ root_check_run_with_sudo "$@"
# Parameter parsing via include
source ${basedir}/parameters.sh
+
+# Trap EXIT first
+trap_exit
+
# Set some default params, if they didn't get set
if [ -z "$DEST_IP" ]; then
[ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
diff --git a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
index 264cc5db9c49..0c5409cb5bab 100755
--- a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
+++ b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
@@ -14,6 +14,9 @@ root_check_run_with_sudo "$@"
# Required param: -i dev in $DEV
source ${basedir}/parameters.sh
+# Trap EXIT first
+trap_exit
+
# Base Config
[ -z "$COUNT" ] && COUNT="20000000" # Zero means indefinitely
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 78175231c969..6413342a03f4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -82,7 +82,7 @@ ifdef need-builtin
targets-for-builtin += $(obj)/built-in.a
endif
-targets-for-modules := $(foreach x, o mod $(if $(CONFIG_TRIM_UNUSED_KSYMS), usyms), \
+targets-for-modules := $(foreach x, o mod, \
$(patsubst %.o, %.$x, $(filter %.o, $(obj-m))))
ifdef need-modorder
@@ -101,7 +101,9 @@ else ifeq ($(KBUILD_CHECKSRC),2)
endif
ifneq ($(KBUILD_EXTRA_WARN),)
- cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $<
+ cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $(KDOCFLAGS) \
+ $(if $(findstring 2, $(KBUILD_EXTRA_WARN)), -Wall) \
+ $<
endif
# Compile C sources (.c)
@@ -161,7 +163,7 @@ quiet_cmd_cc_o_c = CC $(quiet_modtag) $@
ifdef CONFIG_MODVERSIONS
# When module versioning is enabled the following steps are executed:
# o compile a <file>.o from <file>.c
-# o if <file>.o doesn't contain a __ksymtab version, i.e. does
+# o if <file>.o doesn't contain a __export_symbol_*, i.e. does
# not export symbols, it's done.
# o otherwise, we calculate symbol versions using the good old
# genksyms on the preprocessed source and dump them into the .cmd file.
@@ -169,7 +171,7 @@ ifdef CONFIG_MODVERSIONS
# be compiled and linked to the kernel and/or modules.
gen_symversions = \
- if $(NM) $@ 2>/dev/null | grep -q __ksymtab; then \
+ if $(NM) $@ 2>/dev/null | grep -q ' __export_symbol_'; then \
$(call cmd_gensymtypes_$(1),$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
>> $(dot-target).cmd; \
fi
@@ -215,21 +217,12 @@ is-standard-object = $(if $(filter-out y%, $(OBJECT_FILES_NON_STANDARD_$(basetar
$(obj)/%.o: objtool-enabled = $(if $(is-standard-object),$(if $(delay-objtool),$(is-single-obj-m),y))
-ifdef CONFIG_TRIM_UNUSED_KSYMS
-cmd_gen_ksymdeps = \
- $(CONFIG_SHELL) $(srctree)/scripts/gen_ksymdeps.sh $@ >> $(dot-target).cmd
-endif
-
-cmd_check_local_export = $(srctree)/scripts/check-local-export $@
-
ifneq ($(findstring 1, $(KBUILD_EXTRA_WARN)),)
cmd_warn_shared_object = $(if $(word 2, $(modname-multi)),$(warning $(kbuild-file): $*.o is added to multiple modules: $(modname-multi)))
endif
define rule_cc_o_c
$(call cmd_and_fixdep,cc_o_c)
- $(call cmd,gen_ksymdeps)
- $(call cmd,check_local_export)
$(call cmd,checksrc)
$(call cmd,checkdoc)
$(call cmd,gen_objtooldep)
@@ -240,8 +233,6 @@ endef
define rule_as_o_S
$(call cmd_and_fixdep,as_o_S)
- $(call cmd,gen_ksymdeps)
- $(call cmd,check_local_export)
$(call cmd,gen_objtooldep)
$(call cmd,gen_symversions_S)
$(call cmd,warn_shared_object)
@@ -260,12 +251,6 @@ cmd_mod = printf '%s\n' $(call real-search, $*.o, .o, -objs -y -m) | \
$(obj)/%.mod: FORCE
$(call if_changed,mod)
-# List module undefined symbols
-cmd_undefined_syms = $(NM) $< | sed -n 's/^ *U //p' > $@
-
-$(obj)/%.usyms: $(obj)/%.o FORCE
- $(call if_changed,undefined_syms)
-
quiet_cmd_cc_lst_c = MKLST $@
cmd_cc_lst_c = $(CC) $(c_flags) -g -c -o $*.o $< && \
$(CONFIG_SHELL) $(srctree)/scripts/makelst $*.o \
@@ -340,9 +325,7 @@ $(obj)/%.ll: $(src)/%.rs FORCE
cmd_gensymtypes_S = \
{ echo "\#include <linux/kernel.h>" ; \
echo "\#include <asm/asm-prototypes.h>" ; \
- $(CPP) $(a_flags) $< | \
- grep "\<___EXPORT_SYMBOL\>" | \
- sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ; } | \
+ $(NM) $@ | sed -n 's/.* __export_symbol_\(.*\)/EXPORT_SYMBOL(\1);/p' ; } | \
$(CPP) -D__GENKSYMS__ $(c_flags) -xc - | $(genksyms)
quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@
diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang
index 9a0aebf2ae60..6c23c6af797f 100644
--- a/scripts/Makefile.clang
+++ b/scripts/Makefile.clang
@@ -35,6 +35,5 @@ CLANG_FLAGS += -Werror=unknown-warning-option
CLANG_FLAGS += -Werror=ignored-optimization-argument
CLANG_FLAGS += -Werror=option-ignored
CLANG_FLAGS += -Werror=unused-command-line-argument
-KBUILD_CFLAGS += $(CLANG_FLAGS)
-KBUILD_AFLAGS += $(CLANG_FLAGS)
+KBUILD_CPPFLAGS += $(CLANG_FLAGS)
export CLANG_FLAGS
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 3649900696dd..f2cb4d7ffd96 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -37,8 +37,10 @@ __clean-files := $(wildcard $(addprefix $(obj)/, $(__clean-files)))
# ==========================================================================
+# To make this rule robust against the "Argument list too long" error,
+# remove the $(obj)/ prefix, and restore it with a shell command.
quiet_cmd_clean = CLEAN $(obj)
- cmd_clean = rm -rf $(__clean-files)
+ cmd_clean = printf '$(obj)/%s ' $(patsubst $(obj)/%,%,$(__clean-files)) | xargs rm -rf
__clean: $(subdir-ymn)
ifneq ($(strip $(__clean-files)),)
diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
index 1279c5fd6e76..8fcb427405a6 100644
--- a/scripts/Makefile.compiler
+++ b/scripts/Makefile.compiler
@@ -32,13 +32,13 @@ try-run = $(shell set -e; \
# Usage: aflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
as-option = $(call try-run,\
- $(CC) -Werror $(KBUILD_AFLAGS) $(1) -c -x assembler-with-cpp /dev/null -o "$$TMP",$(1),$(2))
+ $(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(1) -c -x assembler-with-cpp /dev/null -o "$$TMP",$(1),$(2))
# as-instr
# Usage: aflags-y += $(call as-instr,instr,option1,option2)
as-instr = $(call try-run,\
- printf "%b\n" "$(1)" | $(CC) -Werror $(KBUILD_AFLAGS) -c -x assembler-with-cpp -o "$$TMP" -,$(2),$(3))
+ printf "%b\n" "$(1)" | $(CC) -Werror $(CLANG_FLAGS) $(KBUILD_AFLAGS) -c -x assembler-with-cpp -o "$$TMP" -,$(2),$(3))
# __cc-option
# Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 100a386fcd71..68d0134bdbf9 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -418,9 +418,6 @@ endif
$(obj)/%.dtb: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE
$(call if_changed_dep,dtb)
-$(obj)/%.dtbo: $(src)/%.dts $(DTC) FORCE
- $(call if_changed_dep,dtc)
-
$(obj)/%.dtbo: $(src)/%.dtso $(DTC) FORCE
$(call if_changed_dep,dtc)
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index 4703f652c009..fc19f67039bd 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -23,7 +23,7 @@ modname = $(notdir $(@:.mod.o=))
part-of-module = y
quiet_cmd_cc_o_c = CC [M] $@
- cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI), $(c_flags)) -c -o $@ $<
+ cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV), $(c_flags)) -c -o $@ $<
%.mod.o: %.mod.c FORCE
$(call if_changed_dep,cc_o_c)
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 0980c58d8afc..39472e834b63 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -47,6 +47,7 @@ modpost-args = \
$(if $(KBUILD_MODPOST_WARN),-w) \
$(if $(KBUILD_NSDEPS),-d $(MODULES_NSDEPS)) \
$(if $(CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS)$(KBUILD_NSDEPS),-N) \
+ $(if $(findstring 1, $(KBUILD_EXTRA_WARN)),-W) \
-o $@
modpost-deps := $(MODPOST)
@@ -90,6 +91,13 @@ targets += .vmlinux.objs
.vmlinux.objs: vmlinux.a $(KBUILD_VMLINUX_LIBS) FORCE
$(call if_changed,vmlinux_objs)
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+ksym-wl := $(CONFIG_UNUSED_KSYMS_WHITELIST)
+ksym-wl := $(if $(filter-out /%, $(ksym-wl)),$(srctree)/)$(ksym-wl)
+modpost-args += -t $(addprefix -u , $(ksym-wl))
+modpost-deps += $(ksym-wl)
+endif
+
ifeq ($(wildcard vmlinux.o),)
missing-input := vmlinux.o
output-symdump := modules-only.symvers
diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
index 10176dec97ea..3cd6ca15f390 100644
--- a/scripts/Makefile.vmlinux
+++ b/scripts/Makefile.vmlinux
@@ -19,6 +19,7 @@ quiet_cmd_cc_o_c = CC $@
ifdef CONFIG_MODULES
KASAN_SANITIZE_.vmlinux.export.o := n
+GCOV_PROFILE_.vmlinux.export.o := n
targets += .vmlinux.export.o
vmlinux: .vmlinux.export.o
endif
diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
deleted file mode 100755
index f1b5ac818411..000000000000
--- a/scripts/adjust_autoksyms.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0-only
-
-# Script to update include/generated/autoksyms.h and dependency files
-#
-# Copyright: (C) 2016 Linaro Limited
-# Created by: Nicolas Pitre, January 2016
-#
-
-# Update the include/generated/autoksyms.h file.
-#
-# For each symbol being added or removed, the corresponding dependency
-# file's timestamp is updated to force a rebuild of the affected source
-# file. All arguments passed to this script are assumed to be a command
-# to be exec'd to trigger a rebuild of those files.
-
-set -e
-
-cur_ksyms_file="include/generated/autoksyms.h"
-new_ksyms_file="include/generated/autoksyms.h.tmpnew"
-
-info() {
- if [ "$quiet" != "silent_" ]; then
- printf " %-7s %s\n" "$1" "$2"
- fi
-}
-
-info "CHK" "$cur_ksyms_file"
-
-# Use "make V=1" to debug this script.
-case "$KBUILD_VERBOSE" in
-*1*)
- set -x
- ;;
-esac
-
-# Generate a new symbol list file
-$CONFIG_SHELL $srctree/scripts/gen_autoksyms.sh --modorder "$new_ksyms_file"
-
-# Extract changes between old and new list and touch corresponding
-# dependency files.
-changed=$(
-count=0
-sort "$cur_ksyms_file" "$new_ksyms_file" | uniq -u |
-sed -n 's/^#define __KSYM_\(.*\) 1/\1/p' |
-while read sympath; do
- if [ -z "$sympath" ]; then continue; fi
- depfile="include/ksym/${sympath}"
- mkdir -p "$(dirname "$depfile")"
- touch "$depfile"
- # Filesystems with coarse time precision may create timestamps
- # equal to the one from a file that was very recently built and that
- # needs to be rebuild. Let's guard against that by making sure our
- # dep files are always newer than the first file we created here.
- while [ ! "$depfile" -nt "$new_ksyms_file" ]; do
- touch "$depfile"
- done
- echo $((count += 1))
-done | tail -1 )
-changed=${changed:-0}
-
-if [ $changed -gt 0 ]; then
- # Replace the old list with tne new one
- old=$(grep -c "^#define __KSYM_" "$cur_ksyms_file" || true)
- new=$(grep -c "^#define __KSYM_" "$new_ksyms_file" || true)
- info "KSYMS" "symbols: before=$old, after=$new, changed=$changed"
- info "UPD" "$cur_ksyms_file"
- mv -f "$new_ksyms_file" "$cur_ksyms_file"
- # Then trigger a rebuild of affected source files
- exec $@
-else
- rm -f "$new_ksyms_file"
-fi
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index fa562806c2be..84b6efa849f4 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -246,8 +246,7 @@ static void *read_file(const char *filename)
/* Ignore certain dependencies */
static int is_ignored_file(const char *s, int len)
{
- return str_ends_with(s, len, "include/generated/autoconf.h") ||
- str_ends_with(s, len, "include/generated/autoksyms.h");
+ return str_ends_with(s, len, "include/generated/autoconf.h");
}
/* Do not parse these files */
diff --git a/scripts/check-local-export b/scripts/check-local-export
deleted file mode 100755
index f90b5a9c67b3..000000000000
--- a/scripts/check-local-export
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Copyright (C) 2022 Masahiro Yamada <masahiroy@kernel.org>
-# Copyright (C) 2022 Owen Rafferty <owen@owenrafferty.com>
-#
-# Exit with error if a local exported symbol is found.
-# EXPORT_SYMBOL should be used for global symbols.
-
-set -e
-pid=$$
-
-# If there is no symbol in the object, ${NM} (both GNU nm and llvm-nm) shows
-# 'no symbols' diagnostic (but exits with 0). It is harmless and hidden by
-# '2>/dev/null'. However, it suppresses real error messages as well. Add a
-# hand-crafted error message here.
-#
-# TODO:
-# Use --quiet instead of 2>/dev/null when we upgrade the minimum version of
-# binutils to 2.37, llvm to 13.0.0.
-# Then, the following line will be simpler:
-# { ${NM} --quiet ${1} || kill 0; } |
-
-{ ${NM} ${1} 2>/dev/null || { echo "${0}: ${NM} failed" >&2; kill $pid; } } |
-${AWK} -v "file=${1}" '
-BEGIN {
- i = 0
-}
-
-# Skip the line if the number of fields is less than 3.
-#
-# case 1)
-# For undefined symbols, the first field (value) is empty.
-# The outout looks like this:
-# " U _printk"
-# It is unneeded to record undefined symbols.
-#
-# case 2)
-# For Clang LTO, llvm-nm outputs a line with type t but empty name:
-# "---------------- t"
-!length($3) {
- next
-}
-
-# save (name, type) in the associative array
-{ symbol_types[$3]=$2 }
-
-# append the exported symbol to the array
-($3 ~ /^__ksymtab_/) {
- export_symbols[i] = $3
- sub(/^__ksymtab_/, "", export_symbols[i])
- i++
-}
-
-END {
- exit_code = 0
- for (j = 0; j < i; ++j) {
- name = export_symbols[j]
- # nm(3) says "If lowercase, the symbol is usually local"
- if (symbol_types[name] ~ /[a-z]/) {
- printf "%s: error: local symbol %s was exported\n",
- file, name | "cat 1>&2"
- exit_code = 1
- }
- }
-
- exit exit_code
-}'
-
-exit $?
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 7bfa4d39d17f..880fde13d9b8 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5046,7 +5046,7 @@ sub process {
if|for|while|switch|return|case|
volatile|__volatile__|
__attribute__|format|__extension__|
- asm|__asm__)$/x)
+ asm|__asm__|scoped_guard)$/x)
{
# cpp #define statements have non-optional spaces, ie
# if there is a space between the name and the open
diff --git a/scripts/gen_autoksyms.sh b/scripts/gen_autoksyms.sh
deleted file mode 100755
index 12bcfae940ee..000000000000
--- a/scripts/gen_autoksyms.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0-only
-
-# Create an autoksyms.h header file from the list of all module's needed symbols
-# as recorded in *.usyms files and the user-provided symbol whitelist.
-
-set -e
-
-# Use "make V=1" to debug this script.
-case "$KBUILD_VERBOSE" in
-*1*)
- set -x
- ;;
-esac
-
-read_modorder=
-
-if [ "$1" = --modorder ]; then
- shift
- read_modorder=1
-fi
-
-output_file="$1"
-
-needed_symbols=
-
-# Special case for modversions (see modpost.c)
-if grep -q "^CONFIG_MODVERSIONS=y$" include/config/auto.conf; then
- needed_symbols="$needed_symbols module_layout"
-fi
-
-ksym_wl=$(sed -n 's/^CONFIG_UNUSED_KSYMS_WHITELIST=\(.*\)$/\1/p' include/config/auto.conf)
-if [ -n "$ksym_wl" ]; then
- [ "${ksym_wl}" != "${ksym_wl#/}" ] || ksym_wl="$abs_srctree/$ksym_wl"
- if [ ! -f "$ksym_wl" ] || [ ! -r "$ksym_wl" ]; then
- echo "ERROR: '$ksym_wl' whitelist file not found" >&2
- exit 1
- fi
-fi
-
-# Generate a new ksym list file with symbols needed by the current
-# set of modules.
-cat > "$output_file" << EOT
-/*
- * Automatically generated file; DO NOT EDIT.
- */
-
-EOT
-
-{
- [ -n "${read_modorder}" ] && sed 's/o$/usyms/' modules.order | xargs cat
- echo "$needed_symbols"
- [ -n "$ksym_wl" ] && cat "$ksym_wl"
-} | sed -e 's/ /\n/g' | sed -n -e '/^$/!p' |
-# Remove the dot prefix for ppc64; symbol names with a dot (.) hold entry
-# point addresses.
-sed -e 's/^\.//' |
-sort -u |
-# Ignore __this_module. It's not an exported symbol, and will be resolved
-# when the final .ko's are linked.
-grep -v '^__this_module$' |
-sed -e 's/\(.*\)/#define __KSYM_\1 1/' >> "$output_file"
diff --git a/scripts/gen_ksymdeps.sh b/scripts/gen_ksymdeps.sh
deleted file mode 100755
index 8ee533f33659..000000000000
--- a/scripts/gen_ksymdeps.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-set -e
-
-# List of exported symbols
-#
-# If the object has no symbol, $NM warns 'no symbols'.
-# Suppress the stderr.
-# TODO:
-# Use -q instead of 2>/dev/null when we upgrade the minimum version of
-# binutils to 2.37, llvm to 13.0.0.
-ksyms=$($NM $1 2>/dev/null | sed -n 's/.*__ksym_marker_\(.*\)/\1/p')
-
-if [ -z "$ksyms" ]; then
- exit 0
-fi
-
-echo
-echo "ksymdeps_$1 := \\"
-
-for s in $ksyms
-do
- printf ' $(wildcard include/ksym/%s) \\\n' "$s"
-done
-
-echo
-echo "$1: \$(ksymdeps_$1)"
-echo
-echo "\$(ksymdeps_$1):"
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 0d2db41177b2..d387c9381650 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -19,6 +19,7 @@
*
*/
+#include <errno.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
@@ -29,24 +30,8 @@
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
-#define _stringify_1(x) #x
-#define _stringify(x) _stringify_1(x)
-
#define KSYM_NAME_LEN 512
-/*
- * A substantially bigger size than the current maximum.
- *
- * It cannot be defined as an expression because it gets stringified
- * for the fscanf() format string. Therefore, a _Static_assert() is
- * used instead to maintain the relationship with KSYM_NAME_LEN.
- */
-#define KSYM_NAME_LEN_BUFFER 2048
-_Static_assert(
- KSYM_NAME_LEN_BUFFER == KSYM_NAME_LEN * 4,
- "Please keep KSYM_NAME_LEN_BUFFER in sync with KSYM_NAME_LEN"
-);
-
struct sym_entry {
unsigned long long addr;
unsigned int len;
@@ -136,24 +121,40 @@ static void check_symbol_range(const char *sym, unsigned long long addr,
}
}
-static struct sym_entry *read_symbol(FILE *in)
+static struct sym_entry *read_symbol(FILE *in, char **buf, size_t *buf_len)
{
- char name[KSYM_NAME_LEN_BUFFER+1], type;
+ char *name, type, *p;
unsigned long long addr;
- unsigned int len;
+ size_t len;
+ ssize_t readlen;
struct sym_entry *sym;
- int rc;
- rc = fscanf(in, "%llx %c %" _stringify(KSYM_NAME_LEN_BUFFER) "s\n", &addr, &type, name);
- if (rc != 3) {
- if (rc != EOF && fgets(name, ARRAY_SIZE(name), in) == NULL)
- fprintf(stderr, "Read error or end of file.\n");
+ readlen = getline(buf, buf_len, in);
+ if (readlen < 0) {
+ if (errno) {
+ perror("read_symbol");
+ exit(EXIT_FAILURE);
+ }
return NULL;
}
- if (strlen(name) >= KSYM_NAME_LEN) {
+
+ if ((*buf)[readlen - 1] == '\n')
+ (*buf)[readlen - 1] = 0;
+
+ addr = strtoull(*buf, &p, 16);
+
+ if (*buf == p || *p++ != ' ' || !isascii((type = *p++)) || *p++ != ' ') {
+ fprintf(stderr, "line format error\n");
+ exit(EXIT_FAILURE);
+ }
+
+ name = p;
+ len = strlen(name);
+
+ if (len >= KSYM_NAME_LEN) {
fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n"
"Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
- name, strlen(name), KSYM_NAME_LEN);
+ name, len, KSYM_NAME_LEN);
return NULL;
}
@@ -169,8 +170,7 @@ static struct sym_entry *read_symbol(FILE *in)
/* include the type field in the symbol name, so that it gets
* compressed together */
-
- len = strlen(name) + 1;
+ len++;
sym = malloc(sizeof(*sym) + len + 1);
if (!sym) {
@@ -257,6 +257,8 @@ static void read_map(const char *in)
{
FILE *fp;
struct sym_entry *sym;
+ char *buf = NULL;
+ size_t buflen = 0;
fp = fopen(in, "r");
if (!fp) {
@@ -265,7 +267,7 @@ static void read_map(const char *in)
}
while (!feof(fp)) {
- sym = read_symbol(fp);
+ sym = read_symbol(fp, &buf, &buflen);
if (!sym)
continue;
@@ -284,6 +286,7 @@ static void read_map(const char *in)
table[table_cnt++] = sym;
}
+ free(buf);
fclose(fp);
}
@@ -806,7 +809,7 @@ static void record_relative_base(void)
int main(int argc, char **argv)
{
while (1) {
- static struct option long_options[] = {
+ static const struct option long_options[] = {
{"all-symbols", no_argument, &all_symbols, 1},
{"absolute-percpu", no_argument, &absolute_percpu, 1},
{"base-relative", no_argument, &base_relative, 1},
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 3387ad7508f7..d51cd7ac15d2 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -317,7 +317,7 @@ foreach my $makefile (@makefiles) {
$_ = convert_vars($_, %make_vars);
# collect objects after obj-$(CONFIG_FOO_BAR)
- if (/obj-\$\((CONFIG_[^\)]*)\)\s*[+:]?=\s*(.*)/) {
+ if (/obj-\$[({](CONFIG_[^})]*)[)}]\s*[+:]?=\s*(.*)/) {
$var = $1;
$objs = $2;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index eb70c1fd4e86..d0116c6939dc 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -23,7 +23,7 @@ kernel-doc - Print formatted kernel documentation to stdout
=head1 SYNOPSIS
- kernel-doc [-h] [-v] [-Werror]
+ kernel-doc [-h] [-v] [-Werror] [-Wall] [-Wreturn] [-Wshort-description] [-Wcontents-before-sections]
[ -man |
-rst [-sphinx-version VERSION] [-enable-lineno] |
-none
@@ -133,6 +133,9 @@ my $dohighlight = "";
my $verbose = 0;
my $Werror = 0;
+my $Wreturn = 0;
+my $Wshort_desc = 0;
+my $Wcontents_before_sections = 0;
my $output_mode = "rst";
my $output_preformatted = 0;
my $no_doc_sections = 0;
@@ -187,9 +190,14 @@ if (defined($ENV{'KCFLAGS'})) {
}
}
+# reading this variable is kept for backwards compatibility, in case
+# someone was invoking kernel-doc with this variable set from outside
+# the kernel's build system
if (defined($ENV{'KDOC_WERROR'})) {
$Werror = "$ENV{'KDOC_WERROR'}";
}
+# other environment variables are converted to command-line
+# arguments in cmd_checkdoc in the build system
# Generated docbook code is inserted in a template at a point where
# docbook v3.1 requires a non-zero sequence of RefEntry's; see:
@@ -318,6 +326,16 @@ while ($ARGV[0] =~ m/^--?(.*)/) {
$verbose = 1;
} elsif ($cmd eq "Werror") {
$Werror = 1;
+ } elsif ($cmd eq "Wreturn") {
+ $Wreturn = 1;
+ } elsif ($cmd eq "Wshort-desc") {
+ $Wshort_desc = 1;
+ } elsif ($cmd eq "Wcontents-before-sections") {
+ $Wcontents_before_sections = 1;
+ } elsif ($cmd eq "Wall") {
+ $Wreturn = 1;
+ $Wshort_desc = 1;
+ $Wcontents_before_sections = 1;
} elsif (($cmd eq "h") || ($cmd eq "help")) {
pod2usage(-exitval => 0, -verbose => 2);
} elsif ($cmd eq 'no-doc-sections') {
@@ -1301,6 +1319,9 @@ sub dump_enum($$) {
my $file = shift;
my $members;
+ # ignore members marked private:
+ $x =~ s/\/\*\s*private:.*?\/\*\s*public:.*?\*\///gosi;
+ $x =~ s/\/\*\s*private:.*}/}/gosi;
$x =~ s@/\*.*?\*/@@gos; # strip comments.
# strip #define macros inside enums
@@ -1748,9 +1769,9 @@ sub dump_function($$) {
# This check emits a lot of warnings at the moment, because many
# functions don't have a 'Return' doc section. So until the number
# of warnings goes sufficiently down, the check is only performed in
- # verbose mode.
+ # -Wreturn mode.
# TODO: always perform the check.
- if ($verbose && !$noret) {
+ if ($Wreturn && !$noret) {
check_return_section($file, $declaration_name, $return_type);
}
@@ -2054,7 +2075,7 @@ sub process_name($$) {
$state = STATE_NORMAL;
}
- if (($declaration_purpose eq "") && $verbose) {
+ if (($declaration_purpose eq "") && $Wshort_desc) {
emit_warning("${file}:$.", "missing initial short description on line:\n$_");
}
@@ -2103,7 +2124,7 @@ sub process_body($$) {
}
if (($contents ne "") && ($contents ne "\n")) {
- if (!$in_doc_sect && $verbose) {
+ if (!$in_doc_sect && $Wcontents_before_sections) {
emit_warning("${file}:$.", "contents before sections\n");
}
dump_section($file, $section, $contents);
diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
index dfd186372f63..2ade63149466 100755
--- a/scripts/min-tool-version.sh
+++ b/scripts/min-tool-version.sh
@@ -17,8 +17,8 @@ binutils)
echo 2.25.0
;;
gcc)
- if [ "$SRCARCH" = parisc ]; then
- echo 11.0.0
+ if [ "$ARCH" = parisc64 ]; then
+ echo 12.0.0
else
echo 5.1.0
fi
diff --git a/scripts/mksysmap b/scripts/mksysmap
index cb3b1fff3eee..9ba1c9da0a40 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -32,7 +32,7 @@ ${NM} -n ${1} | sed >${2} -e "
# (do not forget a space before each pattern)
# local symbols for ARM, MIPS, etc.
-/ \$/d
+/ \\$/d
# local labels, .LBB, .Ltmpxxx, .L__unnamed_xx, .LASANPC, etc.
/ \.L/d
@@ -40,8 +40,12 @@ ${NM} -n ${1} | sed >${2} -e "
# arm64 EFI stub namespace
/ __efistub_/d
+# arm64 local symbols in PIE namespace
+/ __pi_\\$/d
+/ __pi_\.L/d
+
# arm64 local symbols in non-VHE KVM namespace
-/ __kvm_nvhe_\$/d
+/ __kvm_nvhe_\\$/d
/ __kvm_nvhe_\.L/d
# arm64 lld
@@ -58,6 +62,8 @@ ${NM} -n ${1} | sed >${2} -e "
# CFI type identifiers
/ __kcfi_typeid_/d
+/ __kvm_nvhe___kcfi_typeid_/d
+/ __pi___kcfi_typeid_/d
# CRC from modversions
/ __crc_/d
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index c12150f96b88..b29b29707f10 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -35,6 +35,9 @@ static bool warn_unresolved;
static int sec_mismatch_count;
static bool sec_mismatch_warn_only = true;
+/* Trim EXPORT_SYMBOLs that are unused by in-tree modules */
+static bool trim_unused_exports;
+
/* ignore missing files */
static bool ignore_missing_files;
/* If set to 1, only warn (instead of error) about missing ns imports */
@@ -42,6 +45,8 @@ static bool allow_missing_ns_imports;
static bool error_occurred;
+static bool extra_warn;
+
/*
* Cut off the warnings when there are too many. This typically occurs when
* vmlinux is missing. ('make modules' without building vmlinux.)
@@ -215,7 +220,9 @@ struct symbol {
unsigned int crc;
bool crc_valid;
bool weak;
+ bool is_func;
bool is_gpl_only; /* exported by EXPORT_SYMBOL_GPL */
+ bool used; /* there exists a user of this symbol */
char name[];
};
@@ -297,6 +304,13 @@ static bool contains_namespace(struct list_head *head, const char *namespace)
{
struct namespace_list *list;
+ /*
+ * The default namespace is the null string "", which is always implicitly
+ * contained.
+ */
+ if (!namespace[0])
+ return true;
+
list_for_each_entry(list, head, list) {
if (!strcmp(list->namespace, namespace))
return true;
@@ -352,26 +366,8 @@ static const char *sec_name(const struct elf_info *info, unsigned int secindex)
#define strstarts(str, prefix) (strncmp(str, prefix, strlen(prefix)) == 0)
-static void sym_update_namespace(const char *symname, const char *namespace)
-{
- struct symbol *s = find_symbol(symname);
-
- /*
- * That symbol should have been created earlier and thus this is
- * actually an assertion.
- */
- if (!s) {
- error("Could not update namespace(%s) for symbol %s\n",
- namespace, symname);
- return;
- }
-
- free(s->namespace);
- s->namespace = namespace[0] ? NOFAIL(strdup(namespace)) : NULL;
-}
-
static struct symbol *sym_add_exported(const char *name, struct module *mod,
- bool gpl_only)
+ bool gpl_only, const char *namespace)
{
struct symbol *s = find_symbol(name);
@@ -384,6 +380,7 @@ static struct symbol *sym_add_exported(const char *name, struct module *mod,
s = alloc_symbol(name);
s->module = mod;
s->is_gpl_only = gpl_only;
+ s->namespace = NOFAIL(strdup(namespace));
list_add_tail(&s->list, &mod->exported_symbols);
hash_add_symbol(s);
@@ -531,6 +528,8 @@ static int parse_elf(struct elf_info *info, const char *filename)
fatal("%s has NOBITS .modinfo\n", filename);
info->modinfo = (void *)hdr + sechdrs[i].sh_offset;
info->modinfo_len = sechdrs[i].sh_size;
+ } else if (!strcmp(secname, ".export_symbol")) {
+ info->export_symbol_secndx = i;
}
if (sechdrs[i].sh_type == SHT_SYMTAB) {
@@ -653,18 +652,6 @@ static void handle_symbol(struct module *mod, struct elf_info *info,
ELF_ST_BIND(sym->st_info) == STB_WEAK);
break;
default:
- /* All exported symbols */
- if (strstarts(symname, "__ksymtab_")) {
- const char *name, *secname;
-
- name = symname + strlen("__ksymtab_");
- secname = sec_name(info, get_secindex(info, sym));
-
- if (strstarts(secname, "___ksymtab_gpl+"))
- sym_add_exported(name, mod, true);
- else if (strstarts(secname, "___ksymtab+"))
- sym_add_exported(name, mod, false);
- }
if (strcmp(symname, "init_module") == 0)
mod->has_init = true;
if (strcmp(symname, "cleanup_module") == 0)
@@ -838,34 +825,14 @@ static void check_section(const char *modname, struct elf_info *elf,
#define ALL_TEXT_SECTIONS ALL_INIT_TEXT_SECTIONS, ALL_EXIT_TEXT_SECTIONS, \
TEXT_SECTIONS, OTHER_TEXT_SECTIONS
-/* init data sections */
-static const char *const init_data_sections[] =
- { ALL_INIT_DATA_SECTIONS, NULL };
-
-/* all init sections */
-static const char *const init_sections[] = { ALL_INIT_SECTIONS, NULL };
-
-/* all text sections */
-static const char *const text_sections[] = { ALL_TEXT_SECTIONS, NULL };
-
-/* data section */
-static const char *const data_sections[] = { DATA_SECTIONS, NULL };
-
-static const char *const head_sections[] = { ".head.text*", NULL };
-static const char *const linker_symbols[] =
- { "__init_begin", "_sinittext", "_einittext", NULL };
-static const char *const optim_symbols[] = { "*.constprop.*", NULL };
-
enum mismatch {
TEXT_TO_ANY_INIT,
DATA_TO_ANY_INIT,
- TEXT_TO_ANY_EXIT,
- DATA_TO_ANY_EXIT,
+ TEXTDATA_TO_ANY_EXIT,
XXXINIT_TO_SOME_INIT,
XXXEXIT_TO_SOME_EXIT,
ANY_INIT_TO_ANY_EXIT,
ANY_EXIT_TO_ANY_INIT,
- EXPORT_TO_INIT_EXIT,
EXTABLE_TO_NON_TEXT,
};
@@ -881,27 +848,14 @@ enum mismatch {
* targeting sections in this array (white-list). Can be empty.
*
* @mismatch: Type of mismatch.
- *
- * @handler: Specific handler to call when a match is found. If NULL,
- * default_mismatch_handler() will be called.
- *
*/
struct sectioncheck {
const char *fromsec[20];
const char *bad_tosec[20];
const char *good_tosec[20];
enum mismatch mismatch;
- void (*handler)(const char *modname, struct elf_info *elf,
- const struct sectioncheck* const mismatch,
- Elf_Rela *r, Elf_Sym *sym, const char *fromsec);
-
};
-static void extable_mismatch_handler(const char *modname, struct elf_info *elf,
- const struct sectioncheck* const mismatch,
- Elf_Rela *r, Elf_Sym *sym,
- const char *fromsec);
-
static const struct sectioncheck sectioncheck[] = {
/* Do not reference init/exit code/data from
* normal code and data
@@ -913,23 +867,13 @@ static const struct sectioncheck sectioncheck[] = {
},
{
.fromsec = { DATA_SECTIONS, NULL },
- .bad_tosec = { ALL_XXXINIT_SECTIONS, NULL },
- .mismatch = DATA_TO_ANY_INIT,
-},
-{
- .fromsec = { DATA_SECTIONS, NULL },
- .bad_tosec = { INIT_SECTIONS, NULL },
+ .bad_tosec = { ALL_XXXINIT_SECTIONS, INIT_SECTIONS, NULL },
.mismatch = DATA_TO_ANY_INIT,
},
{
- .fromsec = { TEXT_SECTIONS, NULL },
- .bad_tosec = { ALL_EXIT_SECTIONS, NULL },
- .mismatch = TEXT_TO_ANY_EXIT,
-},
-{
- .fromsec = { DATA_SECTIONS, NULL },
+ .fromsec = { TEXT_SECTIONS, DATA_SECTIONS, NULL },
.bad_tosec = { ALL_EXIT_SECTIONS, NULL },
- .mismatch = DATA_TO_ANY_EXIT,
+ .mismatch = TEXTDATA_TO_ANY_EXIT,
},
/* Do not reference init code/data from meminit code/data */
{
@@ -960,12 +904,6 @@ static const struct sectioncheck sectioncheck[] = {
.bad_tosec = { INIT_SECTIONS, NULL },
.mismatch = ANY_INIT_TO_ANY_EXIT,
},
-/* Do not export init/exit functions or data */
-{
- .fromsec = { "___ksymtab*", NULL },
- .bad_tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
- .mismatch = EXPORT_TO_INIT_EXIT,
-},
{
.fromsec = { "__ex_table", NULL },
/* If you're adding any new black-listed sections in here, consider
@@ -974,7 +912,6 @@ static const struct sectioncheck sectioncheck[] = {
.bad_tosec = { ".altinstr_replacement", NULL },
.good_tosec = {ALL_TEXT_SECTIONS , NULL},
.mismatch = EXTABLE_TO_NON_TEXT,
- .handler = extable_mismatch_handler,
}
};
@@ -1048,28 +985,19 @@ static const struct sectioncheck *section_mismatch(
* fromsec = text section
* refsymname = *.constprop.*
*
- * Pattern 6:
- * Hide section mismatch warnings for ELF local symbols. The goal
- * is to eliminate false positive modpost warnings caused by
- * compiler-generated ELF local symbol names such as ".LANCHOR1".
- * Autogenerated symbol names bypass modpost's "Pattern 2"
- * whitelisting, which relies on pattern-matching against symbol
- * names to work. (One situation where gcc can autogenerate ELF
- * local symbols is when "-fsection-anchors" is used.)
**/
-static int secref_whitelist(const struct sectioncheck *mismatch,
- const char *fromsec, const char *fromsym,
+static int secref_whitelist(const char *fromsec, const char *fromsym,
const char *tosec, const char *tosym)
{
/* Check for pattern 1 */
- if (match(tosec, init_data_sections) &&
- match(fromsec, data_sections) &&
+ if (match(tosec, PATTERNS(ALL_INIT_DATA_SECTIONS)) &&
+ match(fromsec, PATTERNS(DATA_SECTIONS)) &&
strstarts(fromsym, "__param"))
return 0;
/* Check for pattern 1a */
if (strcmp(tosec, ".init.text") == 0 &&
- match(fromsec, data_sections) &&
+ match(fromsec, PATTERNS(DATA_SECTIONS)) &&
strstarts(fromsym, "__param_ops_"))
return 0;
@@ -1092,22 +1020,18 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
return 0;
/* Check for pattern 3 */
- if (match(fromsec, head_sections) &&
- match(tosec, init_sections))
+ if (strstarts(fromsec, ".head.text") &&
+ match(tosec, PATTERNS(ALL_INIT_SECTIONS)))
return 0;
/* Check for pattern 4 */
- if (match(tosym, linker_symbols))
+ if (match(tosym, PATTERNS("__init_begin", "_sinittext", "_einittext")))
return 0;
/* Check for pattern 5 */
- if (match(fromsec, text_sections) &&
- match(tosec, init_sections) &&
- match(fromsym, optim_symbols))
- return 0;
-
- /* Check for pattern 6 */
- if (strstarts(fromsym, ".L"))
+ if (match(fromsec, PATTERNS(ALL_TEXT_SECTIONS)) &&
+ match(tosec, PATTERNS(ALL_INIT_SECTIONS)) &&
+ match(fromsym, PATTERNS("*.constprop.*")))
return 0;
return 1;
@@ -1131,303 +1055,210 @@ static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
return !is_mapping_symbol(name);
}
-/**
- * Find symbol based on relocation record info.
- * In some cases the symbol supplied is a valid symbol so
- * return refsym. If st_name != 0 we assume this is a valid symbol.
- * In other cases the symbol needs to be looked up in the symbol table
- * based on section and address.
- * **/
-static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
- Elf_Sym *relsym)
+/* Look up the nearest symbol based on the section and the address */
+static Elf_Sym *find_nearest_sym(struct elf_info *elf, Elf_Addr addr,
+ unsigned int secndx, bool allow_negative,
+ Elf_Addr min_distance)
{
Elf_Sym *sym;
Elf_Sym *near = NULL;
- Elf64_Sword distance = 20;
- Elf64_Sword d;
- unsigned int relsym_secindex;
-
- if (relsym->st_name != 0)
- return relsym;
+ Elf_Addr sym_addr, distance;
+ bool is_arm = (elf->hdr->e_machine == EM_ARM);
- relsym_secindex = get_secindex(elf, relsym);
for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
- if (get_secindex(elf, sym) != relsym_secindex)
- continue;
- if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
+ if (get_secindex(elf, sym) != secndx)
continue;
if (!is_valid_name(elf, sym))
continue;
- if (sym->st_value == addr)
- return sym;
- /* Find a symbol nearby - addr are maybe negative */
- d = sym->st_value - addr;
- if (d < 0)
- d = addr - sym->st_value;
- if (d < distance) {
- distance = d;
- near = sym;
- }
- }
- /* We need a close match */
- if (distance < 20)
- return near;
- else
- return NULL;
-}
-/*
- * Find symbols before or equal addr and after addr - in the section sec.
- * If we find two symbols with equal offset prefer one with a valid name.
- * The ELF format may have a better way to detect what type of symbol
- * it is, but this works for now.
- **/
-static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
- const char *sec)
-{
- Elf_Sym *sym;
- Elf_Sym *near = NULL;
- Elf_Addr distance = ~0;
+ sym_addr = sym->st_value;
- for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
- const char *symsec;
+ /*
+ * For ARM Thumb instruction, the bit 0 of st_value is set
+ * if the symbol is STT_FUNC type. Mask it to get the address.
+ */
+ if (is_arm && ELF_ST_TYPE(sym->st_info) == STT_FUNC)
+ sym_addr &= ~1;
- if (is_shndx_special(sym->st_shndx))
- continue;
- symsec = sec_name(elf, get_secindex(elf, sym));
- if (strcmp(symsec, sec) != 0)
- continue;
- if (!is_valid_name(elf, sym))
+ if (addr >= sym_addr)
+ distance = addr - sym_addr;
+ else if (allow_negative)
+ distance = sym_addr - addr;
+ else
continue;
- if (sym->st_value <= addr && addr - sym->st_value <= distance) {
- distance = addr - sym->st_value;
+
+ if (distance <= min_distance) {
+ min_distance = distance;
near = sym;
}
+
+ if (min_distance == 0)
+ break;
}
return near;
}
-static int is_function(Elf_Sym *sym)
+static Elf_Sym *find_fromsym(struct elf_info *elf, Elf_Addr addr,
+ unsigned int secndx)
{
- if (sym)
- return ELF_ST_TYPE(sym->st_info) == STT_FUNC;
- else
- return -1;
+ return find_nearest_sym(elf, addr, secndx, false, ~0);
}
-static inline void get_pretty_name(int is_func, const char** name, const char** name_p)
+static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym)
{
- switch (is_func) {
- case 0: *name = "variable"; *name_p = ""; break;
- case 1: *name = "function"; *name_p = "()"; break;
- default: *name = "(unknown reference)"; *name_p = ""; break;
- }
+ /* If the supplied symbol has a valid name, return it */
+ if (is_valid_name(elf, sym))
+ return sym;
+
+ /*
+ * Strive to find a better symbol name, but the resulting name may not
+ * match the symbol referenced in the original code.
+ */
+ return find_nearest_sym(elf, addr, get_secindex(elf, sym), true, 20);
}
-/*
- * Print a warning about a section mismatch.
- * Try to find symbols near it so user can find it.
- * Check whitelist before warning - it may be a false positive.
- */
-static void report_sec_mismatch(const char *modname,
- const struct sectioncheck *mismatch,
- const char *fromsec,
- const char *fromsym,
- const char *tosec, const char *tosym)
+static bool is_executable_section(struct elf_info *elf, unsigned int secndx)
{
- sec_mismatch_count++;
+ if (secndx >= elf->num_sections)
+ return false;
- switch (mismatch->mismatch) {
- case TEXT_TO_ANY_INIT:
- case DATA_TO_ANY_INIT:
- case TEXT_TO_ANY_EXIT:
- case DATA_TO_ANY_EXIT:
- case XXXINIT_TO_SOME_INIT:
- case XXXEXIT_TO_SOME_EXIT:
- case ANY_INIT_TO_ANY_EXIT:
- case ANY_EXIT_TO_ANY_INIT:
- warn("%s: section mismatch in reference: %s (section: %s) -> %s (section: %s)\n",
- modname, fromsym, fromsec, tosym, tosec);
- break;
- case EXPORT_TO_INIT_EXIT:
- warn("%s: EXPORT_SYMBOL used for init/exit symbol: %s (section: %s)\n",
- modname, tosym, tosec);
- break;
- case EXTABLE_TO_NON_TEXT:
- fatal("There's a special handler for this mismatch type, we should never get here.\n");
- break;
- }
+ return (elf->sechdrs[secndx].sh_flags & SHF_EXECINSTR) != 0;
}
static void default_mismatch_handler(const char *modname, struct elf_info *elf,
const struct sectioncheck* const mismatch,
- Elf_Rela *r, Elf_Sym *sym, const char *fromsec)
+ Elf_Sym *tsym,
+ unsigned int fsecndx, const char *fromsec, Elf_Addr faddr,
+ const char *tosec, Elf_Addr taddr)
{
- const char *tosec;
- Elf_Sym *to;
Elf_Sym *from;
const char *tosym;
const char *fromsym;
- from = find_elf_symbol2(elf, r->r_offset, fromsec);
+ from = find_fromsym(elf, faddr, fsecndx);
fromsym = sym_name(elf, from);
- tosec = sec_name(elf, get_secindex(elf, sym));
- to = find_elf_symbol(elf, r->r_addend, sym);
- tosym = sym_name(elf, to);
+ tsym = find_tosym(elf, taddr, tsym);
+ tosym = sym_name(elf, tsym);
/* check whitelist - we may ignore it */
- if (secref_whitelist(mismatch,
- fromsec, fromsym, tosec, tosym)) {
- report_sec_mismatch(modname, mismatch,
- fromsec, fromsym, tosec, tosym);
+ if (!secref_whitelist(fromsec, fromsym, tosec, tosym))
+ return;
+
+ sec_mismatch_count++;
+
+ warn("%s: section mismatch in reference: %s+0x%x (section: %s) -> %s (section: %s)\n",
+ modname, fromsym, (unsigned int)(faddr - from->st_value), fromsec, tosym, tosec);
+
+ if (mismatch->mismatch == EXTABLE_TO_NON_TEXT) {
+ if (match(tosec, mismatch->bad_tosec))
+ fatal("The relocation at %s+0x%lx references\n"
+ "section \"%s\" which is black-listed.\n"
+ "Something is seriously wrong and should be fixed.\n"
+ "You might get more information about where this is\n"
+ "coming from by using scripts/check_extable.sh %s\n",
+ fromsec, (long)faddr, tosec, modname);
+ else if (is_executable_section(elf, get_secindex(elf, tsym)))
+ warn("The relocation at %s+0x%lx references\n"
+ "section \"%s\" which is not in the list of\n"
+ "authorized sections. If you're adding a new section\n"
+ "and/or if this reference is valid, add \"%s\" to the\n"
+ "list of authorized sections to jump to on fault.\n"
+ "This can be achieved by adding \"%s\" to\n"
+ "OTHER_TEXT_SECTIONS in scripts/mod/modpost.c.\n",
+ fromsec, (long)faddr, tosec, tosec, tosec);
+ else
+ error("%s+0x%lx references non-executable section '%s'\n",
+ fromsec, (long)faddr, tosec);
}
}
-static int is_executable_section(struct elf_info* elf, unsigned int section_index)
+static void check_export_symbol(struct module *mod, struct elf_info *elf,
+ Elf_Addr faddr, const char *secname,
+ Elf_Sym *sym)
{
- if (section_index > elf->num_sections)
- fatal("section_index is outside elf->num_sections!\n");
+ static const char *prefix = "__export_symbol_";
+ const char *label_name, *name, *data;
+ Elf_Sym *label;
+ struct symbol *s;
+ bool is_gpl;
- return ((elf->sechdrs[section_index].sh_flags & SHF_EXECINSTR) == SHF_EXECINSTR);
-}
+ label = find_fromsym(elf, faddr, elf->export_symbol_secndx);
+ label_name = sym_name(elf, label);
-/*
- * We rely on a gross hack in section_rel[a]() calling find_extable_entry_size()
- * to know the sizeof(struct exception_table_entry) for the target architecture.
- */
-static unsigned int extable_entry_size = 0;
-static void find_extable_entry_size(const char* const sec, const Elf_Rela* r)
-{
- /*
- * If we're currently checking the second relocation within __ex_table,
- * that relocation offset tells us the offsetof(struct
- * exception_table_entry, fixup) which is equal to sizeof(struct
- * exception_table_entry) divided by two. We use that to our advantage
- * since there's no portable way to get that size as every architecture
- * seems to go with different sized types. Not pretty but better than
- * hard-coding the size for every architecture..
- */
- if (!extable_entry_size)
- extable_entry_size = r->r_offset * 2;
-}
+ if (!strstarts(label_name, prefix)) {
+ error("%s: .export_symbol section contains strange symbol '%s'\n",
+ mod->name, label_name);
+ return;
+ }
-static inline bool is_extable_fault_address(Elf_Rela *r)
-{
- /*
- * extable_entry_size is only discovered after we've handled the
- * _second_ relocation in __ex_table, so only abort when we're not
- * handling the first reloc and extable_entry_size is zero.
- */
- if (r->r_offset && extable_entry_size == 0)
- fatal("extable_entry size hasn't been discovered!\n");
-
- return ((r->r_offset == 0) ||
- (r->r_offset % extable_entry_size == 0));
-}
-
-#define is_second_extable_reloc(Start, Cur, Sec) \
- (((Cur) == (Start) + 1) && (strcmp("__ex_table", (Sec)) == 0))
-
-static void report_extable_warnings(const char* modname, struct elf_info* elf,
- const struct sectioncheck* const mismatch,
- Elf_Rela* r, Elf_Sym* sym,
- const char* fromsec, const char* tosec)
-{
- Elf_Sym* fromsym = find_elf_symbol2(elf, r->r_offset, fromsec);
- const char* fromsym_name = sym_name(elf, fromsym);
- Elf_Sym* tosym = find_elf_symbol(elf, r->r_addend, sym);
- const char* tosym_name = sym_name(elf, tosym);
- const char* from_pretty_name;
- const char* from_pretty_name_p;
- const char* to_pretty_name;
- const char* to_pretty_name_p;
-
- get_pretty_name(is_function(fromsym),
- &from_pretty_name, &from_pretty_name_p);
- get_pretty_name(is_function(tosym),
- &to_pretty_name, &to_pretty_name_p);
-
- warn("%s(%s+0x%lx): Section mismatch in reference from the %s %s%s to the %s %s:%s%s\n",
- modname, fromsec, (long)r->r_offset, from_pretty_name,
- fromsym_name, from_pretty_name_p,
- to_pretty_name, tosec, tosym_name, to_pretty_name_p);
-
- if (!match(tosec, mismatch->bad_tosec) &&
- is_executable_section(elf, get_secindex(elf, sym)))
- fprintf(stderr,
- "The relocation at %s+0x%lx references\n"
- "section \"%s\" which is not in the list of\n"
- "authorized sections. If you're adding a new section\n"
- "and/or if this reference is valid, add \"%s\" to the\n"
- "list of authorized sections to jump to on fault.\n"
- "This can be achieved by adding \"%s\" to \n"
- "OTHER_TEXT_SECTIONS in scripts/mod/modpost.c.\n",
- fromsec, (long)r->r_offset, tosec, tosec, tosec);
-}
-
-static void extable_mismatch_handler(const char* modname, struct elf_info *elf,
- const struct sectioncheck* const mismatch,
- Elf_Rela* r, Elf_Sym* sym,
- const char *fromsec)
-{
- const char* tosec = sec_name(elf, get_secindex(elf, sym));
+ if (ELF_ST_BIND(sym->st_info) != STB_GLOBAL &&
+ ELF_ST_BIND(sym->st_info) != STB_WEAK) {
+ error("%s: local symbol '%s' was exported\n", mod->name,
+ label_name + strlen(prefix));
+ return;
+ }
- sec_mismatch_count++;
+ name = sym_name(elf, sym);
+ if (strcmp(label_name + strlen(prefix), name)) {
+ error("%s: .export_symbol section references '%s', but it does not seem to be an export symbol\n",
+ mod->name, name);
+ return;
+ }
- report_extable_warnings(modname, elf, mismatch, r, sym, fromsec, tosec);
-
- if (match(tosec, mismatch->bad_tosec))
- fatal("The relocation at %s+0x%lx references\n"
- "section \"%s\" which is black-listed.\n"
- "Something is seriously wrong and should be fixed.\n"
- "You might get more information about where this is\n"
- "coming from by using scripts/check_extable.sh %s\n",
- fromsec, (long)r->r_offset, tosec, modname);
- else if (!is_executable_section(elf, get_secindex(elf, sym))) {
- if (is_extable_fault_address(r))
- fatal("The relocation at %s+0x%lx references\n"
- "section \"%s\" which is not executable, IOW\n"
- "it is not possible for the kernel to fault\n"
- "at that address. Something is seriously wrong\n"
- "and should be fixed.\n",
- fromsec, (long)r->r_offset, tosec);
- else
- fatal("The relocation at %s+0x%lx references\n"
- "section \"%s\" which is not executable, IOW\n"
- "the kernel will fault if it ever tries to\n"
- "jump to it. Something is seriously wrong\n"
- "and should be fixed.\n",
- fromsec, (long)r->r_offset, tosec);
+ data = sym_get_data(elf, label); /* license */
+ if (!strcmp(data, "GPL")) {
+ is_gpl = true;
+ } else if (!strcmp(data, "")) {
+ is_gpl = false;
+ } else {
+ error("%s: unknown license '%s' was specified for '%s'\n",
+ mod->name, data, name);
+ return;
}
+
+ data += strlen(data) + 1; /* namespace */
+ s = sym_add_exported(name, mod, is_gpl, data);
+
+ /*
+ * We need to be aware of whether we are exporting a function or
+ * data on some architectures.
+ */
+ s->is_func = (ELF_ST_TYPE(sym->st_info) == STT_FUNC);
+
+ if (match(secname, PATTERNS(INIT_SECTIONS)))
+ warn("%s: %s: EXPORT_SYMBOL used for init symbol. Remove __init or EXPORT_SYMBOL.\n",
+ mod->name, name);
+ else if (match(secname, PATTERNS(EXIT_SECTIONS)))
+ warn("%s: %s: EXPORT_SYMBOL used for exit symbol. Remove __exit or EXPORT_SYMBOL.\n",
+ mod->name, name);
}
-static void check_section_mismatch(const char *modname, struct elf_info *elf,
- Elf_Rela *r, Elf_Sym *sym, const char *fromsec)
+static void check_section_mismatch(struct module *mod, struct elf_info *elf,
+ Elf_Sym *sym,
+ unsigned int fsecndx, const char *fromsec,
+ Elf_Addr faddr, Elf_Addr taddr)
{
const char *tosec = sec_name(elf, get_secindex(elf, sym));
- const struct sectioncheck *mismatch = section_mismatch(fromsec, tosec);
+ const struct sectioncheck *mismatch;
- if (mismatch) {
- if (mismatch->handler)
- mismatch->handler(modname, elf, mismatch,
- r, sym, fromsec);
- else
- default_mismatch_handler(modname, elf, mismatch,
- r, sym, fromsec);
+ if (elf->export_symbol_secndx == fsecndx) {
+ check_export_symbol(mod, elf, faddr, tosec, sym);
+ return;
}
-}
-static unsigned int *reloc_location(struct elf_info *elf,
- Elf_Shdr *sechdr, Elf_Rela *r)
-{
- return sym_get_data_by_offset(elf, sechdr->sh_info, r->r_offset);
+ mismatch = section_mismatch(fromsec, tosec);
+ if (!mismatch)
+ return;
+
+ default_mismatch_handler(mod->name, elf, mismatch, sym,
+ fsecndx, fromsec, faddr,
+ tosec, taddr);
}
-static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
+static int addend_386_rel(uint32_t *location, Elf_Rela *r)
{
unsigned int r_typ = ELF_R_TYPE(r->r_info);
- unsigned int *location = reloc_location(elf, sechdr, r);
switch (r_typ) {
case R_386_32:
@@ -1436,6 +1267,8 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
case R_386_PC32:
r->r_addend = TO_NATIVE(*location) + 4;
break;
+ default:
+ r->r_addend = (Elf_Addr)(-1);
}
return 0;
}
@@ -1453,45 +1286,131 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
#ifndef R_ARM_THM_JUMP24
#define R_ARM_THM_JUMP24 30
#endif
+
+#ifndef R_ARM_MOVW_ABS_NC
+#define R_ARM_MOVW_ABS_NC 43
+#endif
+
+#ifndef R_ARM_MOVT_ABS
+#define R_ARM_MOVT_ABS 44
+#endif
+
+#ifndef R_ARM_THM_MOVW_ABS_NC
+#define R_ARM_THM_MOVW_ABS_NC 47
+#endif
+
+#ifndef R_ARM_THM_MOVT_ABS
+#define R_ARM_THM_MOVT_ABS 48
+#endif
+
#ifndef R_ARM_THM_JUMP19
#define R_ARM_THM_JUMP19 51
#endif
-static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
+static int32_t sign_extend32(int32_t value, int index)
+{
+ uint8_t shift = 31 - index;
+
+ return (int32_t)(value << shift) >> shift;
+}
+
+static int addend_arm_rel(void *loc, Elf_Sym *sym, Elf_Rela *r)
{
unsigned int r_typ = ELF_R_TYPE(r->r_info);
+ uint32_t inst, upper, lower, sign, j1, j2;
+ int32_t offset;
switch (r_typ) {
case R_ARM_ABS32:
- /* From ARM ABI: (S + A) | T */
- r->r_addend = (int)(long)
- (elf->symtab_start + ELF_R_SYM(r->r_info));
+ case R_ARM_REL32:
+ inst = TO_NATIVE(*(uint32_t *)loc);
+ r->r_addend = inst + sym->st_value;
+ break;
+ case R_ARM_MOVW_ABS_NC:
+ case R_ARM_MOVT_ABS:
+ inst = TO_NATIVE(*(uint32_t *)loc);
+ offset = sign_extend32(((inst & 0xf0000) >> 4) | (inst & 0xfff),
+ 15);
+ r->r_addend = offset + sym->st_value;
break;
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
+ inst = TO_NATIVE(*(uint32_t *)loc);
+ offset = sign_extend32((inst & 0x00ffffff) << 2, 25);
+ r->r_addend = offset + sym->st_value + 8;
+ break;
+ case R_ARM_THM_MOVW_ABS_NC:
+ case R_ARM_THM_MOVT_ABS:
+ upper = TO_NATIVE(*(uint16_t *)loc);
+ lower = TO_NATIVE(*((uint16_t *)loc + 1));
+ offset = sign_extend32(((upper & 0x000f) << 12) |
+ ((upper & 0x0400) << 1) |
+ ((lower & 0x7000) >> 4) |
+ (lower & 0x00ff),
+ 15);
+ r->r_addend = offset + sym->st_value;
+ break;
+ case R_ARM_THM_JUMP19:
+ /*
+ * Encoding T3:
+ * S = upper[10]
+ * imm6 = upper[5:0]
+ * J1 = lower[13]
+ * J2 = lower[11]
+ * imm11 = lower[10:0]
+ * imm32 = SignExtend(S:J2:J1:imm6:imm11:'0')
+ */
+ upper = TO_NATIVE(*(uint16_t *)loc);
+ lower = TO_NATIVE(*((uint16_t *)loc + 1));
+
+ sign = (upper >> 10) & 1;
+ j1 = (lower >> 13) & 1;
+ j2 = (lower >> 11) & 1;
+ offset = sign_extend32((sign << 20) | (j2 << 19) | (j1 << 18) |
+ ((upper & 0x03f) << 12) |
+ ((lower & 0x07ff) << 1),
+ 20);
+ r->r_addend = offset + sym->st_value + 4;
+ break;
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
- case R_ARM_THM_JUMP19:
- /* From ARM ABI: ((S + A) | T) - P */
- r->r_addend = (int)(long)(elf->hdr +
- sechdr->sh_offset +
- (r->r_offset - sechdr->sh_addr));
+ /*
+ * Encoding T4:
+ * S = upper[10]
+ * imm10 = upper[9:0]
+ * J1 = lower[13]
+ * J2 = lower[11]
+ * imm11 = lower[10:0]
+ * I1 = NOT(J1 XOR S)
+ * I2 = NOT(J2 XOR S)
+ * imm32 = SignExtend(S:I1:I2:imm10:imm11:'0')
+ */
+ upper = TO_NATIVE(*(uint16_t *)loc);
+ lower = TO_NATIVE(*((uint16_t *)loc + 1));
+
+ sign = (upper >> 10) & 1;
+ j1 = (lower >> 13) & 1;
+ j2 = (lower >> 11) & 1;
+ offset = sign_extend32((sign << 24) |
+ ((~(j1 ^ sign) & 1) << 23) |
+ ((~(j2 ^ sign) & 1) << 22) |
+ ((upper & 0x03ff) << 12) |
+ ((lower & 0x07ff) << 1),
+ 24);
+ r->r_addend = offset + sym->st_value + 4;
break;
default:
- return 1;
+ r->r_addend = (Elf_Addr)(-1);
}
return 0;
}
-static int addend_mips_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
+static int addend_mips_rel(uint32_t *location, Elf_Rela *r)
{
unsigned int r_typ = ELF_R_TYPE(r->r_info);
- unsigned int *location = reloc_location(elf, sechdr, r);
- unsigned int inst;
+ uint32_t inst;
- if (r_typ == R_MIPS_HI16)
- return 1; /* skip this */
inst = TO_NATIVE(*location);
switch (r_typ) {
case R_MIPS_LO16:
@@ -1503,6 +1422,8 @@ static int addend_mips_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
case R_MIPS_32:
r->r_addend = inst;
break;
+ default:
+ r->r_addend = (Elf_Addr)(-1);
}
return 0;
}
@@ -1523,19 +1444,17 @@ static int addend_mips_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
#define R_LARCH_SUB32 55
#endif
-static void section_rela(const char *modname, struct elf_info *elf,
+static void section_rela(struct module *mod, struct elf_info *elf,
Elf_Shdr *sechdr)
{
- Elf_Sym *sym;
Elf_Rela *rela;
Elf_Rela r;
unsigned int r_sym;
- const char *fromsec;
-
+ unsigned int fsecndx = sechdr->sh_info;
+ const char *fromsec = sec_name(elf, fsecndx);
Elf_Rela *start = (void *)elf->hdr + sechdr->sh_offset;
Elf_Rela *stop = (void *)start + sechdr->sh_size;
- fromsec = sec_name(elf, sechdr->sh_info);
/* if from section (name) is known good then skip it */
if (match(fromsec, section_white_list))
return;
@@ -1570,34 +1489,31 @@ static void section_rela(const char *modname, struct elf_info *elf,
continue;
break;
}
- sym = elf->symtab_start + r_sym;
- /* Skip special sections */
- if (is_shndx_special(sym->st_shndx))
- continue;
- if (is_second_extable_reloc(start, rela, fromsec))
- find_extable_entry_size(fromsec, &r);
- check_section_mismatch(modname, elf, &r, sym, fromsec);
+
+ check_section_mismatch(mod, elf, elf->symtab_start + r_sym,
+ fsecndx, fromsec, r.r_offset, r.r_addend);
}
}
-static void section_rel(const char *modname, struct elf_info *elf,
+static void section_rel(struct module *mod, struct elf_info *elf,
Elf_Shdr *sechdr)
{
- Elf_Sym *sym;
Elf_Rel *rel;
Elf_Rela r;
unsigned int r_sym;
- const char *fromsec;
-
+ unsigned int fsecndx = sechdr->sh_info;
+ const char *fromsec = sec_name(elf, fsecndx);
Elf_Rel *start = (void *)elf->hdr + sechdr->sh_offset;
Elf_Rel *stop = (void *)start + sechdr->sh_size;
- fromsec = sec_name(elf, sechdr->sh_info);
/* if from section (name) is known good then skip it */
if (match(fromsec, section_white_list))
return;
for (rel = start; rel < stop; rel++) {
+ Elf_Sym *tsym;
+ void *loc;
+
r.r_offset = TO_NATIVE(rel->r_offset);
#if KERNEL_ELFCLASS == ELFCLASS64
if (elf->hdr->e_machine == EM_MIPS) {
@@ -1615,27 +1531,26 @@ static void section_rel(const char *modname, struct elf_info *elf,
r_sym = ELF_R_SYM(r.r_info);
#endif
r.r_addend = 0;
+
+ loc = sym_get_data_by_offset(elf, fsecndx, r.r_offset);
+ tsym = elf->symtab_start + r_sym;
+
switch (elf->hdr->e_machine) {
case EM_386:
- if (addend_386_rel(elf, sechdr, &r))
- continue;
+ addend_386_rel(loc, &r);
break;
case EM_ARM:
- if (addend_arm_rel(elf, sechdr, &r))
- continue;
+ addend_arm_rel(loc, tsym, &r);
break;
case EM_MIPS:
- if (addend_mips_rel(elf, sechdr, &r))
- continue;
+ addend_mips_rel(loc, &r);
break;
+ default:
+ fatal("Please add code to calculate addend for this architecture\n");
}
- sym = elf->symtab_start + r_sym;
- /* Skip special sections */
- if (is_shndx_special(sym->st_shndx))
- continue;
- if (is_second_extable_reloc(start, rel, fromsec))
- find_extable_entry_size(fromsec, &r);
- check_section_mismatch(modname, elf, &r, sym, fromsec);
+
+ check_section_mismatch(mod, elf, tsym,
+ fsecndx, fromsec, r.r_offset, r.r_addend);
}
}
@@ -1651,19 +1566,19 @@ static void section_rel(const char *modname, struct elf_info *elf,
* to find all references to a section that reference a section that will
* be discarded and warns about it.
**/
-static void check_sec_ref(const char *modname, struct elf_info *elf)
+static void check_sec_ref(struct module *mod, struct elf_info *elf)
{
int i;
Elf_Shdr *sechdrs = elf->sechdrs;
/* Walk through all sections */
for (i = 0; i < elf->num_sections; i++) {
- check_section(modname, elf, &elf->sechdrs[i]);
+ check_section(mod->name, elf, &elf->sechdrs[i]);
/* We want to process only relocation sections and not .init */
if (sechdrs[i].sh_type == SHT_RELA)
- section_rela(modname, elf, &elf->sechdrs[i]);
+ section_rela(mod, elf, &elf->sechdrs[i]);
else if (sechdrs[i].sh_type == SHT_REL)
- section_rel(modname, elf, &elf->sechdrs[i]);
+ section_rel(mod, elf, &elf->sechdrs[i]);
}
}
@@ -1818,6 +1733,8 @@ static void read_symbols(const char *modname)
}
}
+ if (extra_warn && !get_modinfo(&info, "description"))
+ warn("missing MODULE_DESCRIPTION() in %s\n", modname);
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
symname = remove_dot(info.strtab + sym->st_name);
@@ -1825,16 +1742,7 @@ static void read_symbols(const char *modname)
handle_moddevtable(mod, &info, sym, symname);
}
- for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
- symname = remove_dot(info.strtab + sym->st_name);
-
- /* Apply symbol namespaces from __kstrtabns_<symbol> entries. */
- if (strstarts(symname, "__kstrtabns_"))
- sym_update_namespace(symname + strlen("__kstrtabns_"),
- sym_get_data(&info, sym));
- }
-
- check_sec_ref(modname, &info);
+ check_sec_ref(mod, &info);
if (!mod->is_vmlinux) {
version = get_modinfo(&info, "version");
@@ -1925,6 +1833,7 @@ static void check_exports(struct module *mod)
continue;
}
+ exp->used = true;
s->module = exp->module;
s->crc_valid = exp->crc_valid;
s->crc = exp->crc;
@@ -1935,8 +1844,7 @@ static void check_exports(struct module *mod)
else
basename = mod->name;
- if (exp->namespace &&
- !contains_namespace(&mod->imported_namespaces, exp->namespace)) {
+ if (!contains_namespace(&mod->imported_namespaces, exp->namespace)) {
modpost_log(allow_missing_ns_imports ? LOG_WARN : LOG_ERROR,
"module %s uses symbol %s from namespace %s, but does not import it.\n",
basename, exp->name, exp->namespace);
@@ -1949,6 +1857,23 @@ static void check_exports(struct module *mod)
}
}
+static void handle_white_list_exports(const char *white_list)
+{
+ char *buf, *p, *name;
+
+ buf = read_text_file(white_list);
+ p = buf;
+
+ while ((name = strsep(&p, "\n"))) {
+ struct symbol *sym = find_symbol(name);
+
+ if (sym)
+ sym->used = true;
+ }
+
+ free(buf);
+}
+
static void check_modname_len(struct module *mod)
{
const char *mod_name;
@@ -2022,12 +1947,26 @@ static void add_exported_symbols(struct buffer *buf, struct module *mod)
{
struct symbol *sym;
+ /* generate struct for exported symbols */
+ buf_printf(buf, "\n");
+ list_for_each_entry(sym, &mod->exported_symbols, list) {
+ if (trim_unused_exports && !sym->used)
+ continue;
+
+ buf_printf(buf, "KSYMTAB_%s(%s, \"%s\", \"%s\");\n",
+ sym->is_func ? "FUNC" : "DATA", sym->name,
+ sym->is_gpl_only ? "_gpl" : "", sym->namespace);
+ }
+
if (!modversions)
return;
/* record CRCs for exported symbols */
buf_printf(buf, "\n");
list_for_each_entry(sym, &mod->exported_symbols, list) {
+ if (trim_unused_exports && !sym->used)
+ continue;
+
if (!sym->crc_valid)
warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n"
"Is \"%s\" prototyped in <asm/asm-prototypes.h>?\n",
@@ -2191,9 +2130,6 @@ static void write_mod_c_file(struct module *mod)
char fname[PATH_MAX];
int ret;
- check_modname_len(mod);
- check_exports(mod);
-
add_header(&buf, mod);
add_exported_symbols(&buf, mod);
add_versions(&buf, mod);
@@ -2265,9 +2201,8 @@ static void read_dump(const char *fname)
mod = new_module(modname, strlen(modname));
mod->from_dump = true;
}
- s = sym_add_exported(symname, mod, gpl_only);
+ s = sym_add_exported(symname, mod, gpl_only, namespace);
sym_set_crc(s, crc);
- sym_update_namespace(symname, namespace);
}
free(buf);
return;
@@ -2286,10 +2221,13 @@ static void write_dump(const char *fname)
if (mod->from_dump)
continue;
list_for_each_entry(sym, &mod->exported_symbols, list) {
+ if (trim_unused_exports && !sym->used)
+ continue;
+
buf_printf(&buf, "0x%08x\t%s\t%s\tEXPORT_SYMBOL%s\t%s\n",
sym->crc, sym->name, mod->name,
sym->is_gpl_only ? "_GPL" : "",
- sym->namespace ?: "");
+ sym->namespace);
}
}
write_buf(&buf, fname);
@@ -2328,12 +2266,13 @@ int main(int argc, char **argv)
{
struct module *mod;
char *missing_namespace_deps = NULL;
+ char *unused_exports_white_list = NULL;
char *dump_write = NULL, *files_source = NULL;
int opt;
LIST_HEAD(dump_lists);
struct dump_list *dl, *dl2;
- while ((opt = getopt(argc, argv, "ei:mnT:o:awENd:")) != -1) {
+ while ((opt = getopt(argc, argv, "ei:mnT:to:au:WwENd:")) != -1) {
switch (opt) {
case 'e':
external_module = true;
@@ -2358,6 +2297,15 @@ int main(int argc, char **argv)
case 'T':
files_source = optarg;
break;
+ case 't':
+ trim_unused_exports = true;
+ break;
+ case 'u':
+ unused_exports_white_list = optarg;
+ break;
+ case 'W':
+ extra_warn = true;
+ break;
case 'w':
warn_unresolved = true;
break;
@@ -2388,6 +2336,17 @@ int main(int argc, char **argv)
read_symbols_from_files(files_source);
list_for_each_entry(mod, &modules, list) {
+ if (mod->from_dump || mod->is_vmlinux)
+ continue;
+
+ check_modname_len(mod);
+ check_exports(mod);
+ }
+
+ if (unused_exports_white_list)
+ handle_white_list_exports(unused_exports_white_list);
+
+ list_for_each_entry(mod, &modules, list) {
if (mod->from_dump)
continue;
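
For context, the two new modpost options above work as a mark-and-sweep pass over the exported symbols: check_exports() sets exp->used for every symbol a module actually resolves, the optional -u whitelist marks further symbols via find_symbol(), and only afterwards are the per-module C files and the dump written, skipping unused entries when -t is given. A minimal sketch of that flow follows; the struct layout and emit_ksymtab() helper are stand-ins, not the actual modpost code.

	#include <stdbool.h>
	#include <stddef.h>

	struct export { const char *name; bool used; };

	void emit_ksymtab(const struct export *e);	/* hypothetical emitter */

	static void emit_exports(const struct export *tab, size_t n, bool trim)
	{
		for (size_t i = 0; i < n; i++) {
			if (trim && !tab[i].used)
				continue;	/* trimmed: no KSYMTAB entry emitted */
			emit_ksymtab(&tab[i]);
		}
	}
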
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 1178f40a73f3..dfdb9484e325 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -137,6 +137,7 @@ struct elf_info {
Elf_Shdr *sechdrs;
Elf_Sym *symtab_start;
Elf_Sym *symtab_stop;
+ unsigned int export_symbol_secndx; /* .export_symbol section */
char *strtab;
char *modinfo;
unsigned int modinfo_len;
@@ -151,11 +152,6 @@ struct elf_info {
Elf32_Word *symtab_shndx_stop;
};
-static inline int is_shndx_special(unsigned int i)
-{
- return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
-}
-
/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(const struct elf_info *info,
const Elf_Sym *sym)
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 252faaa5561c..032774eb061e 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -62,18 +62,14 @@ install_linux_image () {
${MAKE} -f ${srctree}/Makefile INSTALL_DTBS_PATH="${pdir}/usr/lib/linux-image-${KERNELRELEASE}" dtbs_install
fi
- if is_enabled CONFIG_MODULES; then
- ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
- rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
- rm -f "${pdir}/lib/modules/${KERNELRELEASE}/source"
- if [ "${SRCARCH}" = um ] ; then
- mkdir -p "${pdir}/usr/lib/uml/modules"
- mv "${pdir}/lib/modules/${KERNELRELEASE}" "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}"
- fi
- fi
+ ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
+ rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
+ rm -f "${pdir}/lib/modules/${KERNELRELEASE}/source"
# Install the kernel
if [ "${ARCH}" = um ] ; then
+ mkdir -p "${pdir}/usr/lib/uml/modules"
+ mv "${pdir}/lib/modules/${KERNELRELEASE}" "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}"
mkdir -p "${pdir}/usr/bin" "${pdir}/usr/share/doc/${pname}"
cp System.map "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}/System.map"
cp ${KCONFIG_CONFIG} "${pdir}/usr/share/doc/${pname}/config"
diff --git a/scripts/remove-stale-files b/scripts/remove-stale-files
index 7f432900671a..f3659ea0335b 100755
--- a/scripts/remove-stale-files
+++ b/scripts/remove-stale-files
@@ -33,3 +33,7 @@ rm -f rust/target.json
rm -f scripts/bin2c
rm -f .scmversion
+
+rm -rf include/ksym
+
+find . -name '*.usyms' | xargs rm -f
diff --git a/scripts/tags.sh b/scripts/tags.sh
index ea31640b2671..a70d43723146 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -32,6 +32,13 @@ else
tree=${srctree}/
fi
+# gtags(1) refuses to index any file outside of its current working dir.
+# If gtags indexing is requested and the build output directory is not
+# the kernel source tree, index all files in absolute-path form.
+if [[ "$1" == "gtags" && -n "${tree}" ]]; then
+ tree=$(realpath "$tree")/
+fi
+
# Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
if [ "${ALLSOURCE_ARCHS}" = "" ]; then
ALLSOURCE_ARCHS=${SRCARCH}
@@ -98,7 +105,7 @@ all_compiled_sources()
{
echo include/generated/autoconf.h
find $ignore -name "*.cmd" -exec \
- sed -n -E 's/^source_.* (.*)/\1/p; s/^ (\S.*) \\/\1/p' {} \+ |
+ grep -Poh '(?<=^ )\S+|(?<== )\S+[^\\](?=$)' {} \+ |
awk '!a[$0]++'
} | xargs realpath -esq $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) |
sort -u
@@ -131,7 +138,7 @@ docscope()
dogtags()
{
- all_target_sources | gtags -i -f -
+ all_target_sources | gtags -i -C "${tree:-.}" -f - "$PWD"
}
# Basic regular expressions with an optional /kind-spec/ for ctags and
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index b498ed302461..6724e2ff6da8 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -28,15 +28,15 @@ unsigned int aa_hash_size(void)
char *aa_calc_hash(void *data, size_t len)
{
SHASH_DESC_ON_STACK(desc, apparmor_tfm);
- char *hash = NULL;
- int error = -ENOMEM;
+ char *hash;
+ int error;
if (!apparmor_tfm)
return NULL;
hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
if (!hash)
- goto fail;
+ return ERR_PTR(-ENOMEM);
desc->tfm = apparmor_tfm;
@@ -62,7 +62,7 @@ int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
size_t len)
{
SHASH_DESC_ON_STACK(desc, apparmor_tfm);
- int error = -ENOMEM;
+ int error;
__le32 le32_version = cpu_to_le32(version);
if (!aa_g_hash_policy)
@@ -73,7 +73,7 @@ int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
if (!profile->hash)
- goto fail;
+ return -ENOMEM;
desc->tfm = apparmor_tfm;
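
With the kzalloc() failure now reported as ERR_PTR(-ENOMEM) instead of falling through the fail label, callers can distinguish a missing transform (NULL) from an allocation error. A hypothetical caller, assuming it wants to propagate the errno; struct example and the surrounding function are illustrative only:

	/* aa_calc_hash() may return NULL (no hashing) or an ERR_PTR value. */
	static int example_update_hash(struct example *ex, void *data, size_t len)
	{
		char *hash = aa_calc_hash(data, len);

		if (IS_ERR(hash))
			return PTR_ERR(hash);	/* e.g. -ENOMEM */
		if (!hash)
			return 0;		/* hashing not available */
		ex->hash = hash;
		return 0;
	}
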
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 9119ddda6217..698b124e649f 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -161,6 +161,7 @@ static int path_name(const char *op, struct aa_label *label,
return 0;
}
+struct aa_perms default_perms = {};
/**
* aa_lookup_fperms - convert dfa compressed perms to internal perms
* @dfa: dfa to lookup perms for (NOT NULL)
@@ -171,7 +172,6 @@ static int path_name(const char *op, struct aa_label *label,
*
* Returns: a pointer to a file permission set
*/
-struct aa_perms default_perms = {};
struct aa_perms *aa_lookup_fperms(struct aa_policydb *file_rules,
aa_state_t state, struct path_cond *cond)
{
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index f1a29ab7ea1b..73c8a32c6861 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -232,7 +232,7 @@ void aa_policy_destroy(struct aa_policy *policy);
*/
#define fn_label_build(L, P, GFP, FN) \
({ \
- __label__ __cleanup, __done; \
+ __label__ __do_cleanup, __done; \
struct aa_label *__new_; \
\
if ((L)->size > 1) { \
@@ -250,7 +250,7 @@ void aa_policy_destroy(struct aa_policy *policy);
__new_ = (FN); \
AA_BUG(!__new_); \
if (IS_ERR(__new_)) \
- goto __cleanup; \
+ goto __do_cleanup; \
__lvec[__j++] = __new_; \
} \
for (__j = __count = 0; __j < (L)->size; __j++) \
@@ -272,7 +272,7 @@ void aa_policy_destroy(struct aa_policy *policy);
vec_cleanup(profile, __pvec, __count); \
} else \
__new_ = NULL; \
-__cleanup: \
+__do_cleanup: \
vec_cleanup(label, __lvec, (L)->size); \
} else { \
(P) = labels_profile(L); \
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index f431251ffb91..c9463bd0307d 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -46,7 +46,7 @@ int apparmor_initialized;
union aa_buffer {
struct list_head list;
- char buffer[1];
+ DECLARE_FLEX_ARRAY(char, buffer);
};
#define RESERVE_COUNT 2
@@ -1647,7 +1647,7 @@ retry:
list_del(&aa_buf->list);
buffer_count--;
spin_unlock(&aa_buffers_lock);
- return &aa_buf->buffer[0];
+ return aa_buf->buffer;
}
if (in_atomic) {
/*
@@ -1670,7 +1670,7 @@ retry:
pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
return NULL;
}
- return &aa_buf->buffer[0];
+ return aa_buf->buffer;
}
void aa_put_buffer(char *buf)
@@ -1747,7 +1747,7 @@ static int __init alloc_buffers(void)
destroy_buffers();
return -ENOMEM;
}
- aa_put_buffer(&aa_buf->buffer[0]);
+ aa_put_buffer(aa_buf->buffer);
}
return 0;
}
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 51e8184e0fec..b38f7b2a5e1d 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -430,11 +430,9 @@ static struct aa_policy *__lookup_parent(struct aa_ns *ns,
* @hname: hierarchical profile name to find parent of (NOT NULL)
* @gfp: type of allocation.
*
- * Returns: NULL on error, parent profile on success
- *
* Requires: ns mutex lock held
*
- * Returns: unrefcounted parent policy or NULL if error creating
+ * Return: unrefcounted parent policy on success or %NULL if error creating
 * placeholder profiles.
*/
static struct aa_policy *__create_missing_ancestors(struct aa_ns *ns,
@@ -591,7 +589,15 @@ struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name,
profile->label.flags |= FLAG_NULL;
rules = list_first_entry(&profile->rules, typeof(*rules), list);
rules->file.dfa = aa_get_dfa(nulldfa);
+ rules->file.perms = kcalloc(2, sizeof(struct aa_perms), GFP_KERNEL);
+ if (!rules->file.perms)
+ goto fail;
+ rules->file.size = 2;
rules->policy.dfa = aa_get_dfa(nulldfa);
+ rules->policy.perms = kcalloc(2, sizeof(struct aa_perms), GFP_KERNEL);
+ if (!rules->policy.perms)
+ goto fail;
+ rules->policy.size = 2;
if (parent) {
profile->path_flags = parent->path_flags;
@@ -602,6 +608,11 @@ struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name,
}
return profile;
+
+fail:
+ aa_free_profile(profile);
+
+ return NULL;
}
/**
@@ -828,7 +839,7 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
/**
* aa_may_manage_policy - can the current task manage policy
* @label: label to check if it can manage policy
- * @op: the policy manipulation operation being done
+ * @mask: contains the policy manipulation operation being done
*
* Returns: 0 if the task is allowed to manipulate policy else error
*/
@@ -883,7 +894,6 @@ static struct aa_profile *__list_lookup_parent(struct list_head *lh,
* __replace_profile - replace @old with @new on a list
* @old: profile to be replaced (NOT NULL)
* @new: profile to replace @old with (NOT NULL)
- * @share_proxy: transfer @old->proxy to @new
*
* Will duplicate and refcount elements that @new inherits from @old
* and will inherit @old children.
diff --git a/security/apparmor/policy_compat.c b/security/apparmor/policy_compat.c
index cc89d1e88fb7..0cb02da8a319 100644
--- a/security/apparmor/policy_compat.c
+++ b/security/apparmor/policy_compat.c
@@ -146,7 +146,8 @@ static struct aa_perms compute_fperms_other(struct aa_dfa *dfa,
*
* Returns: remapped perm table
*/
-static struct aa_perms *compute_fperms(struct aa_dfa *dfa)
+static struct aa_perms *compute_fperms(struct aa_dfa *dfa,
+ u32 *size)
{
aa_state_t state;
unsigned int state_count;
@@ -159,6 +160,7 @@ static struct aa_perms *compute_fperms(struct aa_dfa *dfa)
table = kvcalloc(state_count * 2, sizeof(struct aa_perms), GFP_KERNEL);
if (!table)
return NULL;
+ *size = state_count * 2;
for (state = 0; state < state_count; state++) {
table[state * 2] = compute_fperms_user(dfa, state);
@@ -168,7 +170,8 @@ static struct aa_perms *compute_fperms(struct aa_dfa *dfa)
return table;
}
-static struct aa_perms *compute_xmatch_perms(struct aa_dfa *xmatch)
+static struct aa_perms *compute_xmatch_perms(struct aa_dfa *xmatch,
+ u32 *size)
{
struct aa_perms *perms;
int state;
@@ -179,6 +182,9 @@ static struct aa_perms *compute_xmatch_perms(struct aa_dfa *xmatch)
state_count = xmatch->tables[YYTD_ID_BASE]->td_lolen;
/* DFAs are restricted from having a state_count of less than 2 */
perms = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
+ if (!perms)
+ return NULL;
+ *size = state_count;
/* zero init so skip the trap state (state == 0) */
for (state = 1; state < state_count; state++)
@@ -239,7 +245,8 @@ static struct aa_perms compute_perms_entry(struct aa_dfa *dfa,
return perms;
}
-static struct aa_perms *compute_perms(struct aa_dfa *dfa, u32 version)
+static struct aa_perms *compute_perms(struct aa_dfa *dfa, u32 version,
+ u32 *size)
{
unsigned int state;
unsigned int state_count;
@@ -252,6 +259,7 @@ static struct aa_perms *compute_perms(struct aa_dfa *dfa, u32 version)
table = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
if (!table)
return NULL;
+ *size = state_count;
/* zero init so skip the trap state (state == 0) */
for (state = 1; state < state_count; state++)
@@ -286,7 +294,7 @@ static void remap_dfa_accept(struct aa_dfa *dfa, unsigned int factor)
/* TODO: merge different dfa mappings into single map_policy fn */
int aa_compat_map_xmatch(struct aa_policydb *policy)
{
- policy->perms = compute_xmatch_perms(policy->dfa);
+ policy->perms = compute_xmatch_perms(policy->dfa, &policy->size);
if (!policy->perms)
return -ENOMEM;
@@ -297,7 +305,7 @@ int aa_compat_map_xmatch(struct aa_policydb *policy)
int aa_compat_map_policy(struct aa_policydb *policy, u32 version)
{
- policy->perms = compute_perms(policy->dfa, version);
+ policy->perms = compute_perms(policy->dfa, version, &policy->size);
if (!policy->perms)
return -ENOMEM;
@@ -308,7 +316,7 @@ int aa_compat_map_policy(struct aa_policydb *policy, u32 version)
int aa_compat_map_file(struct aa_policydb *policy)
{
- policy->perms = compute_fperms(policy->dfa);
+ policy->perms = compute_fperms(policy->dfa, &policy->size);
if (!policy->perms)
return -ENOMEM;
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index cf2ceec40b28..694fb7a09962 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -448,7 +448,7 @@ static struct aa_dfa *unpack_dfa(struct aa_ext *e, int flags)
/**
* unpack_trans_table - unpack a profile transition table
* @e: serialized data extent information (NOT NULL)
- * @table: str table to unpack to (NOT NULL)
+ * @strs: str table to unpack to (NOT NULL)
*
* Returns: true if table successfully unpacked or not present
*/
@@ -860,10 +860,12 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
}
profile->attach.xmatch_len = tmp;
profile->attach.xmatch.start[AA_CLASS_XMATCH] = DFA_START;
- error = aa_compat_map_xmatch(&profile->attach.xmatch);
- if (error) {
- info = "failed to convert xmatch permission table";
- goto fail;
+ if (!profile->attach.xmatch.perms) {
+ error = aa_compat_map_xmatch(&profile->attach.xmatch);
+ if (error) {
+ info = "failed to convert xmatch permission table";
+ goto fail;
+ }
}
}
@@ -983,31 +985,54 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
AA_CLASS_FILE);
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
- error = aa_compat_map_policy(&rules->policy, e->version);
- if (error) {
- info = "failed to remap policydb permission table";
- goto fail;
+ if (!rules->policy.perms) {
+ error = aa_compat_map_policy(&rules->policy,
+ e->version);
+ if (error) {
+ info = "failed to remap policydb permission table";
+ goto fail;
+ }
}
- } else
+ } else {
rules->policy.dfa = aa_get_dfa(nulldfa);
-
+ rules->policy.perms = kcalloc(2, sizeof(struct aa_perms),
+ GFP_KERNEL);
+ if (!rules->policy.perms)
+ goto fail;
+ rules->policy.size = 2;
+ }
/* get file rules */
error = unpack_pdb(e, &rules->file, false, true, &info);
if (error) {
goto fail;
} else if (rules->file.dfa) {
- error = aa_compat_map_file(&rules->file);
- if (error) {
- info = "failed to remap file permission table";
- goto fail;
+ if (!rules->file.perms) {
+ error = aa_compat_map_file(&rules->file);
+ if (error) {
+ info = "failed to remap file permission table";
+ goto fail;
+ }
}
} else if (rules->policy.dfa &&
rules->policy.start[AA_CLASS_FILE]) {
rules->file.dfa = aa_get_dfa(rules->policy.dfa);
rules->file.start[AA_CLASS_FILE] = rules->policy.start[AA_CLASS_FILE];
- } else
+ rules->file.perms = kcalloc(rules->policy.size,
+ sizeof(struct aa_perms),
+ GFP_KERNEL);
+ if (!rules->file.perms)
+ goto fail;
+ memcpy(rules->file.perms, rules->policy.perms,
+ rules->policy.size * sizeof(struct aa_perms));
+ rules->file.size = rules->policy.size;
+ } else {
rules->file.dfa = aa_get_dfa(nulldfa);
-
+ rules->file.perms = kcalloc(2, sizeof(struct aa_perms),
+ GFP_KERNEL);
+ if (!rules->file.perms)
+ goto fail;
+ rules->file.size = 2;
+ }
error = -EPROTO;
if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
info = "out of memory";
@@ -1046,8 +1071,13 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
goto fail;
}
- rhashtable_insert_fast(profile->data, &data->head,
- profile->data->p);
+ if (rhashtable_insert_fast(profile->data, &data->head,
+ profile->data->p)) {
+ kfree_sensitive(data->key);
+ kfree_sensitive(data);
+ info = "failed to insert data to table";
+ goto fail;
+ }
}
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
@@ -1134,22 +1164,16 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
return 0;
}
-static bool verify_xindex(int xindex, int table_size)
-{
- int index, xtype;
- xtype = xindex & AA_X_TYPE_MASK;
- index = xindex & AA_X_INDEX_MASK;
- if (xtype == AA_X_TABLE && index >= table_size)
- return false;
- return true;
-}
-
-/* verify dfa xindexes are in range of transition tables */
-static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
+/**
+ * verify_dfa_accept_index - verify accept indexes are in range of perms table
+ * @dfa: the dfa to check accept indexes are in range
+ * @table_size: the permission table size the indexes should be within
+ */
+static bool verify_dfa_accept_index(struct aa_dfa *dfa, int table_size)
{
int i;
for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
- if (!verify_xindex(ACCEPT_TABLE(dfa)[i], table_size))
+ if (ACCEPT_TABLE(dfa)[i] >= table_size)
return false;
}
return true;
@@ -1186,11 +1210,13 @@ static bool verify_perms(struct aa_policydb *pdb)
if (!verify_perm(&pdb->perms[i]))
return false;
/* verify indexes into str table */
- if (pdb->perms[i].xindex >= pdb->trans.size)
+ if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE &&
+ (pdb->perms[i].xindex & AA_X_INDEX_MASK) >= pdb->trans.size)
return false;
- if (pdb->perms[i].tag >= pdb->trans.size)
+ if (pdb->perms[i].tag && pdb->perms[i].tag >= pdb->trans.size)
return false;
- if (pdb->perms[i].label >= pdb->trans.size)
+ if (pdb->perms[i].label &&
+ pdb->perms[i].label >= pdb->trans.size)
return false;
}
@@ -1212,10 +1238,10 @@ static int verify_profile(struct aa_profile *profile)
if (!rules)
return 0;
- if ((rules->file.dfa && !verify_dfa_xindex(rules->file.dfa,
- rules->file.trans.size)) ||
+ if ((rules->file.dfa && !verify_dfa_accept_index(rules->file.dfa,
+ rules->file.size)) ||
(rules->policy.dfa &&
- !verify_dfa_xindex(rules->policy.dfa, rules->policy.trans.size))) {
+ !verify_dfa_accept_index(rules->policy.dfa, rules->policy.size))) {
audit_iface(profile, NULL, NULL,
"Unpack: Invalid named transition", NULL, -EPROTO);
return -EPROTO;
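
The reworked checks above rely on xindex packing a transition type in its high bits and a table index in its low bits, so only AA_X_TABLE references are bounds-checked against the transition string table, and tag/label indexes are validated only when nonzero. A sketch of that decoding, assuming only the masks used in the hunk above:

	/* Illustrative bounds check for a packed transition index. */
	static bool xindex_ok(u32 xindex, u32 trans_size)
	{
		if ((xindex & AA_X_TYPE_MASK) != AA_X_TABLE)
			return true;			/* not a table reference */
		return (xindex & AA_X_INDEX_MASK) < trans_size;
	}
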
diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
index e1bfdab524b7..5c9bde25e56d 100644
--- a/security/apparmor/policy_unpack_test.c
+++ b/security/apparmor/policy_unpack_test.c
@@ -69,31 +69,30 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_STRING_NAME) + 1;
- strcpy(buf + 3, TEST_STRING_NAME);
+ strscpy(buf + 3, TEST_STRING_NAME, e->end - (void *)(buf + 3));
buf = e->start + TEST_STRING_BUF_OFFSET;
*buf = AA_STRING;
*(buf + 1) = strlen(TEST_STRING_DATA) + 1;
- strcpy(buf + 3, TEST_STRING_DATA);
-
+ strscpy(buf + 3, TEST_STRING_DATA, e->end - (void *)(buf + 3));
buf = e->start + TEST_NAMED_U32_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_U32_NAME) + 1;
- strcpy(buf + 3, TEST_U32_NAME);
+ strscpy(buf + 3, TEST_U32_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
*((u32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = TEST_U32_DATA;
buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_U64_NAME) + 1;
- strcpy(buf + 3, TEST_U64_NAME);
+ strscpy(buf + 3, TEST_U64_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_U64_NAME) + 1) = AA_U64;
*((u64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = TEST_U64_DATA;
buf = e->start + TEST_NAMED_BLOB_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_BLOB_NAME) + 1;
- strcpy(buf + 3, TEST_BLOB_NAME);
+ strscpy(buf + 3, TEST_BLOB_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_BLOB_NAME) + 1) = AA_BLOB;
*(buf + 3 + strlen(TEST_BLOB_NAME) + 2) = TEST_BLOB_DATA_SIZE;
memcpy(buf + 3 + strlen(TEST_BLOB_NAME) + 6,
@@ -102,7 +101,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
buf = e->start + TEST_NAMED_ARRAY_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
- strcpy(buf + 3, TEST_ARRAY_NAME);
+ strscpy(buf + 3, TEST_ARRAY_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
*((u16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = TEST_ARRAY_SIZE;
diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
index 24a0e23f1b2b..83d3d1e6d9dc 100644
--- a/security/apparmor/secid.c
+++ b/security/apparmor/secid.c
@@ -53,8 +53,7 @@ void aa_secid_update(u32 secid, struct aa_label *label)
xa_unlock_irqrestore(&aa_secids, flags);
}
-/**
- *
+/*
* see label for inverse aa_label_to_secid
*/
struct aa_label *aa_secid_to_label(u32 secid)
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 88493cc31914..03d155ed362b 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -654,6 +654,7 @@ void snd_jack_report(struct snd_jack *jack, int status)
struct snd_jack_kctl *jack_kctl;
unsigned int mask_bits = 0;
#ifdef CONFIG_SND_JACK_INPUT_DEV
+ struct input_dev *idev;
int i;
#endif
@@ -670,17 +671,15 @@ void snd_jack_report(struct snd_jack *jack, int status)
status & jack_kctl->mask_bits);
#ifdef CONFIG_SND_JACK_INPUT_DEV
- mutex_lock(&jack->input_dev_lock);
- if (!jack->input_dev) {
- mutex_unlock(&jack->input_dev_lock);
+ idev = input_get_device(jack->input_dev);
+ if (!idev)
return;
- }
for (i = 0; i < ARRAY_SIZE(jack->key); i++) {
int testbit = ((SND_JACK_BTN_0 >> i) & ~mask_bits);
if (jack->type & testbit)
- input_report_key(jack->input_dev, jack->key[i],
+ input_report_key(idev, jack->key[i],
status & testbit);
}
@@ -688,13 +687,13 @@ void snd_jack_report(struct snd_jack *jack, int status)
int testbit = ((1 << i) & ~mask_bits);
if (jack->type & testbit)
- input_report_switch(jack->input_dev,
+ input_report_switch(idev,
jack_switch_types[i],
status & testbit);
}
- input_sync(jack->input_dev);
- mutex_unlock(&jack->input_dev_lock);
+ input_sync(idev);
+ input_put_device(idev);
#endif /* CONFIG_SND_JACK_INPUT_DEV */
}
EXPORT_SYMBOL(snd_jack_report);
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 7bde7fb64011..a0b951471699 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -31,15 +31,41 @@ static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL;
module_param(max_alloc_per_card, ulong, 0644);
MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
+static void __update_allocated_size(struct snd_card *card, ssize_t bytes)
+{
+ card->total_pcm_alloc_bytes += bytes;
+}
+
+static void update_allocated_size(struct snd_card *card, ssize_t bytes)
+{
+ mutex_lock(&card->memory_mutex);
+ __update_allocated_size(card, bytes);
+ mutex_unlock(&card->memory_mutex);
+}
+
+static void decrease_allocated_size(struct snd_card *card, size_t bytes)
+{
+ mutex_lock(&card->memory_mutex);
+ WARN_ON(card->total_pcm_alloc_bytes < bytes);
+ __update_allocated_size(card, -(ssize_t)bytes);
+ mutex_unlock(&card->memory_mutex);
+}
+
static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
int str, size_t size, struct snd_dma_buffer *dmab)
{
enum dma_data_direction dir;
int err;
+ /* check and reserve the requested size */
+ mutex_lock(&card->memory_mutex);
if (max_alloc_per_card &&
- card->total_pcm_alloc_bytes + size > max_alloc_per_card)
+ card->total_pcm_alloc_bytes + size > max_alloc_per_card) {
+ mutex_unlock(&card->memory_mutex);
return -ENOMEM;
+ }
+ __update_allocated_size(card, size);
+ mutex_unlock(&card->memory_mutex);
if (str == SNDRV_PCM_STREAM_PLAYBACK)
dir = DMA_TO_DEVICE;
@@ -47,9 +73,14 @@ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
dir = DMA_FROM_DEVICE;
err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
if (!err) {
- mutex_lock(&card->memory_mutex);
- card->total_pcm_alloc_bytes += dmab->bytes;
- mutex_unlock(&card->memory_mutex);
+ /* the actual allocation size might be bigger than requested,
+ * and we need to correct the accounting
+ */
+ if (dmab->bytes != size)
+ update_allocated_size(card, dmab->bytes - size);
+ } else {
+ /* take back on allocation failure */
+ decrease_allocated_size(card, size);
}
return err;
}
@@ -58,10 +89,7 @@ static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab)
{
if (!dmab->area)
return;
- mutex_lock(&card->memory_mutex);
- WARN_ON(card->total_pcm_alloc_bytes < dmab->bytes);
- card->total_pcm_alloc_bytes -= dmab->bytes;
- mutex_unlock(&card->memory_mutex);
+ decrease_allocated_size(card, dmab->bytes);
snd_dma_free_pages(dmab);
dmab->area = NULL;
}
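
The refactor above closes a race between the limit check and the accounting update: the requested size is reserved while memory_mutex is held, the DMA allocation itself runs unlocked, and the account is then corrected to the real dmab->bytes or rolled back on failure. A sketch of that reserve-then-adjust pattern; the helper names are hypothetical, not the kernel functions:

	/* Reserve up front, then adjust or roll back after the allocation. */
	static int alloc_accounted(size_t size, size_t *actual)
	{
		if (!reserve_bytes(size))		/* check + add under one lock */
			return -ENOMEM;
		if (do_alloc(size, actual) < 0) {
			unreserve_bytes(size);		/* give the reservation back */
			return -ENOMEM;
		}
		if (*actual != size)			/* allocators may round the size up */
			adjust_bytes((ssize_t)*actual - (ssize_t)size);
		return 0;
	}
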
diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
index eb1d86ff6166..7cc84e137999 100644
--- a/sound/core/seq/seq_ump_convert.c
+++ b/sound/core/seq/seq_ump_convert.c
@@ -714,7 +714,7 @@ static int system_2p_ev_to_ump_midi1(const struct snd_seq_event *event,
{
data->system.status = status;
data->system.parm1 = (event->data.control.value >> 7) & 0x7f;
- data->system.parm1 = event->data.control.value & 0x7f;
+ data->system.parm2 = event->data.control.value & 0x7f;
return 1;
}
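
The one-character fix matters because two-parameter system messages such as Song Position Pointer carry a 14-bit value split into two 7-bit data bytes; before the change the low byte overwrote the high byte in parm1 and parm2 was never filled in. A small standalone check of the split, using an arbitrary example value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int value = 0x1234;			/* example 14-bit value */
		unsigned char parm1 = (value >> 7) & 0x7f;	/* high 7 bits */
		unsigned char parm2 = value & 0x7f;		/* low 7 bits */

		printf("parm1=0x%02x parm2=0x%02x\n", parm1, parm2);
		return 0;
	}
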
diff --git a/sound/core/ump_convert.c b/sound/core/ump_convert.c
index fb61df424a87..de04799fdb69 100644
--- a/sound/core/ump_convert.c
+++ b/sound/core/ump_convert.c
@@ -73,7 +73,7 @@ static int cvt_ump_system_to_legacy(u32 data, unsigned char *buf)
case UMP_SYSTEM_STATUS_MIDI_TIME_CODE:
case UMP_SYSTEM_STATUS_SONG_SELECT:
buf[1] = (data >> 8) & 0x7f;
- return 1;
+ return 2;
case UMP_SYSTEM_STATUS_SONG_POSITION:
buf[1] = (data >> 8) & 0x7f;
buf[2] = data & 0x7f;
diff --git a/sound/firewire/fireface/ff.c b/sound/firewire/fireface/ff.c
index 82241058ea14..6e84e4787259 100644
--- a/sound/firewire/fireface/ff.c
+++ b/sound/firewire/fireface/ff.c
@@ -16,7 +16,7 @@ MODULE_LICENSE("GPL");
static void name_card(struct snd_ff *ff)
{
struct fw_device *fw_dev = fw_parent_device(ff->unit);
- const char *const names[] = {
+ static const char *const names[] = {
[SND_FF_UNIT_VERSION_FF800] = "Fireface800",
[SND_FF_UNIT_VERSION_FF400] = "Fireface400",
[SND_FF_UNIT_VERSION_UFX] = "FirefaceUFX",
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 9523479fa94a..63d40f1a914f 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -44,7 +44,7 @@ struct compat_info {
static bool detect_loud_models(struct fw_unit *unit)
{
- const char *const models[] = {
+ static const char *const models[] = {
"Onyxi",
"Onyx-i",
"Onyx 1640i",
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index f258cb3a6895..9b1bcabd8414 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -596,10 +596,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once);
*/
void snd_hdac_regmap_sync(struct hdac_device *codec)
{
- if (codec->regmap) {
- mutex_lock(&codec->regmap_lock);
+ mutex_lock(&codec->regmap_lock);
+ if (codec->regmap)
regcache_sync(codec->regmap);
- mutex_unlock(&codec->regmap_lock);
- }
+ mutex_unlock(&codec->regmap_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index afe8253f9a4f..e2f8b608de82 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5883,7 +5883,7 @@ static void alc_fixup_headset_mode_alc255_no_hp_mic(struct hda_codec *codec,
struct alc_spec *spec = codec->spec;
spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
alc255_set_default_jack_type(codec);
- }
+ }
else
alc_fixup_headset_mode(codec, fix, action);
}
@@ -7068,6 +7068,9 @@ enum {
ALC285_FIXUP_SPEAKER2_TO_DAC1,
ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1,
ALC285_FIXUP_ASUS_HEADSET_MIC,
+ ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS,
+ ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1,
+ ALC285_FIXUP_ASUS_I2C_HEADSET_MIC,
ALC280_FIXUP_HP_HEADSET_MIC,
ALC221_FIXUP_HP_FRONT_MIC,
ALC292_FIXUP_TPT460,
@@ -8058,6 +8061,31 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1
},
+ [ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x14, 0x90170120 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC285_FIXUP_ASUS_HEADSET_MIC
+ },
+ [ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_speaker2_to_dac1,
+ .chained = true,
+ .chain_id = ALC287_FIXUP_CS35L41_I2C_2
+ },
+ [ALC285_FIXUP_ASUS_I2C_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11050 },
+ { 0x1b, 0x03a11c30 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1
+ },
[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -9573,10 +9601,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650P", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601V", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
@@ -9602,7 +9633,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
- SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
@@ -9731,6 +9763,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -11286,6 +11319,7 @@ enum {
ALC897_FIXUP_HP_HSMIC_VERB,
ALC897_FIXUP_LENOVO_HEADSET_MODE,
ALC897_FIXUP_HEADSET_MIC_PIN2,
+ ALC897_FIXUP_UNIS_H3C_X500S,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -11725,6 +11759,13 @@ static const struct hda_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE
},
+ [ALC897_FIXUP_UNIS_H3C_X500S] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 },
+ {}
+ },
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -11886,6 +11927,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
{.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"},
{.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
{.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"},
+ {.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"},
{}
};
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index ec31a5762ddf..64bebe1a72bb 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -94,7 +94,7 @@ static int sdw_params_stream(struct device *dev,
struct sdw_intel_stream_params_data *params_data)
{
struct snd_soc_dai *d = params_data->dai;
- struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->stream);
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
struct snd_sof_dai_config_data data = { 0 };
data.dai_index = (params_data->link_id << 8) | d->id;
@@ -158,6 +158,7 @@ static int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
static int hda_sdw_probe(struct snd_sof_dev *sdev)
{
+ const struct sof_intel_dsp_desc *chip;
struct sof_intel_hda_dev *hdev;
struct sdw_intel_res res;
void *sdw;
@@ -166,16 +167,38 @@ static int hda_sdw_probe(struct snd_sof_dev *sdev)
memset(&res, 0, sizeof(res));
- res.hw_ops = &sdw_intel_cnl_hw_ops;
- res.mmio_base = sdev->bar[HDA_DSP_BAR];
- res.shim_base = hdev->desc->sdw_shim_base;
- res.alh_base = hdev->desc->sdw_alh_base;
+ chip = get_chip_info(sdev->pdata);
+ if (chip->hw_ip_version < SOF_INTEL_ACE_2_0) {
+ res.mmio_base = sdev->bar[HDA_DSP_BAR];
+ res.hw_ops = &sdw_intel_cnl_hw_ops;
+ res.shim_base = hdev->desc->sdw_shim_base;
+ res.alh_base = hdev->desc->sdw_alh_base;
+ res.ext = false;
+ } else {
+ /*
+ * retrieve eml_lock needed to protect shared registers
+ * in the HDaudio multi-link areas
+ */
+ res.eml_lock = hdac_bus_eml_get_mutex(sof_to_bus(sdev), true,
+ AZX_REG_ML_LEPTR_ID_SDW);
+ if (!res.eml_lock)
+ return -ENODEV;
+
+ res.mmio_base = sdev->bar[HDA_DSP_HDA_BAR];
+ /*
+ * the SHIM and SoundWire register offsets are link-specific
+ * and will be determined when adding auxiliary devices
+ */
+ res.hw_ops = &sdw_intel_lnl_hw_ops;
+ res.ext = true;
+ }
res.irq = sdev->ipc_irq;
res.handle = hdev->info.handle;
res.parent = sdev->dev;
res.ops = &sdw_callback;
res.dev = sdev->dev;
res.clock_stop_quirks = sdw_clock_stop_quirks;
+ res.hbus = sof_to_bus(sdev);
/*
* ops and arg fields are not populated for now,
diff --git a/sound/soc/sof/intel/shim.h b/sound/soc/sof/intel/shim.h
index 48428ccbcfe0..207df48e27cf 100644
--- a/sound/soc/sof/intel/shim.h
+++ b/sound/soc/sof/intel/shim.h
@@ -21,6 +21,7 @@ enum sof_intel_hw_ip_version {
SOF_INTEL_CAVS_2_0, /* IceLake, JasperLake */
SOF_INTEL_CAVS_2_5, /* TigerLake, AlderLake */
SOF_INTEL_ACE_1_0, /* MeteorLake */
+ SOF_INTEL_ACE_2_0, /* LunarLake */
};
/*
diff --git a/tools/arch/arm64/include/uapi/asm/bitsperlong.h b/tools/arch/arm64/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 485d60bee26c..000000000000
--- a/tools/arch/arm64/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_BITSPERLONG_H
-#define __ASM_BITSPERLONG_H
-
-#define __BITS_PER_LONG 64
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* __ASM_BITSPERLONG_H */
diff --git a/tools/arch/hexagon/include/uapi/asm/bitsperlong.h b/tools/arch/hexagon/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 5adca0d26913..000000000000
--- a/tools/arch/hexagon/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef __ASM_HEXAGON_BITSPERLONG_H
-#define __ASM_HEXAGON_BITSPERLONG_H
-
-#define __BITS_PER_LONG 32
-
-#include <asm-generic/bitsperlong.h>
-
-#endif
diff --git a/tools/arch/loongarch/include/uapi/asm/bitsperlong.h b/tools/arch/loongarch/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 00b4ba1e5cdf..000000000000
--- a/tools/arch/loongarch/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ASM_LOONGARCH_BITSPERLONG_H
-#define __ASM_LOONGARCH_BITSPERLONG_H
-
-#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* __ASM_LOONGARCH_BITSPERLONG_H */
diff --git a/tools/arch/microblaze/include/uapi/asm/bitsperlong.h b/tools/arch/microblaze/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 76da34b10f59..000000000000
--- a/tools/arch/microblaze/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/bitsperlong.h>
diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 0b9b58b57ff6..000000000000
--- a/tools/arch/riscv/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 ARM Ltd.
- * Copyright (C) 2015 Regents of the University of California
- */
-
-#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
-#define _UAPI_ASM_RISCV_BITSPERLONG_H
-
-#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/tools/counter/.gitignore b/tools/counter/.gitignore
new file mode 100644
index 000000000000..9fd290d4bf43
--- /dev/null
+++ b/tools/counter/.gitignore
@@ -0,0 +1,2 @@
+/counter_example
+/include/linux/counter.h
diff --git a/tools/counter/Makefile b/tools/counter/Makefile
index 8843f0fa6119..a0f4cab71fe5 100644
--- a/tools/counter/Makefile
+++ b/tools/counter/Makefile
@@ -40,6 +40,7 @@ $(OUTPUT)counter_example: $(COUNTER_EXAMPLE)
clean:
rm -f $(ALL_PROGRAMS)
rm -rf $(OUTPUT)include/linux/counter.h
+ rmdir -p $(OUTPUT)include/linux
find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
install: $(ALL_PROGRAMS)
diff --git a/tools/include/uapi/asm-generic/bitsperlong.h b/tools/include/uapi/asm-generic/bitsperlong.h
index 23e6c416b85f..352cb81947b8 100644
--- a/tools/include/uapi/asm-generic/bitsperlong.h
+++ b/tools/include/uapi/asm-generic/bitsperlong.h
@@ -1,6 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASM_GENERIC_BITS_PER_LONG
#define _UAPI__ASM_GENERIC_BITS_PER_LONG
+#ifndef __BITS_PER_LONG
+/*
+ * In order to keep safe and avoid regression, only unify uapi
+ * bitsperlong.h for some archs which are using newer toolchains
+ * that have the definitions of __CHAR_BIT__ and __SIZEOF_LONG__.
+ * See the following link for more info:
+ * https://lore.kernel.org/linux-arch/b9624545-2c80-49a1-ac3c-39264a591f7b@app.fastmail.com/
+ */
+#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
+#define __BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+#else
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
@@ -8,8 +20,8 @@
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
-#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif
+#endif
#endif /* _UAPI__ASM_GENERIC_BITS_PER_LONG */
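
__CHAR_BIT__ and __SIZEOF_LONG__ are predefined by newer GCC and Clang, so the generic tools header can now derive the word size instead of each architecture carrying its own copy; the old 32-bit default remains only for toolchains lacking those macros. A quick host-side check of the derived value:

	#include <stdio.h>

	int main(void)
	{
	#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
		printf("__BITS_PER_LONG = %d\n", __CHAR_BIT__ * __SIZEOF_LONG__);
	#else
		printf("falling back to the per-arch definition\n");
	#endif
		return 0;
	}
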
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 45fa180cc56a..dd7d8e10f16d 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -38,12 +38,12 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
#endif
-/* fs/xattr.c */
#define __NR_setxattr 5
__SYSCALL(__NR_setxattr, sys_setxattr)
#define __NR_lsetxattr 6
@@ -68,58 +68,38 @@ __SYSCALL(__NR_removexattr, sys_removexattr)
__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
#define __NR_fremovexattr 16
__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-
-/* fs/dcache.c */
#define __NR_getcwd 17
__SYSCALL(__NR_getcwd, sys_getcwd)
-
-/* fs/cookies.c */
#define __NR_lookup_dcookie 18
__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
-
-/* fs/eventfd.c */
#define __NR_eventfd2 19
__SYSCALL(__NR_eventfd2, sys_eventfd2)
-
-/* fs/eventpoll.c */
#define __NR_epoll_create1 20
__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
#define __NR_epoll_ctl 21
__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
#define __NR_epoll_pwait 22
__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
-
-/* fs/fcntl.c */
#define __NR_dup 23
__SYSCALL(__NR_dup, sys_dup)
#define __NR_dup3 24
__SYSCALL(__NR_dup3, sys_dup3)
#define __NR3264_fcntl 25
__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
-
-/* fs/inotify_user.c */
#define __NR_inotify_init1 26
__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
#define __NR_inotify_add_watch 27
__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 28
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-
-/* fs/ioctl.c */
#define __NR_ioctl 29
__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
-
-/* fs/ioprio.c */
#define __NR_ioprio_set 30
__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
#define __NR_ioprio_get 31
__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-
-/* fs/locks.c */
#define __NR_flock 32
__SYSCALL(__NR_flock, sys_flock)
-
-/* fs/namei.c */
#define __NR_mknodat 33
__SYSCALL(__NR_mknodat, sys_mknodat)
#define __NR_mkdirat 34
@@ -130,25 +110,21 @@ __SYSCALL(__NR_unlinkat, sys_unlinkat)
__SYSCALL(__NR_symlinkat, sys_symlinkat)
#define __NR_linkat 37
__SYSCALL(__NR_linkat, sys_linkat)
+
#ifdef __ARCH_WANT_RENAMEAT
/* renameat is superseded with flags by renameat2 */
#define __NR_renameat 38
__SYSCALL(__NR_renameat, sys_renameat)
#endif /* __ARCH_WANT_RENAMEAT */
-/* fs/namespace.c */
#define __NR_umount2 39
__SYSCALL(__NR_umount2, sys_umount)
#define __NR_mount 40
__SYSCALL(__NR_mount, sys_mount)
#define __NR_pivot_root 41
__SYSCALL(__NR_pivot_root, sys_pivot_root)
-
-/* fs/nfsctl.c */
#define __NR_nfsservctl 42
__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
-
-/* fs/open.c */
#define __NR3264_statfs 43
__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
compat_sys_statfs64)
@@ -161,7 +137,6 @@ __SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
#define __NR3264_ftruncate 46
__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
compat_sys_ftruncate64)
-
#define __NR_fallocate 47
__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
#define __NR_faccessat 48
@@ -186,20 +161,12 @@ __SYSCALL(__NR_openat, sys_openat)
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
__SYSCALL(__NR_vhangup, sys_vhangup)
-
-/* fs/pipe.c */
#define __NR_pipe2 59
__SYSCALL(__NR_pipe2, sys_pipe2)
-
-/* fs/quota.c */
#define __NR_quotactl 60
__SYSCALL(__NR_quotactl, sys_quotactl)
-
-/* fs/readdir.c */
#define __NR_getdents64 61
__SYSCALL(__NR_getdents64, sys_getdents64)
-
-/* fs/read_write.c */
#define __NR3264_lseek 62
__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
#define __NR_read 63
@@ -218,12 +185,9 @@ __SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
#define __NR_pwritev 70
__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
-
-/* fs/sendfile.c */
#define __NR3264_sendfile 71
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
-/* fs/select.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
@@ -231,21 +195,17 @@ __SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_psel
__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
#endif
-/* fs/signalfd.c */
#define __NR_signalfd4 74
__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
-
-/* fs/splice.c */
#define __NR_vmsplice 75
__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_splice 76
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 77
__SYSCALL(__NR_tee, sys_tee)
-
-/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
+
#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
@@ -253,13 +213,13 @@ __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
#endif
-/* fs/sync.c */
#define __NR_sync 81
__SYSCALL(__NR_sync, sys_sync)
#define __NR_fsync 82
__SYSCALL(__NR_fsync, sys_fsync)
#define __NR_fdatasync 83
__SYSCALL(__NR_fdatasync, sys_fdatasync)
+
#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
#define __NR_sync_file_range2 84
__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
@@ -270,9 +230,9 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
compat_sys_sync_file_range)
#endif
-/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
@@ -282,45 +242,35 @@ __SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
sys_timerfd_gettime)
#endif
-/* fs/utimes.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
#endif
-/* kernel/acct.c */
#define __NR_acct 89
__SYSCALL(__NR_acct, sys_acct)
-
-/* kernel/capability.c */
#define __NR_capget 90
__SYSCALL(__NR_capget, sys_capget)
#define __NR_capset 91
__SYSCALL(__NR_capset, sys_capset)
-
-/* kernel/exec_domain.c */
#define __NR_personality 92
__SYSCALL(__NR_personality, sys_personality)
-
-/* kernel/exit.c */
#define __NR_exit 93
__SYSCALL(__NR_exit, sys_exit)
#define __NR_exit_group 94
__SYSCALL(__NR_exit_group, sys_exit_group)
#define __NR_waitid 95
__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
-
-/* kernel/fork.c */
#define __NR_set_tid_address 96
__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
#define __NR_unshare 97
__SYSCALL(__NR_unshare, sys_unshare)
-/* kernel/futex.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
#endif
+
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
@@ -328,43 +278,40 @@ __SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
-/* kernel/hrtimer.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
#endif
-/* kernel/itimer.c */
#define __NR_getitimer 102
__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
#define __NR_setitimer 103
__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
-
-/* kernel/kexec.c */
#define __NR_kexec_load 104
__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
-
-/* kernel/module.c */
#define __NR_init_module 105
__SYSCALL(__NR_init_module, sys_init_module)
#define __NR_delete_module 106
__SYSCALL(__NR_delete_module, sys_delete_module)
-
-/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
#endif
+
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
#endif
+
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
@@ -377,15 +324,10 @@ __SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
sys_clock_nanosleep)
#endif
-/* kernel/printk.c */
#define __NR_syslog 116
__SYSCALL(__NR_syslog, sys_syslog)
-
-/* kernel/ptrace.c */
#define __NR_ptrace 117
__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
-
-/* kernel/sched/core.c */
#define __NR_sched_setparam 118
__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
#define __NR_sched_setscheduler 119
@@ -406,13 +348,13 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
sys_sched_rr_get_interval)
#endif
-/* kernel/signal.c */
#define __NR_restart_syscall 128
__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
#define __NR_kill 129
@@ -431,18 +373,18 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
#endif
+
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigreturn 139
__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
-
-/* kernel/sys.c */
#define __NR_setpriority 140
__SYSCALL(__NR_setpriority, sys_setpriority)
#define __NR_getpriority 141
@@ -507,7 +449,6 @@ __SYSCALL(__NR_prctl, sys_prctl)
#define __NR_getcpu 168
__SYSCALL(__NR_getcpu, sys_getcpu)
-/* kernel/time.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
@@ -517,7 +458,6 @@ __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
#endif
-/* kernel/sys.c */
#define __NR_getpid 172
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_getppid 173
@@ -534,12 +474,11 @@ __SYSCALL(__NR_getegid, sys_getegid)
__SYSCALL(__NR_gettid, sys_gettid)
#define __NR_sysinfo 179
__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
-
-/* ipc/mqueue.c */
#define __NR_mq_open 180
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
@@ -547,12 +486,11 @@ __SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
sys_mq_timedreceive)
#endif
+
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
-
-/* ipc/msg.c */
#define __NR_msgget 186
__SYSCALL(__NR_msgget, sys_msgget)
#define __NR_msgctl 187
@@ -561,20 +499,18 @@ __SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
#define __NR_msgsnd 189
__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
-
-/* ipc/sem.c */
#define __NR_semget 190
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
#endif
+
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
-
-/* ipc/shm.c */
#define __NR_shmget 194
__SYSCALL(__NR_shmget, sys_shmget)
#define __NR_shmctl 195
@@ -583,8 +519,6 @@ __SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
#define __NR_shmdt 197
__SYSCALL(__NR_shmdt, sys_shmdt)
-
-/* net/socket.c */
#define __NR_socket 198
__SYSCALL(__NR_socket, sys_socket)
#define __NR_socketpair 199
@@ -615,40 +549,30 @@ __SYSCALL(__NR_shutdown, sys_shutdown)
__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
#define __NR_recvmsg 212
__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
-
-/* mm/filemap.c */
#define __NR_readahead 213
__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
-
-/* mm/nommu.c, also with MMU */
#define __NR_brk 214
__SYSCALL(__NR_brk, sys_brk)
#define __NR_munmap 215
__SYSCALL(__NR_munmap, sys_munmap)
#define __NR_mremap 216
__SYSCALL(__NR_mremap, sys_mremap)
-
-/* security/keys/keyctl.c */
#define __NR_add_key 217
__SYSCALL(__NR_add_key, sys_add_key)
#define __NR_request_key 218
__SYSCALL(__NR_request_key, sys_request_key)
#define __NR_keyctl 219
__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
-
-/* arch/example/kernel/sys_example.c */
#define __NR_clone 220
__SYSCALL(__NR_clone, sys_clone)
#define __NR_execve 221
__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
-
#define __NR3264_mmap 222
__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
-/* mm/fadvise.c */
#define __NR3264_fadvise64 223
__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
-/* mm/, CONFIG_MMU only */
+/* CONFIG_MMU only */
#ifndef __ARCH_NOMMU
#define __NR_swapon 224
__SYSCALL(__NR_swapon, sys_swapon)
@@ -691,6 +615,7 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
@@ -706,6 +631,7 @@ __SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recv
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
#endif
+
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
@@ -716,10 +642,12 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
#endif
+
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
@@ -770,15 +698,19 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
#endif
+
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+
#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
@@ -844,13 +776,14 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
+
#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
-
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
#define __NR_pidfd_getfd 438
@@ -865,7 +798,6 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
#define __NR_quotactl_fd 443
__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
-
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445
@@ -877,12 +809,11 @@ __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
#define __NR_memfd_secret 447
__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#endif
+
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
-
#define __NR_futex_waitv 449
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
-
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index da5206517158..c65267afc341 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -1,8 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#if defined(__i386__) || defined(__x86_64__)
#include "../../../arch/x86/include/uapi/asm/bitsperlong.h"
-#elif defined(__aarch64__)
-#include "../../../arch/arm64/include/uapi/asm/bitsperlong.h"
#elif defined(__powerpc__)
#include "../../../arch/powerpc/include/uapi/asm/bitsperlong.h"
#elif defined(__s390__)
@@ -13,12 +11,8 @@
#include "../../../arch/mips/include/uapi/asm/bitsperlong.h"
#elif defined(__ia64__)
#include "../../../arch/ia64/include/uapi/asm/bitsperlong.h"
-#elif defined(__riscv)
-#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
#elif defined(__alpha__)
#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
-#elif defined(__loongarch__)
-#include "../../../arch/loongarch/include/uapi/asm/bitsperlong.h"
#else
#include <asm-generic/bitsperlong.h>
#endif
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index e56ae54805a8..1478068ad5dd 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -669,7 +669,7 @@ llvm.*::
"$CLANG_OPTIONS $PERF_BPF_INC_OPTIONS $KERNEL_INC_OPTIONS " \
"-Wno-unused-value -Wno-pointer-sign " \
"-working-directory $WORKING_DIR " \
- "-c \"$CLANG_SOURCE\" -target bpf $CLANG_EMIT_LLVM -O2 -o - $LLVM_OPTIONS_PIPE"
+ "-c \"$CLANG_SOURCE\" --target=bpf $CLANG_EMIT_LLVM -O2 -o - $LLVM_OPTIONS_PIPE"
llvm.clang-opt::
Options passed to clang.
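The only change to the compile template is the spelling of the target option: '-target bpf' becomes '--target=bpf', the form current clang releases document. A minimal sketch of the equivalent standalone invocation (file names here are placeholders, not from the patch):

    clang --target=bpf -O2 -g -c prog.bpf.c -o prog.bpf.o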
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 6e5ba3cd2b72..30eea576721f 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -36,6 +36,9 @@ COMMON OPTIONS
--input=<file>::
Input file name. (default: perf.data unless stdin is a fifo)
+--output=<file>::
+ Output file name for perf lock contention and report.
+
-v::
--verbose::
Be more verbose (show symbol address, etc).
@@ -200,6 +203,11 @@ CONTENTION OPTIONS
Note that it matches the substring so 'rq' would match both 'raw_spin_rq_lock'
and 'irq_enter_rcu'.
+-x::
+--field-separator=<SEP>::
+ Show results using a CSV-style output to make it easy to import directly
+ into spreadsheets. Columns are separated by the string specified in SEP.
+
SEE ALSO
--------
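Together, the new --output and -x options let a contention report be written straight to a file in CSV form. An illustrative invocation (the workload, entry count and separator are arbitrary choices, not taken from the patch):

    perf lock contention -a -b -E 10 -x , --output=contention.csv -- sleep 5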
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 9c5aa14a44cf..0609c19caabd 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -315,6 +315,9 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_LDFLAGS-libaio = -lrt
+FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
+FEATURE_CHECK_LDFLAGS-disassembler-init-styled = -lbfd -lopcodes -ldl
+
CORE_CFLAGS += -fno-omit-frame-pointer
CORE_CFLAGS += -ggdb3
CORE_CFLAGS += -funwind-tables
@@ -344,8 +347,8 @@ ifneq ($(TCMALLOC),)
endif
ifeq ($(FEATURES_DUMP),)
-# We will display at the end of this Makefile.config, using $(call feature_display_entries),
-# as we may retry some feature detection here.
+# We will display at the end of this Makefile.config, using $(call feature_display_entries)
+# As we may retry some feature detection here, see the disassembler-four-args case, for instance
FEATURE_DISPLAY_DEFERRED := 1
include $(srctree)/tools/build/Makefile.feature
else
@@ -680,6 +683,10 @@ ifdef BUILD_BPF_SKEL
CFLAGS += -DHAVE_BPF_SKEL
endif
+ifndef GEN_VMLINUX_H
+ VMLINUX_H=$(src-perf)/util/bpf_skel/vmlinux/vmlinux.h
+endif
+
dwarf-post-unwind := 1
dwarf-post-unwind-text := BUG
@@ -903,9 +910,13 @@ ifdef BUILD_NONDISTRO
ifeq ($(feature-libbfd-liberty), 1)
EXTLIBS += -lbfd -lopcodes -liberty
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
+ FEATURE_CHECK_LDFLAGS-disassembler-init-styled += -liberty -ldl
else
ifeq ($(feature-libbfd-liberty-z), 1)
EXTLIBS += -lbfd -lopcodes -liberty -lz
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
+ FEATURE_CHECK_LDFLAGS-disassembler-init-styled += -liberty -lz -ldl
endif
endif
$(call feature_check,disassembler-four-args)
@@ -1329,6 +1340,6 @@ endif
# re-generate FEATURE-DUMP as we may have called feature_check, found out
# extra libraries to add to LDFLAGS of some other test and then redo those
-# tests.
+# tests, see the block about libbfd, disassembler-four-args, for instance.
$(shell rm -f $(FEATURE_DUMP_FILENAME))
$(foreach feat,$(FEATURE_TESTS),$(shell echo "$(call feature_assign,$(feat))" >> $(FEATURE_DUMP_FILENAME)))
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index b1e62a621f92..097316ef38e6 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -132,6 +132,8 @@ include ../scripts/utilities.mak
# Define EXTRA_TESTS to enable building extra tests useful mainly to perf
# developers, such as:
# x86 instruction decoder - new instructions test
+#
+# Define GEN_VMLINUX_H to generate vmlinux.h from the BTF.
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
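GEN_VMLINUX_H selects between the vmlinux.h bundled under util/bpf_skel/vmlinux/ and one regenerated from BTF at build time. A sketch of the two invocations this is meant to support (the second assumes a BTF-enabled kernel is available):

    make -C tools/perf BUILD_BPF_SKEL=1                  # copy the bundled vmlinux.h
    make -C tools/perf BUILD_BPF_SKEL=1 GEN_VMLINUX_H=1  # dump vmlinux.h from kernel BTF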
@@ -197,6 +199,7 @@ FLEX ?= flex
BISON ?= bison
STRIP = strip
AWK = awk
+READELF ?= readelf
# include Makefile.config by default and rule out
# non-config cases
@@ -1061,7 +1064,7 @@ $(SKEL_TMP_OUT) $(LIBAPI_OUTPUT) $(LIBBPF_OUTPUT) $(LIBPERF_OUTPUT) $(LIBSUBCMD_
ifdef BUILD_BPF_SKEL
BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool
# Get Clang's default includes on this system, as opposed to those seen by
-# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# '--target=bpf'. This fixes "missing" files on some architectures/distros,
# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
#
# Use '-idirafter': Don't interfere with include mechanics except where the
@@ -1084,8 +1087,44 @@ $(BPFTOOL): | $(SKEL_TMP_OUT)
$(Q)CFLAGS= $(MAKE) -C ../bpf/bpftool \
OUTPUT=$(SKEL_TMP_OUT)/ bootstrap
-$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) | $(SKEL_TMP_OUT)
- $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) $(TOOLS_UAPI_INCLUDE) \
+# Paths to search for a kernel to generate vmlinux.h from.
+VMLINUX_BTF_ELF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+
+# Paths to BTF information.
+VMLINUX_BTF_BTF_PATHS ?= /sys/kernel/btf/vmlinux
+
+# Filter out kernels that don't exist or without a BTF section.
+VMLINUX_BTF_ELF_ABSPATHS ?= $(abspath $(wildcard $(VMLINUX_BTF_ELF_PATHS)))
+VMLINUX_BTF_PATHS ?= $(shell for file in $(VMLINUX_BTF_ELF_ABSPATHS); \
+ do \
+ if [ -f $$file ] && ($(READELF) -S "$$file" | grep -q .BTF); \
+ then \
+ echo "$$file"; \
+ fi; \
+ done) \
+ $(wildcard $(VMLINUX_BTF_BTF_PATHS))
+
+# Select the first as the source of vmlinux.h.
+VMLINUX_BTF ?= $(firstword $(VMLINUX_BTF_PATHS))
+
+ifeq ($(VMLINUX_H),)
+ ifeq ($(VMLINUX_BTF),)
+ $(error Missing bpftool input for generating vmlinux.h)
+ endif
+endif
+
+$(SKEL_OUT)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL)
+ifeq ($(VMLINUX_H),)
+ $(QUIET_GEN)$(BPFTOOL) btf dump file $< format c > $@
+else
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) $(SKEL_OUT)/vmlinux.h | $(SKEL_TMP_OUT)
+ $(QUIET_CLANG)$(CLANG) -g -O2 --target=bpf -Wall -Werror $(BPF_INCLUDE) $(TOOLS_UAPI_INCLUDE) \
-c $(filter util/bpf_skel/%.bpf.c,$^) -o $@
$(SKEL_OUT)/%.skel.h: $(SKEL_TMP_OUT)/%.bpf.o | $(BPFTOOL)
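The generation rule above is the usual bpftool recipe applied to the first usable BTF source found via VMLINUX_BTF_PATHS. Run by hand against the running kernel it would look like this (requires a kernel built with CONFIG_DEBUG_INFO_BTF):

    bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h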
diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
index 512c2d885d24..81d22657922a 100644
--- a/tools/perf/arch/x86/util/evsel.c
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -102,3 +102,23 @@ void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
}
}
}
+
+int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
+{
+ if (!x86__is_amd_cpu())
+ return 0;
+
+ if (!evsel->core.attr.precise_ip &&
+ !(evsel->pmu_name && !strncmp(evsel->pmu_name, "ibs", 3)))
+ return 0;
+
+ /* More verbose IBS errors. */
+ if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||
+ evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||
+ evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {
+ return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
+ "again without the privilege modifiers (like 'k') at the end.");
+ }
+
+ return 0;
+}
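The new strerror hook only adds a clearer message when an IBS event is opened with privilege modifiers, which the hardware cannot filter on. An illustrative pair of commands on an AMD machine (PMU name and modifier syntax are assumptions, not part of the patch):

    perf record -a -e ibs_op//k -- sleep 1    # now explains that IBS cannot filter by privilege level
    perf record -a -e ibs_op// -- sleep 1     # no privilege modifier, opens as before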
diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
index f5674d824a40..83954af36753 100644
--- a/tools/perf/builtin-daemon.c
+++ b/tools/perf/builtin-daemon.c
@@ -1524,7 +1524,7 @@ int cmd_daemon(int argc, const char **argv)
if (argc) {
if (!strcmp(argv[0], "start"))
ret = __cmd_start(&__daemon, daemon_options, argc, argv);
- if (!strcmp(argv[0], "signal"))
+ else if (!strcmp(argv[0], "signal"))
ret = __cmd_signal(&__daemon, daemon_options, argc, argv);
else if (!strcmp(argv[0], "stop"))
ret = __cmd_stop(&__daemon, daemon_options, argc, argv);
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
index 3751df744577..7f75c5b73f26 100644
--- a/tools/perf/builtin-kallsyms.c
+++ b/tools/perf/builtin-kallsyms.c
@@ -62,7 +62,6 @@ int cmd_kallsyms(int argc, const char **argv)
if (argc < 1)
usage_with_options(kallsyms_usage, options);
- symbol_conf.sort_by_name = true;
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
if (symbol__init(NULL) < 0)
return -1;
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 8b505e1e5002..c15386cb1033 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -27,6 +27,7 @@
#include "util/map.h"
#include "util/util.h"
+#include <stdio.h>
#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
@@ -65,6 +66,8 @@ static int max_stack_depth = CONTENTION_STACK_DEPTH;
static int stack_skip = CONTENTION_STACK_SKIP;
static int print_nr_entries = INT_MAX / 2;
static LIST_HEAD(callstack_filters);
+static const char *output_name = NULL;
+static FILE *lock_output;
struct callstack_filter {
struct list_head list;
@@ -225,22 +228,28 @@ static void lock_stat_key_print_time(unsigned long long nsec, int len)
{ 0, NULL },
};
+ /* for CSV output */
+ if (len == 0) {
+ fprintf(lock_output, "%llu", nsec);
+ return;
+ }
+
for (int i = 0; table[i].unit; i++) {
if (nsec < table[i].base)
continue;
- pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
+ fprintf(lock_output, "%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
return;
}
- pr_info("%*llu %s", len - 3, nsec, "ns");
+ fprintf(lock_output, "%*llu %s", len - 3, nsec, "ns");
}
#define PRINT_KEY(member) \
static void lock_stat_key_print_ ## member(struct lock_key *key, \
struct lock_stat *ls) \
{ \
- pr_info("%*llu", key->len, (unsigned long long)ls->member); \
+ fprintf(lock_output, "%*llu", key->len, (unsigned long long)ls->member);\
}
#define PRINT_TIME(member) \
@@ -1329,12 +1338,12 @@ static void print_bad_events(int bad, int total)
if (quiet || total == 0 || (broken == 0 && verbose <= 0))
return;
- pr_info("\n=== output for debug ===\n\n");
- pr_info("bad: %d, total: %d\n", bad, total);
- pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
- pr_info("histogram of events caused bad sequence\n");
+ fprintf(lock_output, "\n=== output for debug ===\n\n");
+ fprintf(lock_output, "bad: %d, total: %d\n", bad, total);
+ fprintf(lock_output, "bad rate: %.2f %%\n", (double)bad / (double)total * 100);
+ fprintf(lock_output, "histogram of events caused bad sequence\n");
for (i = 0; i < BROKEN_MAX; i++)
- pr_info(" %10s: %d\n", name[i], bad_hist[i]);
+ fprintf(lock_output, " %10s: %d\n", name[i], bad_hist[i]);
}
/* TODO: various way to print, coloring, nano or milli sec */
@@ -1346,10 +1355,10 @@ static void print_result(void)
int bad, total, printed;
if (!quiet) {
- pr_info("%20s ", "Name");
+ fprintf(lock_output, "%20s ", "Name");
list_for_each_entry(key, &lock_keys, list)
- pr_info("%*s ", key->len, key->header);
- pr_info("\n\n");
+ fprintf(lock_output, "%*s ", key->len, key->header);
+ fprintf(lock_output, "\n\n");
}
bad = total = printed = 0;
@@ -1374,7 +1383,7 @@ static void print_result(void)
name = thread__comm_str(t);
}
- pr_info("%20s ", name);
+ fprintf(lock_output, "%20s ", name);
} else {
strncpy(cut_name, st->name, 16);
cut_name[16] = '.';
@@ -1382,14 +1391,14 @@ static void print_result(void)
cut_name[18] = '.';
cut_name[19] = '\0';
/* cut off name for saving output style */
- pr_info("%20s ", cut_name);
+ fprintf(lock_output, "%20s ", cut_name);
}
list_for_each_entry(key, &lock_keys, list) {
key->print(key, st);
- pr_info(" ");
+ fprintf(lock_output, " ");
}
- pr_info("\n");
+ fprintf(lock_output, "\n");
if (++printed >= print_nr_entries)
break;
@@ -1406,13 +1415,13 @@ static void dump_threads(void)
struct rb_node *node;
struct thread *t;
- pr_info("%10s: comm\n", "Thread ID");
+ fprintf(lock_output, "%10s: comm\n", "Thread ID");
node = rb_first(&thread_stats);
while (node) {
st = container_of(node, struct thread_stat, rb);
t = perf_session__findnew(session, st->tid);
- pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
+ fprintf(lock_output, "%10d: %s\n", st->tid, thread__comm_str(t));
node = rb_next(node);
thread__put(t);
}
@@ -1438,7 +1447,7 @@ static void dump_map(void)
unsigned int i;
struct lock_stat *st;
- pr_info("Address of instance: name of class\n");
+ fprintf(lock_output, "Address of instance: name of class\n");
for (i = 0; i < LOCKHASH_SIZE; i++) {
hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
insert_to_result(st, compare_maps);
@@ -1446,7 +1455,7 @@ static void dump_map(void)
}
while ((st = pop_from_result()))
- pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name);
+ fprintf(lock_output, " %#llx: %s\n", (unsigned long long)st->addr, st->name);
}
static int dump_info(void)
@@ -1626,58 +1635,242 @@ static void sort_contention_result(void)
sort_result();
}
-static void print_bpf_events(int total, struct lock_contention_fails *fails)
+static void print_header_stdio(void)
+{
+ struct lock_key *key;
+
+ list_for_each_entry(key, &lock_keys, list)
+ fprintf(lock_output, "%*s ", key->len, key->header);
+
+ switch (aggr_mode) {
+ case LOCK_AGGR_TASK:
+ fprintf(lock_output, " %10s %s\n\n", "pid",
+ show_lock_owner ? "owner" : "comm");
+ break;
+ case LOCK_AGGR_CALLER:
+ fprintf(lock_output, " %10s %s\n\n", "type", "caller");
+ break;
+ case LOCK_AGGR_ADDR:
+ fprintf(lock_output, " %16s %s\n\n", "address", "symbol");
+ break;
+ default:
+ break;
+ }
+}
+
+static void print_header_csv(const char *sep)
+{
+ struct lock_key *key;
+
+ fprintf(lock_output, "# output: ");
+ list_for_each_entry(key, &lock_keys, list)
+ fprintf(lock_output, "%s%s ", key->header, sep);
+
+ switch (aggr_mode) {
+ case LOCK_AGGR_TASK:
+ fprintf(lock_output, "%s%s %s\n", "pid", sep,
+ show_lock_owner ? "owner" : "comm");
+ break;
+ case LOCK_AGGR_CALLER:
+ fprintf(lock_output, "%s%s %s", "type", sep, "caller");
+ if (verbose > 0)
+ fprintf(lock_output, "%s %s", sep, "stacktrace");
+ fprintf(lock_output, "\n");
+ break;
+ case LOCK_AGGR_ADDR:
+ fprintf(lock_output, "%s%s %s%s %s\n", "address", sep, "symbol", sep, "type");
+ break;
+ default:
+ break;
+ }
+}
+
+static void print_header(void)
+{
+ if (!quiet) {
+ if (symbol_conf.field_sep)
+ print_header_csv(symbol_conf.field_sep);
+ else
+ print_header_stdio();
+ }
+}
+
+static void print_lock_stat_stdio(struct lock_contention *con, struct lock_stat *st)
+{
+ struct lock_key *key;
+ struct thread *t;
+ int pid;
+
+ list_for_each_entry(key, &lock_keys, list) {
+ key->print(key, st);
+ fprintf(lock_output, " ");
+ }
+
+ switch (aggr_mode) {
+ case LOCK_AGGR_CALLER:
+ fprintf(lock_output, " %10s %s\n", get_type_str(st->flags), st->name);
+ break;
+ case LOCK_AGGR_TASK:
+ pid = st->addr;
+ t = perf_session__findnew(session, pid);
+ fprintf(lock_output, " %10d %s\n",
+ pid, pid == -1 ? "Unknown" : thread__comm_str(t));
+ break;
+ case LOCK_AGGR_ADDR:
+ fprintf(lock_output, " %016llx %s (%s)\n", (unsigned long long)st->addr,
+ st->name, get_type_name(st->flags));
+ break;
+ default:
+ break;
+ }
+
+ if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
+ struct map *kmap;
+ struct symbol *sym;
+ char buf[128];
+ u64 ip;
+
+ for (int i = 0; i < max_stack_depth; i++) {
+ if (!st->callstack || !st->callstack[i])
+ break;
+
+ ip = st->callstack[i];
+ sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
+ get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
+ fprintf(lock_output, "\t\t\t%#lx %s\n", (unsigned long)ip, buf);
+ }
+ }
+}
+
+static void print_lock_stat_csv(struct lock_contention *con, struct lock_stat *st,
+ const char *sep)
+{
+ struct lock_key *key;
+ struct thread *t;
+ int pid;
+
+ list_for_each_entry(key, &lock_keys, list) {
+ key->print(key, st);
+ fprintf(lock_output, "%s ", sep);
+ }
+
+ switch (aggr_mode) {
+ case LOCK_AGGR_CALLER:
+ fprintf(lock_output, "%s%s %s", get_type_str(st->flags), sep, st->name);
+ if (verbose <= 0)
+ fprintf(lock_output, "\n");
+ break;
+ case LOCK_AGGR_TASK:
+ pid = st->addr;
+ t = perf_session__findnew(session, pid);
+ fprintf(lock_output, "%d%s %s\n", pid, sep,
+ pid == -1 ? "Unknown" : thread__comm_str(t));
+ break;
+ case LOCK_AGGR_ADDR:
+ fprintf(lock_output, "%llx%s %s%s %s\n", (unsigned long long)st->addr, sep,
+ st->name, sep, get_type_name(st->flags));
+ break;
+ default:
+ break;
+ }
+
+ if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
+ struct map *kmap;
+ struct symbol *sym;
+ char buf[128];
+ u64 ip;
+
+ for (int i = 0; i < max_stack_depth; i++) {
+ if (!st->callstack || !st->callstack[i])
+ break;
+
+ ip = st->callstack[i];
+ sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
+ get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
+ fprintf(lock_output, "%s %#lx %s", i ? ":" : sep, (unsigned long) ip, buf);
+ }
+ fprintf(lock_output, "\n");
+ }
+}
+
+static void print_lock_stat(struct lock_contention *con, struct lock_stat *st)
+{
+ if (symbol_conf.field_sep)
+ print_lock_stat_csv(con, st, symbol_conf.field_sep);
+ else
+ print_lock_stat_stdio(con, st);
+}
+
+static void print_footer_stdio(int total, int bad, struct lock_contention_fails *fails)
{
	/* Output for debug, this has to be removed */
int broken = fails->task + fails->stack + fails->time + fails->data;
+ if (!use_bpf)
+ print_bad_events(bad, total);
+
if (quiet || total == 0 || (broken == 0 && verbose <= 0))
return;
total += broken;
- pr_info("\n=== output for debug ===\n\n");
- pr_info("bad: %d, total: %d\n", broken, total);
- pr_info("bad rate: %.2f %%\n", (double)broken / (double)total * 100);
+ fprintf(lock_output, "\n=== output for debug ===\n\n");
+ fprintf(lock_output, "bad: %d, total: %d\n", broken, total);
+ fprintf(lock_output, "bad rate: %.2f %%\n", 100.0 * broken / total);
- pr_info("histogram of failure reasons\n");
- pr_info(" %10s: %d\n", "task", fails->task);
- pr_info(" %10s: %d\n", "stack", fails->stack);
- pr_info(" %10s: %d\n", "time", fails->time);
- pr_info(" %10s: %d\n", "data", fails->data);
+ fprintf(lock_output, "histogram of failure reasons\n");
+ fprintf(lock_output, " %10s: %d\n", "task", fails->task);
+ fprintf(lock_output, " %10s: %d\n", "stack", fails->stack);
+ fprintf(lock_output, " %10s: %d\n", "time", fails->time);
+ fprintf(lock_output, " %10s: %d\n", "data", fails->data);
+}
+
+static void print_footer_csv(int total, int bad, struct lock_contention_fails *fails,
+ const char *sep)
+{
+	/* Output for debug, this has to be removed */
+ if (use_bpf)
+ bad = fails->task + fails->stack + fails->time + fails->data;
+
+ if (quiet || total == 0 || (bad == 0 && verbose <= 0))
+ return;
+
+ total += bad;
+ fprintf(lock_output, "# debug: total=%d%s bad=%d", total, sep, bad);
+
+ if (use_bpf) {
+ fprintf(lock_output, "%s bad_%s=%d", sep, "task", fails->task);
+ fprintf(lock_output, "%s bad_%s=%d", sep, "stack", fails->stack);
+ fprintf(lock_output, "%s bad_%s=%d", sep, "time", fails->time);
+ fprintf(lock_output, "%s bad_%s=%d", sep, "data", fails->data);
+ } else {
+ int i;
+ const char *name[4] = { "acquire", "acquired", "contended", "release" };
+
+ for (i = 0; i < BROKEN_MAX; i++)
+ fprintf(lock_output, "%s bad_%s=%d", sep, name[i], bad_hist[i]);
+ }
+ fprintf(lock_output, "\n");
+}
+
+static void print_footer(int total, int bad, struct lock_contention_fails *fails)
+{
+ if (symbol_conf.field_sep)
+ print_footer_csv(total, bad, fails, symbol_conf.field_sep);
+ else
+ print_footer_stdio(total, bad, fails);
}
static void print_contention_result(struct lock_contention *con)
{
struct lock_stat *st;
- struct lock_key *key;
int bad, total, printed;
- if (!quiet) {
- list_for_each_entry(key, &lock_keys, list)
- pr_info("%*s ", key->len, key->header);
-
- switch (aggr_mode) {
- case LOCK_AGGR_TASK:
- pr_info(" %10s %s\n\n", "pid",
- show_lock_owner ? "owner" : "comm");
- break;
- case LOCK_AGGR_CALLER:
- pr_info(" %10s %s\n\n", "type", "caller");
- break;
- case LOCK_AGGR_ADDR:
- pr_info(" %16s %s\n\n", "address", "symbol");
- break;
- default:
- break;
- }
- }
+ if (!quiet)
+ print_header();
bad = total = printed = 0;
while ((st = pop_from_result())) {
- struct thread *t;
- int pid;
-
total += use_bpf ? st->nr_contended : 1;
if (st->broken)
bad++;
@@ -1685,45 +1878,7 @@ static void print_contention_result(struct lock_contention *con)
if (!st->wait_time_total)
continue;
- list_for_each_entry(key, &lock_keys, list) {
- key->print(key, st);
- pr_info(" ");
- }
-
- switch (aggr_mode) {
- case LOCK_AGGR_CALLER:
- pr_info(" %10s %s\n", get_type_str(st->flags), st->name);
- break;
- case LOCK_AGGR_TASK:
- pid = st->addr;
- t = perf_session__findnew(session, pid);
- pr_info(" %10d %s\n",
- pid, pid == -1 ? "Unknown" : thread__comm_str(t));
- break;
- case LOCK_AGGR_ADDR:
- pr_info(" %016llx %s (%s)\n", (unsigned long long)st->addr,
- st->name, get_type_name(st->flags));
- break;
- default:
- break;
- }
-
- if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
- struct map *kmap;
- struct symbol *sym;
- char buf[128];
- u64 ip;
-
- for (int i = 0; i < max_stack_depth; i++) {
- if (!st->callstack || !st->callstack[i])
- break;
-
- ip = st->callstack[i];
- sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
- get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
- pr_info("\t\t\t%#lx %s\n", (unsigned long)ip, buf);
- }
- }
+ print_lock_stat(con, st);
if (++printed >= print_nr_entries)
break;
@@ -1740,10 +1895,7 @@ static void print_contention_result(struct lock_contention *con)
/* some entries are collected but hidden by the callstack filter */
total += con->nr_filtered;
- if (use_bpf)
- print_bpf_events(total, &con->fails);
- else
- print_bad_events(bad, total);
+ print_footer(total, bad, &con->fails);
}
static bool force;
@@ -1773,8 +1925,6 @@ static int __cmd_report(bool display_info)
return PTR_ERR(session);
}
- /* for lock function check */
- symbol_conf.sort_by_name = true;
symbol_conf.allow_aliases = true;
symbol__init(&session->header.env);
@@ -1849,6 +1999,16 @@ static int check_lock_contention_options(const struct option *options,
return -1;
}
+ if (symbol_conf.field_sep) {
+ if (strstr(symbol_conf.field_sep, ":") || /* part of type flags */
+ strstr(symbol_conf.field_sep, "+") || /* part of caller offset */
+ strstr(symbol_conf.field_sep, ".")) { /* can be in a symbol name */
+ pr_err("Cannot use the separator that is already used\n");
+ parse_options_usage(usage, options, "x", 1);
+ return -1;
+ }
+ }
+
if (show_lock_owner)
show_thread_stats = true;
@@ -1903,8 +2063,6 @@ static int __cmd_contention(int argc, const char **argv)
if (con.aggr_mode == LOCK_AGGR_CALLER)
con.save_callstack = true;
- /* for lock function check */
- symbol_conf.sort_by_name = true;
symbol_conf.allow_aliases = true;
symbol__init(&session->header.env);
@@ -1966,6 +2124,15 @@ static int __cmd_contention(int argc, const char **argv)
if (select_key(true))
goto out_delete;
+ if (symbol_conf.field_sep) {
+ int i;
+ struct lock_key *keys = contention_keys;
+
+ /* do not align output in CSV format */
+ for (i = 0; keys[i].name; i++)
+ keys[i].len = 0;
+ }
+
if (use_bpf) {
lock_contention_start();
if (argc)
@@ -2264,10 +2431,29 @@ static int parse_call_stack(const struct option *opt __maybe_unused, const char
return ret;
}
+static int parse_output(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ const char **name = (const char **)opt->value;
+
+ if (str == NULL)
+ return -1;
+
+ lock_output = fopen(str, "w");
+ if (lock_output == NULL) {
+ pr_err("Cannot open %s\n", str);
+ return -1;
+ }
+
+ *name = str;
+ return 0;
+}
+
int cmd_lock(int argc, const char **argv)
{
const struct option lock_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ OPT_CALLBACK(0, "output", &output_name, "file", "output file name", parse_output),
OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
@@ -2334,6 +2520,8 @@ int cmd_lock(int argc, const char **argv)
OPT_CALLBACK('S', "callstack-filter", NULL, "NAMES",
"Filter specific function in the callstack", parse_call_stack),
OPT_BOOLEAN('o', "lock-owner", &show_lock_owner, "show lock owners instead of waiters"),
+ OPT_STRING_NOEMPTY('x', "field-separator", &symbol_conf.field_sep, "separator",
+ "print result in CSV format with custom separator"),
OPT_PARENT(lock_options)
};
@@ -2365,6 +2553,7 @@ int cmd_lock(int argc, const char **argv)
for (i = 0; i < LOCKHASH_SIZE; i++)
INIT_HLIST_HEAD(lockhash_table + i);
+ lock_output = stderr;
argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
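check_lock_contention_options() refuses separators that collide with characters already emitted in the fields: ':' joins stack entries, '+' appears in caller offsets, and '.' can occur in symbol names. A sketch of the resulting behaviour (command lines are illustrative only):

    perf lock contention -b -x : -- sleep 1    # rejected: ':' is already used in the output
    perf lock contention -b -x , -- sleep 1    # accepted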
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index a31a23af5547..dcedfe00f04d 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1676,7 +1676,6 @@ repeat:
* See symbol__browser_index.
*/
symbol_conf.priv_size += sizeof(u32);
- symbol_conf.sort_by_name = true;
}
annotation_config__init(&report.annotation_opts);
}
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
index 85fb975b6f56..daf9458f0b77 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -92,28 +92,28 @@
},
{
"BriefDescription": "Percentage of cycles in aborted transactions.",
- "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_aborted_cycles",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_transaction",
"ScaleUnit": "1cycles / transaction"
},
{
"BriefDescription": "Percentage of cycles within a transaction region.",
- "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_transactional_cycles",
"ScaleUnit": "100%"
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 0e2e446ced7a..fbb111e40829 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -1830,28 +1830,28 @@
},
{
"BriefDescription": "Percentage of cycles in aborted transactions.",
- "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_aborted_cycles",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_transaction",
"ScaleUnit": "1cycles / transaction"
},
{
"BriefDescription": "Percentage of cycles within a transaction region.",
- "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_transactional_cycles",
"ScaleUnit": "100%"
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
index 04f08e4d2402..095904c77001 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
@@ -8,6 +8,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
@@ -245,27 +253,34 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
{
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.DSB_CYCLES_OK]",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_OK]",
"SampleAfterValue": "2000003",
"UMask": "0x18"
},
{
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.DSB_CYCLES_ANY]",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_ANY]",
"SampleAfterValue": "2000003",
"UMask": "0x18"
},
@@ -297,6 +312,24 @@
"UMask": "0x8"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.ALL_DSB_CYCLES_ANY_UOPS]",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_ANY_UOPS]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
index 31a1663d57f8..66d686cc933e 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
@@ -361,10 +361,10 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -488,11 +488,11 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_OK]",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
- "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector). [This event is alias to LSD.CYCLES_OK]",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -506,6 +506,15 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_4_UOPS]",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector). [This event is alias to LSD.CYCLES_4_UOPS]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Number of Uops delivered by the LSD.",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json
index 725780fb3990..1a342dff1503 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json
@@ -6606,7 +6606,7 @@
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
"PerPkg": "1",
- "PublicDescription": "ad and bl messages were actually slotted into the same flit in paralle",
+ "PublicDescription": "ad and bl messages were actually slotted into the same flit in parallel",
"UMask": "0x8",
"Unit": "M3UPI"
},
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
index f761856d738e..d82d2cca6f0a 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
@@ -2735,7 +2735,7 @@
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts. Is there a filter of sorts?",
+ "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
"Unit": "iMC"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/icelake/cache.json b/tools/perf/pmu-events/arch/x86/icelake/cache.json
index 79b9f02a4b63..d26c4efe35f0 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/cache.json
@@ -155,18 +155,18 @@
"UMask": "0x21"
},
{
- "BriefDescription": "All requests that miss L2 cache. This event is not supported on ICL and ICX products, only supported on RKL products.",
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
- "PublicDescription": "Counts all requests that miss L2 cache. This event is not supported on ICL and ICX products, only supported on RKL products.",
"SampleAfterValue": "200003",
"UMask": "0x3f"
},
{
- "BriefDescription": "All L2 requests. This event is not supported on ICL and ICX products, only supported on RKL products.",
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
- "PublicDescription": "Counts all L2 requests. This event is not supported on ICL and ICX products, only supported on RKL products.",
"SampleAfterValue": "200003",
"UMask": "0xff"
},
diff --git a/tools/perf/pmu-events/arch/x86/icelake/frontend.json b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
index 3e3d2b002170..2b539a08d2bf 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
@@ -8,6 +8,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
"CounterMask": "1",
"EdgeDetect": "1",
@@ -213,10 +221,10 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_DATA.STALLS]",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
- "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_DATA.STALLS]",
"SampleAfterValue": "500009",
"UMask": "0x4"
},
@@ -237,10 +245,26 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
- "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
index cc4edf855064..8fcc05c4e0a1 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -1516,28 +1516,28 @@
},
{
"BriefDescription": "Percentage of cycles in aborted transactions.",
- "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_aborted_cycles",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_transaction",
"ScaleUnit": "1cycles / transaction"
},
{
"BriefDescription": "Percentage of cycles within a transaction region.",
- "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_transactional_cycles",
"ScaleUnit": "100%"
diff --git a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
index 154fee4b60fb..375b78044f14 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
@@ -318,10 +318,10 @@
"UMask": "0x40"
},
{
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
"SampleAfterValue": "500009",
"UMask": "0x1"
},
@@ -556,7 +556,7 @@
"BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
- "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the specualtive path as well as the out-of-order engine recovery past a branch misprediction.",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the speculative path as well as the out-of-order engine recovery past a branch misprediction.",
"SampleAfterValue": "10000003",
"UMask": "0x8"
},
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
index 71498044f1cb..f6edc4222f42 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
@@ -8,6 +8,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
"CounterMask": "1",
"EdgeDetect": "1",
@@ -213,10 +221,10 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_DATA.STALLS]",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
- "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_DATA.STALLS]",
"SampleAfterValue": "500009",
"UMask": "0x4"
},
@@ -237,10 +245,26 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
- "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
index 6f25b5b7aaf6..9bb7e3f20f7f 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -1812,28 +1812,28 @@
},
{
"BriefDescription": "Percentage of cycles in aborted transactions.",
- "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_aborted_cycles",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_transaction",
"ScaleUnit": "1cycles / transaction"
},
{
"BriefDescription": "Percentage of cycles within a transaction region.",
- "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_transactional_cycles",
"ScaleUnit": "100%"
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
index 442a4c7539dd..176e5ef2a24a 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
@@ -318,10 +318,10 @@
"UMask": "0x40"
},
{
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
"SampleAfterValue": "500009",
"UMask": "0x1"
},
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
index 8ac5907762e1..f87ea3f66d1b 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
@@ -9311,7 +9311,7 @@
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
"PerPkg": "1",
- "PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in paralle",
+ "PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in parallel",
"UMask": "0x8",
"Unit": "M3UPI"
},
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 6543a68d4a17..6650100830c4 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -5,7 +5,7 @@ GenuineIntel-6-(1C|26|27|35|36),v4,bonnell,core
GenuineIntel-6-(3D|47),v28,broadwell,core
GenuineIntel-6-56,v10,broadwellde,core
GenuineIntel-6-4F,v21,broadwellx,core
-GenuineIntel-6-55-[56789ABCDEF],v1.18,cascadelakex,core
+GenuineIntel-6-55-[56789ABCDEF],v1.19,cascadelakex,core
GenuineIntel-6-9[6C],v1.04,elkhartlake,core
GenuineIntel-6-5[CF],v13,goldmont,core
GenuineIntel-6-7A,v1.01,goldmontplus,core
@@ -13,23 +13,24 @@ GenuineIntel-6-B6,v1.00,grandridge,core
GenuineIntel-6-A[DE],v1.01,graniterapids,core
GenuineIntel-6-(3C|45|46),v33,haswell,core
GenuineIntel-6-3F,v27,haswellx,core
-GenuineIntel-6-(7D|7E|A7),v1.18,icelake,core
-GenuineIntel-6-6[AC],v1.20,icelakex,core
+GenuineIntel-6-7[DE],v1.19,icelake,core
+GenuineIntel-6-6[AC],v1.21,icelakex,core
GenuineIntel-6-3A,v24,ivybridge,core
GenuineIntel-6-3E,v23,ivytown,core
GenuineIntel-6-2D,v23,jaketown,core
GenuineIntel-6-(57|85),v10,knightslanding,core
-GenuineIntel-6-A[AC],v1.01,meteorlake,core
+GenuineIntel-6-A[AC],v1.03,meteorlake,core
GenuineIntel-6-1[AEF],v3,nehalemep,core
GenuineIntel-6-2E,v3,nehalemex,core
+GenuineIntel-6-A7,v1.01,rocketlake,core
GenuineIntel-6-2A,v19,sandybridge,core
-GenuineIntel-6-(8F|CF),v1.13,sapphirerapids,core
+GenuineIntel-6-(8F|CF),v1.14,sapphirerapids,core
GenuineIntel-6-AF,v1.00,sierraforest,core
GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
-GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v56,skylake,core
-GenuineIntel-6-55-[01234],v1.30,skylakex,core
+GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v57,skylake,core
+GenuineIntel-6-55-[01234],v1.31,skylakex,core
GenuineIntel-6-86,v1.21,snowridgex,core
-GenuineIntel-6-8[CD],v1.12,tigerlake,core
+GenuineIntel-6-8[CD],v1.13,tigerlake,core
GenuineIntel-6-2C,v4,westmereep-dp,core
GenuineIntel-6-25,v3,westmereep-sp,core
GenuineIntel-6-2F,v3,westmereex,core
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
index bf24d3f25a3d..e1ae7c92f38e 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
@@ -1,5 +1,115 @@
[
{
+ "BriefDescription": "L1D.HWPF_MISS",
+ "EventCode": "0x51",
+ "EventName": "L1D.HWPF_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "PublicDescription": "Counts all requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xdf",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_RQSTS.MISS]",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "L2 code requests",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_CODE_RD",
@@ -18,6 +128,139 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x27",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Counts demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_HWPF",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf0",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_REQUEST.HIT]",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.HIT",
+ "PublicDescription": "Counts all requests that hit L2 cache. [This event is alias to L2_REQUEST.HIT]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xdf",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.HWPF_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_REQUEST.MISS]",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0x23",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
@@ -54,6 +297,72 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an instruction cache or TLB miss.",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.L2_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which hit in the LLC.",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which missed all the caches.",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x68",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.L2_HIT",
+ "PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x68",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Retired load instructions.",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -76,6 +385,352 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions that hit the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x9",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions that hit the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0xa",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "EventCode": "0x43",
+ "EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfd",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Data_LA": "1",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that miss in the L1 data cache.",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that hit in the L2 cache.",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that miss in the L2 cache.",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1c",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of loads that hit in a write combining buffer (WCB), excluding the first load that caused the WCB to allocate.",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.ALL",
+ "SampleAfterValue": "20003",
+ "UMask": "0x7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
+ "SampleAfterValue": "20003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.RSV",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of load ops retired.",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -99,6 +754,18 @@
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Data_LA": "1",
"EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
@@ -123,6 +790,18 @@
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
"Data_LA": "1",
"EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
@@ -192,6 +871,46 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of load uops retired that performed one or more locks",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x43",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split load uops.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split store uops.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x42",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
"Data_LA": "1",
"EventCode": "0xd0",
@@ -200,5 +919,97 @@
"SampleAfterValue": "1000003",
"UMask": "0x6",
"Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Retired memory uops for any access",
+ "EventCode": "0xe5",
+ "EventName": "MEM_UOP_RETIRED.ANY",
+ "PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "EventCode": "0x2c",
+ "EventName": "SQ_MISC.BUS_LOCK",
+ "PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to an icache miss",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ICACHE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json b/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
new file mode 100644
index 000000000000..616489f0974a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/floating-point.json
@@ -0,0 +1,143 @@
+[
+ {
+ "BriefDescription": "This event counts the cycles the floating point divider is busy.",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.SSE_AVX_MIX",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divide uops retired (x87 and sse, including x87 sqrt).",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
index 66e5609699ea..0f064518d1c0 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
@@ -1,5 +1,260 @@
[
{
+ "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Clears due to Unknown Branches.",
+ "EventCode": "0x60",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "EventCode": "0x87",
+ "EventName": "DECODE.MS_BUSY",
+ "SampleAfterValue": "500009",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "EventCode": "0x61",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired ANT branches",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_ANT",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x9",
+ "PEBS": "1",
+ "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted)",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600106",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x608006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x601006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600206",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x610006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x602006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600406",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x620006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x604006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted Retired ANT branches",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MISP_ANT",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x9",
+ "PEBS": "1",
+ "PublicDescription": "ANT retired branches that got just mispredicted",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MS_FLOWS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x17",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
@@ -16,6 +271,131 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "ICACHE_DATA.STALL_PERIODS",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALL_PERIODS",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.HIT",
+ "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the number of uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
"EventCode": "0x9c",
"EventName": "IDQ_BUBBLES.CORE",
@@ -23,5 +403,35 @@
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
index 20c2efe70eeb..67e949b4c789 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
@@ -1,5 +1,101 @@
[
{
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.ANY_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.L1_BOUND_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xf4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.L1_MISS_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.OTHER_AT_RET",
+ "PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc0",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.PGWALK_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xa0",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.ST_ADDR_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x84",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "3",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
+ "PublicDescription": "Execution stalls while L2 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. bus lock).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding.",
+ "CounterMask": "9",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
+ "PublicDescription": "Execution stalls while L3 cache miss demand cacheable load request is outstanding (will not count for uncacheable demand requests e.g. bus lock).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
"Data_LA": "1",
"EventCode": "0xcd",
@@ -115,43 +211,29 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBFC00001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
+ "BriefDescription": "Counts misaligned loads that are 4K page splits.",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBFC00001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.L3_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBFC00002",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
+ "BriefDescription": "Counts misaligned stores that are 4K page splits.",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.L3_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBFC00002",
+ "BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"SampleAfterValue": "100003",
- "UMask": "0x1",
+ "UMask": "0x10",
"Unit": "cpu_core"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/other.json b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
index 14e648bf11c5..2ec57f487525 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
@@ -1,41 +1,50 @@
[
{
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
+ "MSRValue": "0x10800",
"SampleAfterValue": "100003",
"UMask": "0x1",
- "Unit": "cpu_atom"
+ "Unit": "cpu_core"
},
{
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
"SampleAfterValue": "100003",
- "UMask": "0x1",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles the uncore cannot take further requests",
+ "CounterMask": "1",
+ "EventCode": "0x2d",
+ "EventName": "XQ.FULL_CYCLES",
+ "PublicDescription": "number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).",
+ "SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
}
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
index 639789478073..eeaa7a97f71c 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
@@ -1,5 +1,33 @@
[
{
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIV_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1b",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
@@ -18,6 +46,104 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xbf",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf9",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
@@ -36,6 +162,174 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x44",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x51",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x50",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x42",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0xc0",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x60",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET_COST",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x48",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
"EventName": "CPU_CLK_UNHALTED.CORE",
"SampleAfterValue": "2000003",
@@ -50,6 +344,33 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
@@ -65,6 +386,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Reference cycles when the core is not in halt state.",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
@@ -104,6 +434,133 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "5",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x21",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CounterMask": "2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x75",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
@@ -138,12 +595,240 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.MACRO_FUSED",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "CounterMask": "1",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
+ "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.128BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x13",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.256BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xac",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_128",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_256",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.MUL_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.SHUFFLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_128",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
- "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CounterMask": "6",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
"SampleAfterValue": "100003",
- "UMask": "0x82",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "SampleAfterValue": "20003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.PAGE_FAULT",
+ "SampleAfterValue": "20003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SLOW",
+ "SampleAfterValue": "20003",
+ "UMask": "0x6f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
"Unit": "cpu_core"
},
{
@@ -156,6 +841,32 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BAD_SPEC_SLOTS",
+ "PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
"EventName": "TOPDOWN.SLOTS",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
@@ -181,6 +892,30 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Fast Nukes such as Memory Ordering Machine clears and MRN nukes",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
@@ -188,6 +923,30 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to due to certain allocation restrictions",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to mrbl stall. A 'marble' refers to a physical register file entry, also known as the physical destination (PDST).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REGISTER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
@@ -195,6 +954,79 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BAClear",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTClear",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ms",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.CISC",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stall",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8d",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x72",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to TOPDOWN_FE_BOUND.ITLB_MISS]",
+ "Deprecated": "1",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ITLB",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to itlb miss [This event is alias to TOPDOWN_FE_BOUND.ITLB]",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ITLB_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to predecode wrong",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL",
"EventCode": "0x72",
"EventName": "TOPDOWN_RETIRING.ALL",
@@ -203,6 +1035,269 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Number of non dec-by-all uops decoded by decoder",
+ "EventCode": "0x76",
+ "EventName": "UOPS_DECODED.DEC0_UOPS",
+ "PublicDescription": "This event counts the number of not dec-by-all uops decoded by decoder 0.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on port 0",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PublicDescription": "Number of uops dispatch to execution port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on port 1",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PublicDescription": "Number of uops dispatch to execution port 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3_10",
+ "PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 4 and 9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 5 and 11",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_5_11",
+ "PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on port 6",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PublicDescription": "Number of uops dispatch to execution port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 7 and 8",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "UOPS_ISSUED.CYCLES",
+ "CounterMask": "1",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "CounterMask": "1",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired uops except the last uop of each instruction.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.HEAVY",
+ "PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of integer divide uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "UOPS_RETIRED.MS",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.SLOTS",
@@ -210,5 +1305,25 @@
"SampleAfterValue": "2000003",
"UMask": "0x2",
"Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops retired, includes those in ms flows",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.X87",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-cache.json
new file mode 100644
index 000000000000..188843be4caf
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-cache.json
@@ -0,0 +1,18 @@
+[
+ {
+ "BriefDescription": "Number of all entries allocated. Includes also retries.",
+ "EventCode": "0x35",
+ "EventName": "UNC_HAC_CBO_TOR_ALLOCATION.ALL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HAC_CBO"
+ },
+ {
+ "BriefDescription": "Asserted on coherent DRD + DRdPref allocations into the queue. Cacheable only",
+ "EventCode": "0x35",
+ "EventName": "UNC_HAC_CBO_TOR_ALLOCATION.DRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HAC_CBO"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json
new file mode 100644
index 000000000000..08b5c7574cfc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-interconnect.json
@@ -0,0 +1,42 @@
+[
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches",
+ "EventCode": "0x81",
+ "EventName": "UNC_HAC_ARB_REQ_TRK_REQUEST.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HAC_ARB"
+ },
+ {
+ "BriefDescription": "Number of all CMI transactions",
+ "EventCode": "0x8a",
+ "EventName": "UNC_HAC_ARB_TRANSACTIONS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HAC_ARB"
+ },
+ {
+ "BriefDescription": "Number of all CMI reads",
+ "EventCode": "0x8a",
+ "EventName": "UNC_HAC_ARB_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HAC_ARB"
+ },
+ {
+ "BriefDescription": "Number of all CMI writes not including Mflush",
+ "EventCode": "0x8a",
+ "EventName": "UNC_HAC_ARB_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HAC_ARB"
+ },
+ {
+ "BriefDescription": "Total number of all outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x81",
+ "EventName": "UNC_HAC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HAC_ARB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json
new file mode 100644
index 000000000000..c9d248d1042e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json
@@ -0,0 +1,126 @@
+[
+ {
+ "BriefDescription": "Counts every CAS read command sent from the Memory Controller 0 to DRAM (sum of all channels).",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every CAS read command sent from the Memory Controller 0 to DRAM (sum of all channels). Each CAS commands can be for 32B or 64B of data.",
+ "UMask": "0x20",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every read and write request entering the Memory Controller 0.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_TOTAL_REQCOUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every read and write request entering the Memory Controller 0 (sum of all channels). All requests are counted as one, whether they are 32B or 64B Read/Write or partial/full line writes. Some write requests to the same address may merge to a single write command to DRAM. Therefore, the total request count may be higher than total DRAM BW.",
+ "UMask": "0x10",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every CAS write command sent from the Memory Controller 0 to DRAM (sum of all channels).",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every CAS write command sent from the Memory Controller 0 to DRAM (sum of all channels). Each CAS commands can be for 32B or 64B of data.",
+ "UMask": "0x30",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every CAS read command sent from the Memory Controller 1 to DRAM (sum of all channels).",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every CAS read command sent from the Memory Controller 1 to DRAM (sum of all channels). Each CAS commands can be for 32B or 64B of data.",
+ "UMask": "0x20",
+ "Unit": "imc_free_running_1"
+ },
+ {
+ "BriefDescription": "Counts every read and write request entering the Memory Controller 1.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_TOTAL_REQCOUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every read and write request entering the Memory Controller 1 (sum of all channels). All requests are counted as one, whether they are 32B or 64B Read/Write or partial/full line writes. Some write requests to the same address may merge to a single write command to DRAM. Therefore, the total request count may be higher than total DRAM BW.",
+ "UMask": "0x10",
+ "Unit": "imc_free_running_1"
+ },
+ {
+ "BriefDescription": "Counts every CAS write command sent from the Memory Controller 1 to DRAM (sum of all channels).",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every CAS write command sent from the Memory Controller 1 to DRAM (sum of all channels). Each CAS commands can be for 32B or 64B of data.",
+ "UMask": "0x30",
+ "Unit": "imc_free_running_1"
+ },
+ {
+ "BriefDescription": "ACT command for a read request sent to DRAM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_ACT_COUNT_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command sent to DRAM",
+ "EventCode": "0x26",
+ "EventName": "UNC_M_ACT_COUNT_TOTAL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command for a write request sent to DRAM",
+ "EventCode": "0x25",
+ "EventName": "UNC_M_ACT_COUNT_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS command sent to DRAM",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_CAS_COUNT_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write CAS command sent to DRAM",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_CAS_COUNT_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command sent to DRAM due to page table idle timer expiration",
+ "EventCode": "0x28",
+ "EventName": "UNC_M_PRE_COUNT_IDLE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command sent to DRAM for a read/write request",
+ "EventCode": "0x27",
+ "EventName": "UNC_M_PRE_COUNT_PAGE_MISS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of bytes read from DRAM, in 32B chunks. Counter increments by 1 after receiving 32B chunk data.",
+ "EventCode": "0x3A",
+ "EventName": "UNC_M_RD_DATA",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Total number of read and write byte transfers to/from DRAM, in 32B chunks. Counter increments by 1 after sending or receiving 32B chunk data.",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M_TOTAL_DATA",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of bytes written to DRAM, in 32B chunks. Counter increments by 1 after sending 32B chunk data.",
+ "EventCode": "0x3B",
+ "EventName": "UNC_M_WR_DATA",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
index 556e4292fcc8..056c2a885a32 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
@@ -1,5 +1,40 @@
[
{
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CounterMask": "1",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x12",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
@@ -9,6 +44,95 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for Loads (demand or SW prefetch) in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Accounts for all pages sizes. Will result in a DTLB write from STLB.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CounterMask": "1",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x13",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
@@ -18,6 +142,86 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CounterMask": "1",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
@@ -34,5 +238,58 @@
"SampleAfterValue": "100003",
"UMask": "0xe",
"Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding for iside in PMH every cycle.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for iside in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals. Walks could be counted by edge detecting on this event, but would count restarted suspended walks.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.DTLB_MISS_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x90",
+ "Unit": "cpu_atom"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/cache.json b/tools/perf/pmu-events/arch/x86/rocketlake/cache.json
new file mode 100644
index 000000000000..b0f54a6650fe
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/cache.json
@@ -0,0 +1,894 @@
+[
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "EventCode": "0xf2",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x27"
+ },
+ {
+ "BriefDescription": "Demand requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Counts demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe7"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "All requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "Counts all requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "All L2 requests.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "Counts all L2 requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or Bus Lock.",
+ "Data_LA": "1",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C2380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts memory transactions sent to the uncore.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions sent to the uncore including requests initiated by the core, all L3 prefetches, reads resulting from page walks, and snoop responses.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding data read requests pending.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding data read requests pending. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding data read request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Cycles where at least 1 outstanding data read request is pending. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding Demand RFO request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Cycles where at least 1 outstanding Demand RFO request is pending. RFOs are initiated by a core as part of a data store operation. Demand RFO requests include RFOs, locks, and ItoM transactions. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.BUS_LOCK",
+ "PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles the queue waiting for offcore responses is full.",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SQ_FULL",
+ "PublicDescription": "Counts the cycles for which the thread is active and the queue waiting for responses from the uncore cannot take any more entries.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/floating-point.json b/tools/perf/pmu-events/arch/x86/rocketlake/floating-point.json
new file mode 100644
index 000000000000..85c26c889088
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/floating-point.json
@@ -0,0 +1,105 @@
+[
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x60"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfc"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/frontend.json b/tools/perf/pmu-events/arch/x86/rocketlake/frontend.json
new file mode 100644
index 000000000000..2b539a08d2bf
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/frontend.json
@@ -0,0 +1,377 @@
+[
+ {
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PublicDescription": "Counts the number of Decode Stream Buffer (DSB a.k.a. Uop Cache)-to-MITE speculative transitions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500106",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x508006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x501006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500206",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x510006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x502006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500406",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x520006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x504006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_DATA.STALLS]",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_DATA.STALLS]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "CounterMask": "5",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/memory.json b/tools/perf/pmu-events/arch/x86/rocketlake/memory.json
new file mode 100644
index 000000000000..e8d2ec1c029b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/memory.json
@@ -0,0 +1,394 @@
+[
+ {
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PublicDescription": "Counts the number of times HLE abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution successfully committed",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Counts the number of times HLE commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution started.",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Counts the number of times we entered an HLE region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC08000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles where at least one demand data read request known to have missed the L3 cache is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Cycles where at least one demand data read request known to have missed the L3 cache is pending. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known to have missed the L3 cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Counts the number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json b/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json
new file mode 100644
index 000000000000..a151ba9cccb0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/metricgroups.json
@@ -0,0 +1,113 @@
+{
+ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BigFoot": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "DSBmiss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "DataSharing": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Fed": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FetchBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FetchLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Flops": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FpScalar": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FpVector": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Frontend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "HPC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "IcMiss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "InsType": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryTLB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Memory_BW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Memory_Lat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MicroSeq": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "OS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Offcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "PGO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Pipeline": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "PortsUtil": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Prefetches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Ret": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Retire": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "SMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Server": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Snoop": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "SoC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Summary": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TmaL1": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TmaL2": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TmaL3mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TopdownL1": "Metrics for top-down breakdown at level 1",
+ "TopdownL2": "Metrics for top-down breakdown at level 2",
+ "TopdownL3": "Metrics for top-down breakdown at level 3",
+ "TopdownL4": "Metrics for top-down breakdown at level 4",
+ "TopdownL5": "Metrics for top-down breakdown at level 5",
+ "TopdownL6": "Metrics for top-down breakdown at level 6",
+ "tma_L1_group": "Metrics for top-down breakdown at level 1",
+ "tma_L2_group": "Metrics for top-down breakdown at level 2",
+ "tma_L3_group": "Metrics for top-down breakdown at level 3",
+ "tma_L4_group": "Metrics for top-down breakdown at level 4",
+ "tma_L5_group": "Metrics for top-down breakdown at level 5",
+ "tma_L6_group": "Metrics for top-down breakdown at level 6",
+ "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category",
+ "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category",
+ "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
+ "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category",
+ "tma_core_bound_group": "Metrics contributing to tma_core_bound category",
+ "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category",
+ "tma_dtlb_load_group": "Metrics contributing to tma_dtlb_load category",
+ "tma_dtlb_store_group": "Metrics contributing to tma_dtlb_store category",
+ "tma_fetch_bandwidth_group": "Metrics contributing to tma_fetch_bandwidth category",
+ "tma_fetch_latency_group": "Metrics contributing to tma_fetch_latency category",
+ "tma_fp_arith_group": "Metrics contributing to tma_fp_arith category",
+ "tma_fp_vector_group": "Metrics contributing to tma_fp_vector category",
+ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category",
+ "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category",
+ "tma_issue2P": "Metrics related by the issue $issue2P",
+ "tma_issueBC": "Metrics related by the issue $issueBC",
+ "tma_issueBM": "Metrics related by the issue $issueBM",
+ "tma_issueBW": "Metrics related by the issue $issueBW",
+ "tma_issueD0": "Metrics related by the issue $issueD0",
+ "tma_issueFB": "Metrics related by the issue $issueFB",
+ "tma_issueFL": "Metrics related by the issue $issueFL",
+ "tma_issueL1": "Metrics related by the issue $issueL1",
+ "tma_issueLat": "Metrics related by the issue $issueLat",
+ "tma_issueMC": "Metrics related by the issue $issueMC",
+ "tma_issueMS": "Metrics related by the issue $issueMS",
+ "tma_issueMV": "Metrics related by the issue $issueMV",
+ "tma_issueRFO": "Metrics related by the issue $issueRFO",
+ "tma_issueSL": "Metrics related by the issue $issueSL",
+ "tma_issueSO": "Metrics related by the issue $issueSO",
+ "tma_issueSmSt": "Metrics related by the issue $issueSmSt",
+ "tma_issueSpSt": "Metrics related by the issue $issueSpSt",
+ "tma_issueSyncxn": "Metrics related by the issue $issueSyncxn",
+ "tma_issueTLB": "Metrics related by the issue $issueTLB",
+ "tma_l1_bound_group": "Metrics contributing to tma_l1_bound category",
+ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category",
+ "tma_light_operations_group": "Metrics contributing to tma_light_operations category",
+ "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category",
+ "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category",
+ "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category",
+ "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category",
+ "tma_mite_group": "Metrics contributing to tma_mite category",
+ "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category",
+ "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category",
+ "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category",
+ "tma_retiring_group": "Metrics contributing to tma_retiring category",
+ "tma_serializing_operation_group": "Metrics contributing to tma_serializing_operation category",
+ "tma_store_bound_group": "Metrics contributing to tma_store_bound category",
+ "tma_store_op_utilization_group": "Metrics contributing to tma_store_op_utilization category"
+}
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/other.json b/tools/perf/pmu-events/arch/x86/rocketlake/other.json
new file mode 100644
index 000000000000..cfb590632918
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/other.json
@@ -0,0 +1,242 @@
+[
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
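(Illustrative aside, not part of the patch: the OCR.* entries above pair EventCode 0xB7/0xBB with an offcore-response MSR value. A minimal sketch of programming one of them as a raw event through perf_event_open on a Linux host with this PMU might look like the following; the constants are taken from the OCR.DEMAND_DATA_RD.ANY_RESPONSE entry above, everything else is a generic counting skeleton.)

/*
 * Sketch only: count OCR.DEMAND_DATA_RD.ANY_RESPONSE as a raw event
 * (EventCode 0xB7, UMask 0x1; the offcore-response MSR value 0x10001
 * is carried in config1).
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x1ULL << 8) | 0xb7;	/* UMask 0x1, EventCode 0xB7 */
	attr.config1 = 0x10001;			/* MSRValue for ANY_RESPONSE */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("demand data reads, any response: %llu\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}

Once these JSON files are installed, the same event should also be selectable by name, e.g. "perf stat -e OCR.DEMAND_DATA_RD.ANY_RESPONSE -- <cmd>".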
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json b/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json
new file mode 100644
index 000000000000..375b78044f14
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/pipeline.json
@@ -0,0 +1,801 @@
+[
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "50021"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "50021",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x14"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CounterMask": "2",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Number of all retired NOP instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired instructions.",
+ "CounterMask": "1",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
+ "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Clears speculative count",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "False dependencies due to partial compare on address.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CounterMask": "5",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR to be enabled properly.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.PAUSE_INST",
+ "PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5e",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the speculative path as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops decoded out of instructions exclusively fetched by decoder 0",
+ "EventCode": "0x56",
+ "EventName": "UOPS_DECODED.DEC0",
+ "PublicDescription": "Uops exclusively fetched by decoder 0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 0",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 1",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 2 and 3",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 4 and 9",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 5",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 6",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 7 and 8",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to 'Mixing Intel AVX and Intel SSE Code' section of the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
new file mode 100644
index 000000000000..1bb9cededa56
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
@@ -0,0 +1,1571 @@
+[
+ {
+ "BriefDescription": "C10 residency percent per package",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C10_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C8 residency percent per package",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C8_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C9 residency percent per package",
+ "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C9_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * ASSISTS.ANY / tma_info_thread_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=1\\,edge@ / tma_info_thread_slots",
+ "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL1;Default",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "max(1 - (tma_frontend_bound + tma_backend_bound + tma_retiring), 0)",
+ "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL1;Default",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
+ "MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_branch_instructions",
+ "MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(29 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "23.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "32.5 * tma_info_system_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "(5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / tma_info_thread_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_512b",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_thread_slots",
+ "MetricGroup": "Default;PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "MetricgroupNoGroup": "TopdownL1;Default",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=1@) / IDQ.MITE_UOPS",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / tma_info_thread_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_bad_spec_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_indirect",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_ret",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_ret < 500"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmispredict",
+ "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_botlnk_l0_core_bound_likely",
+ "MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_misses",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_botlnk_l2_ic_misses",
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
+ "MetricName": "tma_info_bottleneck_big_code",
+ "MetricThreshold": "tma_info_bottleneck_big_code > 20",
+ "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_bottleneck_branching_overhead"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_thread_slots)",
+ "MetricGroup": "Ret;tma_issueBC",
+ "MetricName": "tma_info_bottleneck_branching_overhead",
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 10",
+ "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_bottleneck_big_code"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
+ "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_bottleneck_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
+ "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_bottleneck_memory_bandwidth",
+ "MetricThreshold": "tma_info_bottleneck_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_bottleneck_memory_data_tlbs",
+ "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))",
+ "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_bottleneck_memory_latency",
+ "MetricThreshold": "tma_info_bottleneck_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_bottleneck_mispredictions",
+ "MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_branches_callret"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_branches_cond_nt"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_branches_cond_tk"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_branches_jump"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - (tma_info_branches_cond_nt + tma_info_branches_cond_tk + tma_info_branches_callret + tma_info_branches_jump)",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_branches_other_branches"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_core_clks"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_core_coreipc"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_core_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_core_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_core_ilp"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_frontend_dsb_coverage",
+ "MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 5 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_frontend_dsb_switch_cost"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_frontend_fetch_upc"
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_frontend_icache_miss_latency"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_frontend_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_frontend_ipdsb_miss_ret < 50"
+ },
+ {
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_inst_mix_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_frontend_ipunknown_branch"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_frontend_l2mpki_code"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_frontend_l2mpki_code_all"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
+ "MetricExpr": "LSD.UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "Fed;LSD",
+ "MetricName": "tma_info_frontend_lsd_coverage"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_inst_mix_bptkbranch"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_inst_mix_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_inst_mix_iparith",
+ "MetricThreshold": "tma_info_inst_mix_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_avx128",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_avx256",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_avx512",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_inst_mix_ipbranch",
+ "MetricThreshold": "tma_info_inst_mix_ipbranch < 8"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_inst_mix_ipcall",
+ "MetricThreshold": "tma_info_inst_mix_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_inst_mix_ipflop",
+ "MetricThreshold": "tma_info_inst_mix_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_inst_mix_ipload",
+ "MetricThreshold": "tma_info_inst_mix_ipload < 3"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_inst_mix_ipstore",
+ "MetricThreshold": "tma_info_inst_mix_ipstore < 8"
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_inst_mix_ipswpf",
+ "MetricThreshold": "tma_info_inst_mix_ipswpf < 100"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 11",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_core_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_core_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_memory_core_l3_cache_access_bw"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_core_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_memory_fb_hpki"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_memory_l1mpki"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_memory_l1mpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_memory_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_memory_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_memory_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_memory_l2mpki_load"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_memory_l3mpki"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_memory_load_miss_real_latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_memory_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_memory_oro_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_oro_load_l2_miss_latency"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_memory_oro_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x10@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_oro_load_l3_miss_latency"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_core_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_thread_l1d_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_core_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_thread_l2_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_core_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_memory_thread_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_core_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_thread_l3_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_code_stlb_mpki"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_load_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (2 * tma_info_core_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_page_walks_utilization",
+ "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_store_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_pipeline_execute"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_pipeline_retire"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_system_average_frequency"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_system_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_system_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_system_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_system_ipfarbranch",
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_system_kernel_cpi"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_system_kernel_utilization",
+ "MetricThreshold": "tma_info_system_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / cpu@UNC_ARB_DAT_OCCUPANCY.RD\\,cmask\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_system_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.RD + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.RD",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_system_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
+ },
+ {
+ "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+ "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.ALL + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.ALL",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_system_mem_request_latency"
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0",
+ "MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_power_license0_utilization",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
+ "MetricExpr": "CORE_POWER.LVL1_TURBO_LICENSE / tma_info_core_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_power_license1_utilization",
+ "MetricThreshold": "tma_info_system_power_license1_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
+ "MetricExpr": "CORE_POWER.LVL2_TURBO_LICENSE / tma_info_core_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_power_license2_utilization",
+ "MetricThreshold": "tma_info_system_power_license2_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_system_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_socket_clks"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_thread_clks"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_thread_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_thread_cpi"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_thread_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_thread_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_thread_ipc"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_thread_slots"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "(tma_info_thread_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
+ "MetricGroup": "SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_thread_slots_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_thread_uoppi",
+ "MetricThreshold": "tma_info_thread_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_thread_uptb",
+ "MetricThreshold": "tma_info_thread_uptb < 7.5"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_thread_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "9 * tma_info_system_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_2_3 / (2 * tma_info_core_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit",
+ "MetricExpr": "(LSD.CYCLES_ACTIVE - LSD.CYCLES_OK) / tma_info_core_core_clks / 2",
+ "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_lsd",
+ "MetricThreshold": "tma_lsd > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where (only) 4 uops were delivered by the MITE pipeline",
+ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_thread_clks",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group",
+ "MetricName": "tma_mite_4wide",
+ "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_thread_ipc / 5 > 0.35))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_thread_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions + tma_nop_instructions))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_5 / tma_info_core_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_thread_slots",
+ "MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "MetricgroupNoGroup": "TopdownL1;Default",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricExpr": "140 * MISC_RETIRED.PAUSE_INST / tma_info_thread_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
+ "MetricName": "tma_slow_pause",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricExpr": "tma_info_memory_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_thread_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_4_9 + UOPS_DISPATCHED.PORT_7_8) / (4 * tma_info_core_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores",
+ "MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_thread_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
+ "MetricName": "tma_streaming_stores",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_elision",
+ "ScaleUnit": "1cycles / elision"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
+ }
+]
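
Note: the tsx_* metrics added above guard their expressions with has_event(cycles\-t) so they evaluate to 0 on parts without TSX support. Below is a minimal Python sketch (not part of the patch) of how the guarded tsx_aborted_cycles expression behaves once the raw counts are known; the function name and dictionary keys are illustrative assumptions, not perf tooling.

# Hypothetical sketch: mirrors "(max(cycles-t - cycles-ct, 0) / cycles
# if has_event(cycles-t) else 0)" from the metric JSON above.
def tsx_aborted_cycles(counts: dict[str, float]) -> float:
    if "cycles-t" not in counts:           # the has_event(cycles\-t) guard
        return 0.0                         # TSX unsupported: metric reads 0%
    in_tx = counts["cycles-t"]             # cycles spent inside transactions
    committed = counts["cycles-ct"]        # cycles in committed transactions
    aborted = max(in_tx - committed, 0.0)  # clamp rounding noise to zero
    return aborted / counts["cycles"]      # ScaleUnit "100%" turns this into percent

# Example: 1e9 total cycles, 2e8 transactional, 1.5e8 committed -> 0.05 (5%)
print(tsx_aborted_cycles({"cycles": 1e9, "cycles-t": 2e8, "cycles-ct": 1.5e8}))
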
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/rocketlake/uncore-interconnect.json
new file mode 100644
index 000000000000..8027590f1776
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/uncore-interconnect.json
@@ -0,0 +1,74 @@
+[
+ {
+ "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, etc.",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Total number of all outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/rocketlake/uncore-other.json
new file mode 100644
index 000000000000..c6596ba09195
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/uncore-other.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "UNC_CLOCK.SOCKET",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "CLOCK"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json
new file mode 100644
index 000000000000..b28f62ce1f39
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json
@@ -0,0 +1,165 @@
+[
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CounterMask": "1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CounterMask": "1",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CounterMask": "1",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "STLB flush attempts",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
+ }
+]
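
Note: the event files added in this patch all follow the same flat schema (EventName, EventCode, UMask, SampleAfterValue, optional CounterMask and descriptions). The short Python sketch below (not part of the patch) indexes one such file by EventCode; the helper name and example path are assumptions for illustration only, not perf's own tooling.

# Hypothetical helper: group the entries of one pmu-events JSON file by EventCode.
import json
from collections import defaultdict

def events_by_code(path: str) -> dict[str, list[str]]:
    with open(path) as f:
        entries = json.load(f)             # each file is a flat JSON array of event dicts
    table = defaultdict(list)
    for ev in entries:
        table[ev["EventCode"]].append(ev["EventName"])
    return dict(table)

# e.g. events_by_code("tools/perf/pmu-events/arch/x86/rocketlake/virtual-memory.json")
# -> {"0x08": ["DTLB_LOAD_MISSES.STLB_HIT", ...], "0x49": [...], "0x85": [...], "0xBD": [...]}
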
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
index 72e9bdfa9f80..6dcf3b763af4 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
@@ -706,7 +706,7 @@
"BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
- "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of specualtive operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
"SampleAfterValue": "10000003",
"UMask": "0x8"
},
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
index c732982f70b5..c207c851a9f9 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -1938,28 +1938,28 @@
},
{
"BriefDescription": "Percentage of cycles in aborted transactions.",
- "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_aborted_cycles",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_transaction",
"ScaleUnit": "1cycles / transaction"
},
{
"BriefDescription": "Percentage of cycles within a transaction region.",
- "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_transactional_cycles",
"ScaleUnit": "100%"
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
index b91cebf81f50..3fa660694bc7 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
@@ -3157,6 +3157,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c0008101",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts; DRd hits from local IA",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
@@ -3372,6 +3389,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c0008201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts for DRd misses from local IA",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
@@ -3398,6 +3432,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "DRds issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8178201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8168201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts for DRds issued by IA Cores targeting DDR Mem that Missed the LLC",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
@@ -3443,6 +3494,15 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8268201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts; DRd Opt Pref misses from local IA",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
@@ -3452,6 +3512,15 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8a68201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
@@ -3470,6 +3539,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8978201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8968201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
@@ -3604,6 +3690,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccd78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10ccd68201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts; LLCPrefRFO misses from local IA",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
@@ -3613,6 +3716,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8878201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8868201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
@@ -3702,6 +3822,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8078201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8068201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts RFO misses from local IA",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
@@ -3720,6 +3857,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccc78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10ccc68201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
@@ -4428,6 +4582,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that hit the LLC.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c0008101",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy; DRd hits from local IA",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
@@ -4645,6 +4816,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "TOR Occupancy for All requests issued from IA cores to CXL accelerator memory regions that miss the LLC.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c0018201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c0008201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy for DRd misses from local IA",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
@@ -4673,6 +4861,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 memory expander card.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8178201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8168201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
@@ -4718,6 +4923,15 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8268201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy; DRd Opt Pref misses from local IA",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
@@ -4727,6 +4941,15 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8a68201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
@@ -4745,6 +4968,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "TOR Occupancy for L2 data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8978201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8968201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
@@ -4879,6 +5119,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "TOR Occupancy for LLC data prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccd78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10ccd68201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy; LLCPrefRFO misses from local IA",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
@@ -4888,6 +5145,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "TOR Occupancy for L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8878201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8868201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
@@ -4977,6 +5251,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "TOR Occupancy for RFOs issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8078201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10c8068201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy; RFO misses from local IA",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
@@ -4995,6 +5286,23 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "TOR Occupancy for LLC RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10ccc78201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_CXL_ACC_LOCAL",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "UMask": "0x10ccc68201",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
index 6800de05c836..09d840c7da4c 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
@@ -3326,7 +3326,7 @@
"EventCode": "0x50",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
"PerPkg": "1",
- "PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in paralle",
+ "PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in parallel",
"UMask": "0x8",
"Unit": "M3UPI"
},
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
index 04f08e4d2402..095904c77001 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -8,6 +8,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
@@ -245,27 +253,34 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
{
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.DSB_CYCLES_OK]",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_OK]",
"SampleAfterValue": "2000003",
"UMask": "0x18"
},
{
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.DSB_CYCLES_ANY]",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_ANY]",
"SampleAfterValue": "2000003",
"UMask": "0x18"
},
@@ -297,6 +312,24 @@
"UMask": "0x8"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.ALL_DSB_CYCLES_ANY_UOPS]",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_ANY_UOPS]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
index cc800fb8180a..cd3e737bf4a1 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -352,10 +352,10 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -479,11 +479,11 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_OK]",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
- "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector). [This event is alias to LSD.CYCLES_OK]",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -497,6 +497,15 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_4_UOPS]",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector). [This event is alias to LSD.CYCLES_4_UOPS]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Number of Uops delivered by the LSD.",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 2ed88842b880..94cb38540b5a 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -1466,28 +1466,28 @@
},
{
"BriefDescription": "Percentage of cycles in aborted transactions.",
- "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_aborted_cycles",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_transaction",
"ScaleUnit": "1cycles / transaction"
},
{
"BriefDescription": "Percentage of cycles within a transaction region.",
- "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_transactional_cycles",
"ScaleUnit": "100%"
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
index 04f08e4d2402..095904c77001 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
@@ -8,6 +8,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
@@ -245,27 +253,34 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
{
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.DSB_CYCLES_OK]",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_OK]",
"SampleAfterValue": "2000003",
"UMask": "0x18"
},
{
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.DSB_CYCLES_ANY]",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.DSB_CYCLES_ANY]",
"SampleAfterValue": "2000003",
"UMask": "0x18"
},
@@ -297,6 +312,24 @@
"UMask": "0x8"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop [This event is alias to IDQ.ALL_DSB_CYCLES_ANY_UOPS]",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_ANY_UOPS]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ. [This event is alias to IDQ.ALL_DSB_CYCLES_4_UOPS]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index 31a1663d57f8..66d686cc933e 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -361,10 +361,10 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -488,11 +488,11 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_OK]",
"CounterMask": "4",
"EventCode": "0xA8",
"EventName": "LSD.CYCLES_4_UOPS",
- "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector). [This event is alias to LSD.CYCLES_OK]",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -506,6 +506,15 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder. [This event is alias to LSD.CYCLES_4_UOPS]",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector). [This event is alias to LSD.CYCLES_4_UOPS]",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Number of Uops delivered by the LSD.",
"EventCode": "0xA8",
"EventName": "LSD.UOPS",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 507d39efacc8..fa4209809c57 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -1774,28 +1774,28 @@
},
{
"BriefDescription": "Percentage of cycles in aborted transactions.",
- "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_aborted_cycles",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_transaction",
"ScaleUnit": "1cycles / transaction"
},
{
"BriefDescription": "Percentage of cycles within a transaction region.",
- "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_transactional_cycles",
"ScaleUnit": "100%"
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json
index 26a5a20bf37a..3eece8a728b5 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json
@@ -6504,7 +6504,7 @@
"EventCode": "0x52",
"EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
"PerPkg": "1",
- "PublicDescription": "ad and bl messages were actually slotted into the same flit in paralle",
+ "PublicDescription": "ad and bl messages were actually slotted into the same flit in parallel",
"UMask": "0x8",
"Unit": "M3UPI"
},
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
index 6f8ff2262ce7..7a40aa0f1018 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
@@ -1952,7 +1952,7 @@
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts. Is there a filter of sorts?",
+ "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
"Unit": "iMC"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json b/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
index 23b8528590b3..d7b972452c0e 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
@@ -8,6 +8,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to ILD_STALL.LCP]",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to ILD_STALL.LCP]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
"CounterMask": "1",
"EdgeDetect": "1",
@@ -213,10 +221,10 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_DATA.STALLS]",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
- "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_DATA.STALLS]",
"SampleAfterValue": "500009",
"UMask": "0x4"
},
@@ -237,10 +245,26 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
"EventCode": "0x83",
"EventName": "ICACHE_64B.IFTAG_STALL",
- "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_TAG.STALLS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity. [This event is alias to ICACHE_16B.IFDATA_STALL]",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss. [This event is alias to ICACHE_64B.IFTAG_STALL]",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
index 020801cbd7e3..541bf1dd1679 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
@@ -335,10 +335,10 @@
"UMask": "0x80"
},
{
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction. [This event is alias to DECODE.LCP]",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk. [This event is alias to DECODE.LCP]",
"SampleAfterValue": "500009",
"UMask": "0x1"
},
@@ -564,7 +564,7 @@
"BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
"EventCode": "0xa4",
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
- "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the specualtive path as well as the out-of-order engine recovery past a branch misprediction.",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the speculative path as well as the out-of-order engine recovery past a branch misprediction.",
"SampleAfterValue": "10000003",
"UMask": "0x8"
},
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
index 83346911aa63..c7c2d6ab1a93 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
@@ -1530,28 +1530,28 @@
},
{
"BriefDescription": "Percentage of cycles in aborted transactions.",
- "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricExpr": "(max(cycles\\-t - cycles\\-ct, 0) / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_aborted_cycles",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricExpr": "(cycles\\-t / el\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_elision",
"ScaleUnit": "1cycles / elision"
},
{
"BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
- "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricExpr": "(cycles\\-t / tx\\-start if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_cycles_per_transaction",
"ScaleUnit": "1cycles / transaction"
},
{
"BriefDescription": "Percentage of cycles within a transaction region.",
- "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricExpr": "(cycles\\-t / cycles if has_event(cycles\\-t) else 0)",
"MetricGroup": "transaction",
"MetricName": "tsx_transactional_cycles",
"ScaleUnit": "100%"
diff --git a/tools/perf/pmu-events/metric.py b/tools/perf/pmu-events/metric.py
index af58b74d1644..85a3545f5b6a 100644
--- a/tools/perf/pmu-events/metric.py
+++ b/tools/perf/pmu-events/metric.py
@@ -408,6 +408,12 @@ def source_count(event: Event) -> Function:
return Function('source_count', event)
+def has_event(event: Event) -> Function:
+ # pylint: disable=redefined-builtin
+ # pylint: disable=invalid-name
+ return Function('has_event', event)
+
+
class Metric:
"""An individual metric that will specifiable on the perf command line."""
groups: Set[str]
@@ -539,7 +545,7 @@ def ParsePerfJson(orig: str) -> Expression:
r'Event(r"\1")', py)
py = re.sub(r'#Event\(r"([^"]*)"\)', r'Literal("#\1")', py)
py = re.sub(r'([0-9]+)Event\(r"(e[0-9]+)"\)', r'\1\2', py)
- keywords = ['if', 'else', 'min', 'max', 'd_ratio', 'source_count']
+ keywords = ['if', 'else', 'min', 'max', 'd_ratio', 'source_count', 'has_event']
for kw in keywords:
py = re.sub(rf'Event\(r"{kw}"\)', kw, py)
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index aa44fdc84763..1f6557ce3b0a 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -542,7 +542,6 @@ int cmd_test(int argc, const char **argv)
return run_workload(workload, argc, argv);
symbol_conf.priv_size = sizeof(int);
- symbol_conf.sort_by_name = true;
symbol_conf.try_vmlinux_path = true;
if (symbol__init(NULL) < 0)
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 3d01eb5e2512..c1c3fcbc2753 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -254,6 +254,10 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
TEST_ASSERT_VAL("source count", hashmap__size(ctx->ids) == 1);
TEST_ASSERT_VAL("source count", hashmap__find(ctx->ids, "EVENT1", &val_ptr));
+ /* has_event returns 1 when an event exists. */
+ expr__add_id_val(ctx, strdup("cycles"), 2);
+ ret = test(ctx, "has_event(cycles)", 1);
+
expr__ctx_free(ctx);
return 0;
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 885cd321d67b..58cf96d762d0 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -70,6 +70,8 @@ make_python_perf_so := $(python_perf_so)
make_debug := DEBUG=1
make_nondistro := BUILD_NONDISTRO=1
make_extra_tests := EXTRA_TESTS=1
+make_bpf_skel := BUILD_BPF_SKEL=1
+make_gen_vmlinux_h := BUILD_BPF_SKEL=1 GEN_VMLINUX_H=1
make_no_libperl := NO_LIBPERL=1
make_no_libpython := NO_LIBPYTHON=1
make_no_scripts := NO_LIBPYTHON=1 NO_LIBPERL=1
@@ -137,6 +139,8 @@ endif
run += make_python_perf_so
run += make_debug
run += make_nondistro
+run += make_bpf_skel
+run += make_gen_vmlinux_h
run += make_no_libperl
run += make_no_libpython
run += make_no_scripts
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 133218e51ab4..b2f82847e4c3 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -20,6 +20,20 @@
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+static int num_core_entries(void)
+{
+ /*
+ * If the kernel supports extended type, expect events to be
+ * opened once for each core PMU type. Otherwise fall back to the legacy
+ * behavior of opening only one event even though there are multiple
+ * PMUs
+ */
+ if (perf_pmus__supports_extended_type())
+ return perf_pmus__num_core_pmus();
+
+ return 1;
+}
+
static bool test_config(const struct evsel *evsel, __u64 expected_config)
{
__u32 type = evsel->core.attr.type;
@@ -108,10 +122,21 @@ static int test__checkevent_raw(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
perf_evlist__for_each_evsel(&evlist->core, evsel) {
- struct perf_pmu *pmu = NULL;
+ struct perf_pmu *pmu __maybe_unused = NULL;
bool type_matched = false;
TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, 0x1a));
+ TEST_ASSERT_VAL("event not parsed as raw type",
+ evsel->attr.type == PERF_TYPE_RAW);
+#if defined(__aarch64__)
+ /*
+ * Arm doesn't have a real raw type PMU in sysfs, so raw events
+ * would never match any PMU. However, RAW events on Arm will
+ * always successfully open on the first available core PMU
+ * so no need to test for a matching type here.
+ */
+ type_matched = raw_type_match = true;
+#else
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
if (pmu->type == evsel->attr.type) {
TEST_ASSERT_VAL("PMU type expected once", !type_matched);
@@ -120,6 +145,7 @@ static int test__checkevent_raw(struct evlist *evlist)
raw_type_match = true;
}
}
+#endif
TEST_ASSERT_VAL("No PMU found for type", type_matched);
}
TEST_ASSERT_VAL("Raw PMU not matched", raw_type_match);
@@ -327,7 +353,7 @@ static int test__checkevent_symbolic_name_modifier(struct evlist *evlist)
struct perf_evsel *evsel;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == perf_pmus__num_core_pmus());
+ evlist->core.nr_entries == num_core_entries());
perf_evlist__for_each_entry(&evlist->core, evsel) {
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
@@ -830,11 +856,11 @@ static int test__group1(struct evlist *evlist)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (perf_pmus__num_core_pmus() * 2));
+ evlist->core.nr_entries == (num_core_entries() * 2));
TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == perf_pmus__num_core_pmus());
+ evlist__nr_groups(evlist) == num_core_entries());
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* instructions:k */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -873,7 +899,7 @@ static int test__group2(struct evlist *evlist)
struct evsel *evsel, *leader = NULL;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus() + 1));
+ evlist->core.nr_entries == (2 * num_core_entries() + 1));
/*
* TODO: Currently the software event won't be grouped with the hardware
* event except for 1 PMU.
@@ -1039,11 +1065,11 @@ static int test__group4(struct evlist *evlist __maybe_unused)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (perf_pmus__num_core_pmus() * 2));
+ evlist->core.nr_entries == (num_core_entries() * 2));
TEST_ASSERT_VAL("wrong number of groups",
- perf_pmus__num_core_pmus() == evlist__nr_groups(evlist));
+ num_core_entries() == evlist__nr_groups(evlist));
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles:u + p */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1084,11 +1110,11 @@ static int test__group5(struct evlist *evlist __maybe_unused)
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (5 * perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == (5 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == (2 * perf_pmus__num_core_pmus()));
+ evlist__nr_groups(evlist) == (2 * num_core_entries()));
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles + G */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1119,7 +1145,7 @@ static int test__group5(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
}
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles:G */
evsel = leader = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1149,7 +1175,7 @@ static int test__group5(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
}
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1170,11 +1196,11 @@ static int test__group_gh1(struct evlist *evlist)
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == (2 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == perf_pmus__num_core_pmus());
+ evlist__nr_groups(evlist) == num_core_entries());
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles + :H group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1211,11 +1237,11 @@ static int test__group_gh2(struct evlist *evlist)
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == (2 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == perf_pmus__num_core_pmus());
+ evlist__nr_groups(evlist) == num_core_entries());
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles + :G group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1252,11 +1278,11 @@ static int test__group_gh3(struct evlist *evlist)
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == (2 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == perf_pmus__num_core_pmus());
+ evlist__nr_groups(evlist) == num_core_entries());
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles:G + :u group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1293,11 +1319,11 @@ static int test__group_gh4(struct evlist *evlist)
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == (2 * num_core_entries()));
TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == perf_pmus__num_core_pmus());
+ evlist__nr_groups(evlist) == num_core_entries());
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles:G + :uG group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1334,9 +1360,9 @@ static int test__leader_sample1(struct evlist *evlist)
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == (3 * num_core_entries()));
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles - sampling group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1386,9 +1412,9 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == (2 * num_core_entries()));
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* instructions - sampling group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1425,9 +1451,9 @@ static int test__checkevent_pinned_modifier(struct evlist *evlist)
struct evsel *evsel = NULL;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == perf_pmus__num_core_pmus());
+ evlist->core.nr_entries == num_core_entries());
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
evsel = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -1443,9 +1469,9 @@ static int test__pinned_group(struct evlist *evlist)
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == (3 * num_core_entries()));
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles - group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1488,9 +1514,9 @@ static int test__exclusive_group(struct evlist *evlist)
struct evsel *evsel = NULL, *leader;
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == 3 * num_core_entries());
- for (int i = 0; i < perf_pmus__num_core_pmus(); i++) {
+ for (int i = 0; i < num_core_entries(); i++) {
/* cycles - group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -1562,7 +1588,7 @@ static int test__checkevent_precise_max_modifier(struct evlist *evlist)
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (1 + perf_pmus__num_core_pmus()));
+ evlist->core.nr_entries == 1 + num_core_entries());
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_SW_TASK_CLOCK));
return TEST_OK;
diff --git a/tools/perf/tests/pe-file-parsing.c b/tools/perf/tests/pe-file-parsing.c
index c09a9fae1689..fff58b220c07 100644
--- a/tools/perf/tests/pe-file-parsing.c
+++ b/tools/perf/tests/pe-file-parsing.c
@@ -34,6 +34,7 @@ static int run_dir(const char *d)
struct dso *dso;
struct symbol *sym;
int ret;
+ size_t idx;
scnprintf(filename, PATH_MAX, "%s/pe-file.exe", d);
ret = filename__read_build_id(filename, &bid);
@@ -61,7 +62,7 @@ static int run_dir(const char *d)
TEST_ASSERT_VAL("Failed to load symbols", ret == 0);
dso__sort_by_name(dso);
- sym = dso__find_symbol_by_name(dso, "main");
+ sym = dso__find_symbol_by_name(dso, "main", &idx);
TEST_ASSERT_VAL("Failed to find main", sym);
dso__delete(dso);
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
index f2cc187b6186..4a194420416e 100755
--- a/tools/perf/tests/shell/lock_contention.sh
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -233,6 +233,41 @@ test_aggr_task_stack_filter()
fi
}
+test_csv_output()
+{
+ echo "Testing perf lock contention CSV output"
+ perf lock contention -i ${perfdata} -E 1 -x , --output ${result}
+ # count the number of commas in the header
+ # it should have 5: contended, total-wait, max-wait, avg-wait, type, caller
+ header=$(grep "# output:" ${result} | tr -d -c , | wc -c)
+ if [ "${header}" != "5" ]; then
+ echo "[Fail] Recorded result does not have enough output columns: ${header} != 5"
+ err=1
+ exit
+ fi
+ # count the number of commas in the output
+ output=$(grep -v "^#" ${result} | tr -d -c , | wc -c)
+ if [ "${header}" != "${output}" ]; then
+ echo "[Fail] Recorded result does not match the number of commas: ${header} != ${output}"
+ err=1
+ exit
+ fi
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ echo "[Skip] No BPF support"
+ return
+ fi
+
+ # the perf lock contention output goes to the stderr
+ perf lock con -a -b -E 1 -x , --output ${result} -- perf bench sched messaging > /dev/null 2>&1
+ output=$(grep -v "^#" ${result} | tr -d -c , | wc -c)
+ if [ "${header}" != "${output}" ]; then
+ echo "[Fail] BPF result does not match the number of commas: ${header} != ${output}"
+ err=1
+ exit
+ fi
+}
+
check
test_record
@@ -244,5 +279,6 @@ test_type_filter
test_lock_filter
test_stack_filter
test_aggr_task_stack_filter
+test_csv_output
exit ${err}
diff --git a/tools/perf/util/bpf_skel/.gitignore b/tools/perf/util/bpf_skel/.gitignore
index 7a1c832825de..cd01455e1b53 100644
--- a/tools/perf/util/bpf_skel/.gitignore
+++ b/tools/perf/util/bpf_skel/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
.tmp
*.skel.h
+vmlinux.h
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index 1d48226ae75d..8d3cfbb3cc65 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -416,8 +416,6 @@ int contention_end(u64 *ctx)
return 0;
}
-struct rq {};
-
extern struct rq runqueues __ksym;
struct rq___old {
diff --git a/tools/perf/util/bpf_skel/vmlinux.h b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
index c7ed51b0c1ef..ab84a6e1da5e 100644
--- a/tools/perf/util/bpf_skel/vmlinux.h
+++ b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
@@ -171,4 +171,14 @@ struct bpf_perf_event_data_kern {
struct perf_sample_data *data;
struct perf_event *event;
} __attribute__((preserve_access_index));
+
+/*
+ * If 'struct rq' isn't defined for lock_contention.bpf.c, for the sake of
+ * rq___old and rq___new, then the type for the 'runqueues' variable ends up
+ * being a forward declaration (BTF_KIND_FWD) while the kernel has it defined
+ * (BTF_KIND_STRUCT). The definition appears in vmlinux.h rather than
+ * lock_contention.bpf.c for consistency with a generated vmlinux.h.
+ */
+struct rq {};
+
#endif // __VMLINUX_H
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 046fbfcfdaab..bdfead36b83a 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1320,7 +1320,9 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
dso->id = *id;
dso__set_long_name_id(dso, dso->name, id, false);
dso__set_short_name(dso, dso->name, false);
- dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
+ dso->symbols = RB_ROOT_CACHED;
+ dso->symbol_names = NULL;
+ dso->symbol_names_len = 0;
dso->data.cache = RB_ROOT;
dso->inlined_nodes = RB_ROOT_CACHED;
dso->srclines = RB_ROOT_CACHED;
@@ -1364,7 +1366,8 @@ void dso__delete(struct dso *dso)
inlines__tree_delete(&dso->inlined_nodes);
srcline__tree_delete(&dso->srclines);
symbols__delete(&dso->symbols);
-
+ dso->symbol_names_len = 0;
+ zfree(&dso->symbol_names);
if (dso->short_name_allocated) {
zfree((char **)&dso->short_name);
dso->short_name_allocated = false;
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index b23a157c914d..b41c9782c754 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -150,7 +150,8 @@ struct dso {
struct rb_node rb_node; /* rbtree node sorted by long name */
struct rb_root *root; /* root of rbtree that rb_node is in */
struct rb_root_cached symbols;
- struct rb_root_cached symbol_names;
+ struct symbol **symbol_names;
+ size_t symbol_names_len;
struct rb_root_cached inlined_nodes;
struct rb_root_cached srclines;
struct {
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 3860b0c74829..4cbb092e0684 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -581,15 +581,14 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
maps__zput(al->maps);
map__zput(al->map);
thread__zput(al->thread);
+ al->thread = thread__get(thread);
al->addr = addr;
al->cpumode = cpumode;
al->filtered = 0;
- if (machine == NULL) {
- al->map = NULL;
+ if (machine == NULL)
return NULL;
- }
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
@@ -605,7 +604,6 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
al->level = 'u';
} else {
al->level = 'H';
- al->map = NULL;
if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
@@ -619,7 +617,6 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
return NULL;
}
al->maps = maps__get(maps);
- al->thread = thread__get(thread);
al->map = map__get(maps__find(maps, al->addr));
if (al->map != NULL) {
/*
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f607b5bddc76..762e2b2634a5 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -2924,25 +2924,19 @@ static bool find_process(const char *name)
return ret ? false : true;
}
-static bool is_amd(const char *arch, const char *cpuid)
+int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
+ char *msg __maybe_unused,
+ size_t size __maybe_unused)
{
- return arch && !strcmp("x86", arch) && cpuid && strstarts(cpuid, "AuthenticAMD");
-}
-
-static bool is_amd_ibs(struct evsel *evsel)
-{
- return evsel->core.attr.precise_ip
- || (evsel->pmu_name && !strncmp(evsel->pmu_name, "ibs", 3));
+ return 0;
}
int evsel__open_strerror(struct evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{
- struct perf_env *env = evsel__env(evsel);
- const char *arch = perf_env__arch(env);
- const char *cpuid = perf_env__cpuid(env);
char sbuf[STRERR_BUFSIZE];
int printed = 0, enforced = 0;
+ int ret;
switch (err) {
case EPERM:
@@ -3044,16 +3038,6 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
return scnprintf(msg, size,
"Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
evsel__name(evsel));
- if (is_amd(arch, cpuid)) {
- if (is_amd_ibs(evsel)) {
- if (evsel->core.attr.exclude_kernel)
- return scnprintf(msg, size,
- "AMD IBS can't exclude kernel events. Try running at a higher privilege level.");
- if (!evsel->core.system_wide)
- return scnprintf(msg, size,
- "AMD IBS may only be available in system-wide/per-cpu mode. Try using -a, or -C and workload affinity");
- }
- }
break;
case ENODATA:
@@ -3063,6 +3047,10 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
break;
}
+ ret = arch_evsel__open_strerror(evsel, msg, size);
+ if (ret)
+ return ret;
+
return scnprintf(msg, size,
"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
"/bin/dmesg | grep -i perf may provide additional information.\n",
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 9f06d6cd5379..848534ec74fa 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -311,6 +311,7 @@ void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);
void arch_evsel__set_sample_weight(struct evsel *evsel);
void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr);
+int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size);
int evsel__set_filter(struct evsel *evsel, const char *filter);
int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
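
The weak arch_evsel__open_strerror() added above replaces the open-coded AMD IBS hints removed from evsel.c: an architecture can fill msg itself and return its length to override the generic error text. A minimal sketch of such an override follows; the condition and wording are hypothetical, not the real tools/perf/arch/ implementation:

    int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
    {
            /* Hypothetical restriction: no kernel exclusion in precise mode. */
            if (evsel->core.attr.precise_ip && evsel->core.attr.exclude_kernel)
                    return scnprintf(msg, size,
                                     "This PMU cannot exclude kernel samples when sampling with precise_ip.");
            return 0;       /* fall back to the generic message */
    }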
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index f4e52919324e..4814262e3805 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -8,6 +8,7 @@
#include "cpumap.h"
#include "cputopo.h"
#include "debug.h"
+#include "evlist.h"
#include "expr.h"
#include "expr-bison.h"
#include "expr-flex.h"
@@ -474,3 +475,23 @@ out:
pr_debug2("literal: %s = %f\n", literal, result);
return result;
}
+
+/* Does the event 'id' parse? Determine via ctx->ids if possible. */
+double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const char *id)
+{
+ struct evlist *tmp;
+ double ret;
+
+ if (hashmap__find(ctx->ids, id, /*value=*/NULL))
+ return 1.0;
+
+ if (!compute_ids)
+ return 0.0;
+
+ tmp = evlist__new();
+ if (!tmp)
+ return NAN;
+ ret = parse_event(tmp, id) ? 0 : 1;
+ evlist__delete(tmp);
+ return ret;
+}
diff --git a/tools/perf/util/expr.h b/tools/perf/util/expr.h
index eaa44b24c555..3c1e49b3e35d 100644
--- a/tools/perf/util/expr.h
+++ b/tools/perf/util/expr.h
@@ -54,5 +54,6 @@ int expr__find_ids(const char *expr, const char *one,
double expr_id_data__value(const struct expr_id_data *data);
double expr_id_data__source_count(const struct expr_id_data *data);
double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx);
+double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const char *id);
#endif
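
Together with the lexer and grammar hooks below, this gives metric expressions a has_event(x) guard. A rough C sketch of the behaviour seen through the expr API, mirroring the new tests/expr.c case (error handling omitted):

    struct expr_parse_ctx *ctx = expr__ctx_new();
    double result;

    /* During normal evaluation (compute_ids == false), has_event() only
     * checks whether the id is already present in the parse context. */
    expr__add_id_val(ctx, strdup("cycles"), 2);
    if (expr__parse(&result, ctx, "cycles if has_event(cycles) else 0") == 0)
            printf("%f\n", result);         /* prints 2.000000 */
    expr__ctx_free(ctx);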
diff --git a/tools/perf/util/expr.l b/tools/perf/util/expr.l
index 4fbf353e78e7..dbb117414710 100644
--- a/tools/perf/util/expr.l
+++ b/tools/perf/util/expr.l
@@ -113,6 +113,7 @@ min { return MIN; }
if { return IF; }
else { return ELSE; }
source_count { return SOURCE_COUNT; }
+has_event { return HAS_EVENT; }
{literal} { return literal(yyscanner, sctx); }
{number} { return value(yyscanner); }
{symbol} { return str(yyscanner, ID, sctx->runtime); }
diff --git a/tools/perf/util/expr.y b/tools/perf/util/expr.y
index f04963eb6be0..dd504afd8f36 100644
--- a/tools/perf/util/expr.y
+++ b/tools/perf/util/expr.y
@@ -37,7 +37,7 @@
} ids;
}
-%token ID NUMBER MIN MAX IF ELSE LITERAL D_RATIO SOURCE_COUNT EXPR_ERROR
+%token ID NUMBER MIN MAX IF ELSE LITERAL D_RATIO SOURCE_COUNT HAS_EVENT EXPR_ERROR
%left MIN MAX IF
%left '|'
%left '^'
@@ -199,6 +199,12 @@ expr: NUMBER
}
| ID { $$ = handle_id(ctx, $1, compute_ids, /*source_count=*/false); }
| SOURCE_COUNT '(' ID ')' { $$ = handle_id(ctx, $3, compute_ids, /*source_count=*/true); }
+| HAS_EVENT '(' ID ')'
+{
+ $$.val = expr__has_event(ctx, compute_ids, $3);
+ $$.ids = NULL;
+ free($3);
+}
| expr '|' expr
{
if (is_const($1.val) && is_const($3.val)) {
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 4e8e243a6e4b..c6c9c2228578 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -25,7 +25,7 @@
"$CLANG_OPTIONS $PERF_BPF_INC_OPTIONS $KERNEL_INC_OPTIONS " \
"-Wno-unused-value -Wno-pointer-sign " \
"-working-directory $WORKING_DIR " \
- "-c \"$CLANG_SOURCE\" -target bpf $CLANG_EMIT_LLVM -g -O2 -o - $LLVM_OPTIONS_PIPE"
+ "-c \"$CLANG_SOURCE\" --target=bpf $CLANG_EMIT_LLVM -g -O2 -o - $LLVM_OPTIONS_PIPE"
struct llvm_param llvm_param = {
.clang_path = "clang",
@@ -569,7 +569,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
pr_err("ERROR:\tunable to compile %s\n", path);
pr_err("Hint:\tCheck error message shown above.\n");
pr_err("Hint:\tYou can also pre-compile it into .o using:\n");
- pr_err(" \t\tclang -target bpf -O2 -c %s\n", path);
+ pr_err(" \t\tclang --target=bpf -O2 -c %s\n", path);
pr_err(" \twith proper -I and -D options.\n");
goto errout;
}
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index f30d34903aa4..f64b83004421 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -390,7 +390,7 @@ struct symbol *map__find_symbol(struct map *map, u64 addr)
return dso__find_symbol(map__dso(map), addr);
}
-struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
+struct symbol *map__find_symbol_by_name_idx(struct map *map, const char *name, size_t *idx)
{
struct dso *dso;
@@ -398,10 +398,16 @@ struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
return NULL;
dso = map__dso(map);
- if (!dso__sorted_by_name(dso))
- dso__sort_by_name(dso);
+ dso__sort_by_name(dso);
+
+ return dso__find_symbol_by_name(dso, name, idx);
+}
+
+struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
+{
+ size_t idx;
- return dso__find_symbol_by_name(dso, name);
+ return map__find_symbol_by_name_idx(map, name, &idx);
}
struct map *map__clone(struct map *from)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 66a87b3d9965..1b53d53adc86 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -148,16 +148,17 @@ struct thread;
* @map: the 'struct map *' in which symbols are iterated
* @sym_name: the symbol name
* @pos: the 'struct symbol *' to use as a loop cursor
+ * @idx: the cursor index in the symbol names array
*/
-#define __map__for_each_symbol_by_name(map, sym_name, pos) \
- for (pos = map__find_symbol_by_name(map, sym_name); \
+#define __map__for_each_symbol_by_name(map, sym_name, pos, idx) \
+ for (pos = map__find_symbol_by_name_idx(map, sym_name, &idx); \
pos && \
!symbol__match_symbol_name(pos->name, sym_name, \
SYMBOL_TAG_INCLUDE__DEFAULT_ONLY); \
- pos = symbol__next_by_name(pos))
+ pos = dso__next_symbol_by_name(map__dso(map), &idx))
-#define map__for_each_symbol_by_name(map, sym_name, pos) \
- __map__for_each_symbol_by_name(map, sym_name, (pos))
+#define map__for_each_symbol_by_name(map, sym_name, pos, idx) \
+ __map__for_each_symbol_by_name(map, sym_name, (pos), idx)
void map__init(struct map *map,
u64 start, u64 end, u64 pgoff, struct dso *dso);
@@ -202,6 +203,7 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
int map__load(struct map *map);
struct symbol *map__find_symbol(struct map *map, u64 addr);
struct symbol *map__find_symbol_by_name(struct map *map, const char *name);
+struct symbol *map__find_symbol_by_name_idx(struct map *map, const char *name, size_t *idx);
void map__fixup_start(struct map *map);
void map__fixup_end(struct map *map);
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index 7e5e7b30510d..2247991451f3 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <stdio.h>
+#include <stdlib.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include "util/evsel_fprintf.h"
+#include "trace-event.h"
struct bit_names {
int bit;
@@ -71,6 +73,170 @@ static void __p_read_format(char *buf, size_t size, u64 value)
__p_bits(buf, size, value, bits);
}
+#define ENUM_ID_TO_STR_CASE(x) case x: return (#x);
+static const char *stringify_perf_type_id(u64 value)
+{
+ switch (value) {
+ ENUM_ID_TO_STR_CASE(PERF_TYPE_HARDWARE)
+ ENUM_ID_TO_STR_CASE(PERF_TYPE_SOFTWARE)
+ ENUM_ID_TO_STR_CASE(PERF_TYPE_TRACEPOINT)
+ ENUM_ID_TO_STR_CASE(PERF_TYPE_HW_CACHE)
+ ENUM_ID_TO_STR_CASE(PERF_TYPE_RAW)
+ ENUM_ID_TO_STR_CASE(PERF_TYPE_BREAKPOINT)
+ default:
+ return NULL;
+ }
+}
+
+static const char *stringify_perf_hw_id(u64 value)
+{
+ switch (value) {
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CPU_CYCLES)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_INSTRUCTIONS)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_REFERENCES)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_MISSES)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_BRANCH_MISSES)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_BUS_CYCLES)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_STALLED_CYCLES_FRONTEND)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_STALLED_CYCLES_BACKEND)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_REF_CPU_CYCLES)
+ default:
+ return NULL;
+ }
+}
+
+static const char *stringify_perf_hw_cache_id(u64 value)
+{
+ switch (value) {
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_L1D)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_L1I)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_LL)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_DTLB)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_ITLB)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_BPU)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_NODE)
+ default:
+ return NULL;
+ }
+}
+
+static const char *stringify_perf_hw_cache_op_id(u64 value)
+{
+ switch (value) {
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_OP_READ)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_OP_WRITE)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_OP_PREFETCH)
+ default:
+ return NULL;
+ }
+}
+
+static const char *stringify_perf_hw_cache_op_result_id(u64 value)
+{
+ switch (value) {
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_RESULT_ACCESS)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_HW_CACHE_RESULT_MISS)
+ default:
+ return NULL;
+ }
+}
+
+static const char *stringify_perf_sw_id(u64 value)
+{
+ switch (value) {
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_CPU_CLOCK)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_TASK_CLOCK)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_PAGE_FAULTS)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_CONTEXT_SWITCHES)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_CPU_MIGRATIONS)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_PAGE_FAULTS_MIN)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_PAGE_FAULTS_MAJ)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_ALIGNMENT_FAULTS)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_EMULATION_FAULTS)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_DUMMY)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_BPF_OUTPUT)
+ ENUM_ID_TO_STR_CASE(PERF_COUNT_SW_CGROUP_SWITCHES)
+ default:
+ return NULL;
+ }
+}
+#undef ENUM_ID_TO_STR_CASE
+
+#define PRINT_ID(_s, _f) \
+do { \
+ const char *__s = _s; \
+ if (__s == NULL) \
+ snprintf(buf, size, _f, value); \
+ else \
+ snprintf(buf, size, _f" (%s)", value, __s); \
+} while (0)
+#define print_id_unsigned(_s) PRINT_ID(_s, "%"PRIu64)
+#define print_id_hex(_s) PRINT_ID(_s, "%#"PRIx64)
+
+static void __p_type_id(char *buf, size_t size, u64 value)
+{
+ print_id_unsigned(stringify_perf_type_id(value));
+}
+
+static void __p_config_hw_id(char *buf, size_t size, u64 value)
+{
+ print_id_hex(stringify_perf_hw_id(value));
+}
+
+static void __p_config_sw_id(char *buf, size_t size, u64 value)
+{
+ print_id_hex(stringify_perf_sw_id(value));
+}
+
+static void __p_config_hw_cache_id(char *buf, size_t size, u64 value)
+{
+ const char *hw_cache_str = stringify_perf_hw_cache_id(value & 0xff);
+ const char *hw_cache_op_str =
+ stringify_perf_hw_cache_op_id((value & 0xff00) >> 8);
+ const char *hw_cache_op_result_str =
+ stringify_perf_hw_cache_op_result_id((value & 0xff0000) >> 16);
+
+ if (hw_cache_str == NULL || hw_cache_op_str == NULL ||
+ hw_cache_op_result_str == NULL) {
+ snprintf(buf, size, "%#"PRIx64, value);
+ } else {
+ snprintf(buf, size, "%#"PRIx64" (%s | %s | %s)", value,
+ hw_cache_op_result_str, hw_cache_op_str, hw_cache_str);
+ }
+}
+
+#ifdef HAVE_LIBTRACEEVENT
+static void __p_config_tracepoint_id(char *buf, size_t size, u64 value)
+{
+ char *str = tracepoint_id_to_name(value);
+
+ print_id_hex(str);
+ free(str);
+}
+#endif
+
+static void __p_config_id(char *buf, size_t size, u32 type, u64 value)
+{
+ switch (type) {
+ case PERF_TYPE_HARDWARE:
+ return __p_config_hw_id(buf, size, value);
+ case PERF_TYPE_SOFTWARE:
+ return __p_config_sw_id(buf, size, value);
+ case PERF_TYPE_HW_CACHE:
+ return __p_config_hw_cache_id(buf, size, value);
+ case PERF_TYPE_TRACEPOINT:
+#ifdef HAVE_LIBTRACEEVENT
+ return __p_config_tracepoint_id(buf, size, value);
+#endif
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_BREAKPOINT:
+ default:
+ snprintf(buf, size, "%#"PRIx64, value);
+ return;
+ }
+}
+
#define BUF_SIZE 1024
#define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
@@ -79,16 +245,18 @@ static void __p_read_format(char *buf, size_t size, u64 value)
#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
+#define p_type_id(val) __p_type_id(buf, BUF_SIZE, val)
+#define p_config_id(val) __p_config_id(buf, BUF_SIZE, attr->type, val)
-#define PRINT_ATTRn(_n, _f, _p) \
+#define PRINT_ATTRn(_n, _f, _p, _a) \
do { \
- if (attr->_f) { \
+ if (_a || attr->_f) { \
_p(attr->_f); \
ret += attr__fprintf(fp, _n, buf, priv);\
} \
} while (0)
-#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
+#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p, false)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv)
@@ -96,10 +264,10 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
char buf[BUF_SIZE];
int ret = 0;
- PRINT_ATTRf(type, p_unsigned);
+ PRINT_ATTRn("type", type, p_type_id, true);
PRINT_ATTRf(size, p_unsigned);
- PRINT_ATTRf(config, p_hex);
- PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
+ PRINT_ATTRn("config", config, p_config_id, true);
+ PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned, false);
PRINT_ATTRf(sample_type, p_sample_type);
PRINT_ATTRf(read_format, p_read_format);
@@ -141,10 +309,10 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(remove_on_exec, p_unsigned);
PRINT_ATTRf(sigtrap, p_unsigned);
- PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
+ PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned, false);
PRINT_ATTRf(bp_type, p_unsigned);
- PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
- PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
+ PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex, false);
+ PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex, false);
PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
PRINT_ATTRf(sample_regs_user, p_hex);
PRINT_ATTRf(sample_stack_user, p_unsigned);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 64fa568a5426..7f984a7f16ca 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -928,6 +928,31 @@ err:
return NULL;
}
+/* Creates the PMU when sysfs scanning fails. */
+struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus)
+{
+ struct perf_pmu *pmu = zalloc(sizeof(*pmu));
+
+ if (!pmu)
+ return NULL;
+
+ pmu->name = strdup("cpu");
+ if (!pmu->name) {
+ free(pmu);
+ return NULL;
+ }
+
+ pmu->is_core = true;
+ pmu->type = PERF_TYPE_RAW;
+ pmu->cpus = cpu_map__online();
+
+ INIT_LIST_HEAD(&pmu->format);
+ INIT_LIST_HEAD(&pmu->aliases);
+ INIT_LIST_HEAD(&pmu->caps);
+ list_add_tail(&pmu->list, core_pmus);
+ return pmu;
+}
+
void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
{
struct perf_pmu_format *format;
@@ -1427,7 +1452,7 @@ bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu)
bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu)
{
- return pmu->is_core && perf_pmus__num_core_pmus() == 1;
+ return !pmu->is_core || perf_pmus__num_core_pmus() == 1;
}
bool perf_pmu__have_event(const struct perf_pmu *pmu, const char *name)
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 8807a624e918..203b92860e3c 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -286,6 +286,7 @@ int perf_pmu__event_source_devices_fd(void);
int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename, int flags);
struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name);
+struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus);
void perf_pmu__delete(struct perf_pmu *pmu);
#endif /* __PMU_H */
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index 0866dee3fc62..3cd9de42139e 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -153,7 +153,12 @@ static void pmu_read_sysfs(bool core_only)
closedir(dir);
if (core_only) {
- read_sysfs_core_pmus = true;
+ if (!list_empty(&core_pmus))
+ read_sysfs_core_pmus = true;
+ else {
+ if (perf_pmu__create_placeholder_core_pmu(&core_pmus))
+ read_sysfs_core_pmus = true;
+ }
} else {
read_sysfs_core_pmus = true;
read_sysfs_all_pmus = true;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 6e2110d605fb..16822a8a540f 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -74,7 +74,6 @@ int init_probe_symbol_maps(bool user_only)
{
int ret;
- symbol_conf.sort_by_name = true;
symbol_conf.allow_aliases = true;
ret = symbol__init(NULL);
if (ret < 0) {
@@ -382,6 +381,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
struct symbol *sym;
u64 address = 0;
int ret = -ENOENT;
+ size_t idx;
/* This can work only for function-name based one */
if (!pp->function || pp->file)
@@ -392,7 +392,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
return -EINVAL;
/* Find the address of given function */
- map__for_each_symbol_by_name(map, pp->function, sym) {
+ map__for_each_symbol_by_name(map, pp->function, sym, idx) {
if (uprobes) {
address = sym->start;
if (sym->type == STT_GNU_IFUNC)
@@ -3738,7 +3738,6 @@ out:
int show_available_funcs(const char *target, struct nsinfo *nsi,
struct strfilter *_filter, bool user)
{
- struct rb_node *nd;
struct map *map;
struct dso *dso;
int ret;
@@ -3767,17 +3766,16 @@ int show_available_funcs(const char *target, struct nsinfo *nsi,
goto end;
}
dso = map__dso(map);
- if (!dso__sorted_by_name(dso))
- dso__sort_by_name(dso);
+ dso__sort_by_name(dso);
/* Show all (filtered) symbols */
setup_pager();
- for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
- struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
+ for (size_t i = 0; i < dso->symbol_names_len; i++) {
+ struct symbol *pos = dso->symbol_names[i];
- if (strfilter__compare(_filter, pos->sym.name))
- printf("%s\n", pos->sym.name);
+ if (strfilter__compare(_filter, pos->name))
+ printf("%s\n", pos->name);
}
end:
map__put(map);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index a7b2cb05dc86..4eed8ec23994 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -124,6 +124,14 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
}
/*
+ * Add this one here so as not to drag in util/trace-event-info.c
+ */
+char *tracepoint_id_to_name(u64 config __maybe_unused)
+{
+ return NULL;
+}
+
+/*
* XXX: All these evsel destructors need some better mechanism, like a linked
* list of destructors registered when the relevant code indeed is used instead
* of having more and more calls in perf_evsel__delete(). -- acme
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index d275d3bef7d5..f849f9ef68e6 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -440,38 +440,35 @@ static struct symbol *symbols__next(struct symbol *sym)
return NULL;
}
-static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
+static int symbols__sort_name_cmp(const void *vlhs, const void *vrhs)
{
- struct rb_node **p = &symbols->rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct symbol_name_rb_node *symn, *s;
- bool leftmost = true;
+ const struct symbol *lhs = *((const struct symbol **)vlhs);
+ const struct symbol *rhs = *((const struct symbol **)vrhs);
- symn = container_of(sym, struct symbol_name_rb_node, sym);
-
- while (*p != NULL) {
- parent = *p;
- s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
- if (strcmp(sym->name, s->sym.name) < 0)
- p = &(*p)->rb_left;
- else {
- p = &(*p)->rb_right;
- leftmost = false;
- }
- }
- rb_link_node(&symn->rb_node, parent, p);
- rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
+ return strcmp(lhs->name, rhs->name);
}
-static void symbols__sort_by_name(struct rb_root_cached *symbols,
- struct rb_root_cached *source)
+static struct symbol **symbols__sort_by_name(struct rb_root_cached *source, size_t *len)
{
struct rb_node *nd;
+ struct symbol **result;
+ size_t i = 0, size = 0;
+
+ for (nd = rb_first_cached(source); nd; nd = rb_next(nd))
+ size++;
+
+ result = malloc(sizeof(*result) * size);
+ if (!result)
+ return NULL;
for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
- symbols__insert_by_name(symbols, pos);
+
+ result[i++] = pos;
}
+ qsort(result, size, sizeof(*result), symbols__sort_name_cmp);
+ *len = size;
+ return result;
}
int symbol__match_symbol_name(const char *name, const char *str,
@@ -491,48 +488,53 @@ int symbol__match_symbol_name(const char *name, const char *str,
return arch__compare_symbol_names(name, str);
}
-static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
+static struct symbol *symbols__find_by_name(struct symbol *symbols[],
+ size_t symbols_len,
const char *name,
- enum symbol_tag_include includes)
+ enum symbol_tag_include includes,
+ size_t *found_idx)
{
- struct rb_node *n;
- struct symbol_name_rb_node *s = NULL;
+ size_t i, lower = 0, upper = symbols_len;
+ struct symbol *s = NULL;
- if (symbols == NULL)
- return NULL;
+ if (found_idx)
+ *found_idx = SIZE_MAX;
- n = symbols->rb_root.rb_node;
+ if (!symbols_len)
+ return NULL;
- while (n) {
+ while (lower < upper) {
int cmp;
- s = rb_entry(n, struct symbol_name_rb_node, rb_node);
- cmp = symbol__match_symbol_name(s->sym.name, name, includes);
+ i = (lower + upper) / 2;
+ cmp = symbol__match_symbol_name(symbols[i]->name, name, includes);
if (cmp > 0)
- n = n->rb_left;
+ upper = i;
else if (cmp < 0)
- n = n->rb_right;
- else
+ lower = i + 1;
+ else {
+ if (found_idx)
+ *found_idx = i;
+ s = symbols[i];
break;
+ }
}
-
- if (n == NULL)
- return NULL;
-
- if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
+ if (s && includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) {
/* return first symbol that has same name (if any) */
- for (n = rb_prev(n); n; n = rb_prev(n)) {
- struct symbol_name_rb_node *tmp;
-
- tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
- if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
+ for (; i > 0; i--) {
+ struct symbol *tmp = symbols[i - 1];
+
+ if (!arch__compare_symbol_names(tmp->name, s->name)) {
+ if (found_idx)
+ *found_idx = i - 1;
+ s = tmp;
+ } else
break;
-
- s = tmp;
}
-
- return &s->sym;
+ }
+ assert(!found_idx || !s || s == symbols[*found_idx]);
+ return s;
}
void dso__reset_find_symbol_cache(struct dso *dso)
@@ -590,31 +592,41 @@ struct symbol *dso__next_symbol(struct symbol *sym)
return symbols__next(sym);
}
-struct symbol *symbol__next_by_name(struct symbol *sym)
+struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx)
{
- struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
- struct rb_node *n = rb_next(&s->rb_node);
+ if (*idx + 1 >= dso->symbol_names_len)
+ return NULL;
- return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
+ ++*idx;
+ return dso->symbol_names[*idx];
}
/*
* Returns first symbol that matched with @name.
*/
-struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
+struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx)
{
- struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
- SYMBOL_TAG_INCLUDE__NONE);
+ struct symbol *s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
+ name, SYMBOL_TAG_INCLUDE__NONE, idx);
if (!s)
- s = symbols__find_by_name(&dso->symbol_names, name,
- SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
+ s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
+ name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx);
return s;
}
void dso__sort_by_name(struct dso *dso)
{
- dso__set_sorted_by_name(dso);
- return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
+ mutex_lock(&dso->lock);
+ if (!dso__sorted_by_name(dso)) {
+ size_t len;
+
+ dso->symbol_names = symbols__sort_by_name(&dso->symbols, &len);
+ if (dso->symbol_names) {
+ dso->symbol_names_len = len;
+ dso__set_sorted_by_name(dso);
+ }
+ }
+ mutex_unlock(&dso->lock);
}
/*
@@ -2656,10 +2668,6 @@ int symbol__init(struct perf_env *env)
symbol__elf_init();
- if (symbol_conf.sort_by_name)
- symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
- sizeof(struct symbol));
-
if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
return -1;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 5ca8665dd2c1..af87c46b3f89 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -43,8 +43,7 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
/**
* A symtab entry. When allocated this may be preceded by an annotation (see
- * symbol__annotation), a browser_index (see symbol__browser_index) and rb_node
- * to sort by name (see struct symbol_name_rb_node).
+ * symbol__annotation) and/or a browser_index (see symbol__browser_index).
*/
struct symbol {
struct rb_node rb_node;
@@ -95,11 +94,6 @@ static inline size_t symbol__size(const struct symbol *sym)
struct strlist;
struct intlist;
-struct symbol_name_rb_node {
- struct rb_node rb_node;
- struct symbol sym;
-};
-
static inline int __symbol__join_symfs(char *bf, size_t size, const char *path)
{
return path__join(bf, size, symbol_conf.symfs, path);
@@ -136,9 +130,9 @@ void dso__delete_symbol(struct dso *dso,
struct symbol *dso__find_symbol(struct dso *dso, u64 addr);
struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr);
-struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name);
-struct symbol *symbol__next_by_name(struct symbol *sym);
+struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx);
+struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx);
struct symbol *dso__first_symbol(struct dso *dso);
struct symbol *dso__last_symbol(struct dso *dso);
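
With the per-name rbtree replaced by a sorted pointer array, name lookups now hand back an index into dso->symbol_names that is advanced to visit aliases. A minimal sketch of the new calling convention; it assumes the dso has already been loaded, and uses a plain strcmp() where real callers would use symbol__match_symbol_name():

    size_t idx;
    struct symbol *sym;

    dso__sort_by_name(dso);         /* now idempotent, locked internally */
    for (sym = dso__find_symbol_by_name(dso, "main", &idx);
         sym && !strcmp(sym->name, "main");
         sym = dso__next_symbol_by_name(dso, &idx))
            printf("main at %#llx\n", (unsigned long long)sym->start);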
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index f26f81eb8252..0b589570d1d0 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -18,7 +18,6 @@ struct symbol_conf {
show_kernel_path,
use_modules,
allow_aliases,
- sort_by_name,
show_nr_samples,
show_total_period,
use_callchain,
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index d9e5ad040b6a..088f4abf230f 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -63,13 +63,11 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
FILE *fp)
{
size_t ret = 0;
- struct rb_node *nd;
- struct symbol_name_rb_node *pos;
- for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
- pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
- ret += fprintf(fp, "%s\n", pos->sym.name);
- }
+ for (size_t i = 0; i < dso->symbol_names_len; i++) {
+ struct symbol *pos = dso->symbol_names[i];
+ ret += fprintf(fp, "%s\n", pos->name);
+ }
return ret;
}
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index c24b3a15e319..319ccf09a435 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -466,6 +466,18 @@ next:
return NULL;
}
+char *tracepoint_id_to_name(u64 config)
+{
+ struct tracepoint_path *path = tracepoint_id_to_path(config);
+ char *buf = NULL;
+
+ if (path && asprintf(&buf, "%s:%s", path->system, path->name) < 0)
+ buf = NULL;
+
+ put_tracepoints_path(path);
+ return buf;
+}
+
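
A short usage sketch (not part of this patch) for the new helper; evsel is assumed to be a perf evsel whose attr.config carries a tracepoint id:

	char *name = tracepoint_id_to_name(evsel->core.attr.config);

	if (name) {
		/* e.g. "sched:sched_switch" */
		pr_debug("tracepoint: %s\n", name);
		free(name);
	}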
static struct tracepoint_path *tracepoint_name_to_path(const char *name)
{
struct tracepoint_path *path = zalloc(sizeof(*path));
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index a0cff184b1cd..a69ee29419f3 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -62,6 +62,12 @@ unsigned long long eval_flag(const char *flag);
int read_tracing_data(int fd, struct list_head *pattrs);
+/*
+ * Return the tracepoint name in the format "subsystem:event_name";
+ * callers should free the returned string.
+ */
+char *tracepoint_id_to_name(u64 config);
+
struct tracing_data {
/* size is only valid if temp is 'true' */
ssize_t size;
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 83eea968482e..2a96df4c8d42 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -66,9 +66,13 @@ static int __report_module(struct addr_location *al, u64 ip,
mod = 0;
}
- if (!mod)
- mod = dwfl_report_elf(ui->dwfl, dso->short_name, dso->long_name, -1,
+ if (!mod) {
+ char filename[PATH_MAX];
+
+ __symbol__join_symfs(filename, sizeof(filename), dso->long_name);
+ mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
map__start(al->map) - map__pgoff(al->map), false);
+ }
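
Illustration (not part of the patch, paths are made up): with "perf report --symfs=/tmp/sysroot", __symbol__join_symfs() rewrites the DSO path before it is handed to dwfl_report_elf(), so libdw opens the copy inside the sysroot:

	char filename[PATH_MAX];

	/* "/usr/lib/libc.so.6" -> "/tmp/sysroot/usr/lib/libc.so.6" */
	__symbol__join_symfs(filename, sizeof(filename), "/usr/lib/libc.so.6");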
if (!mod) {
char filename[PATH_MAX];
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 6f9347ade82c..90f3c9802ffb 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -6,13 +6,13 @@ ldflags-y += --wrap=acpi_pci_find_root
ldflags-y += --wrap=nvdimm_bus_register
ldflags-y += --wrap=devm_cxl_port_enumerate_dports
ldflags-y += --wrap=devm_cxl_setup_hdm
-ldflags-y += --wrap=devm_cxl_enable_hdm
ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
ldflags-y += --wrap=devm_cxl_enumerate_decoders
ldflags-y += --wrap=cxl_await_media_ready
ldflags-y += --wrap=cxl_hdm_decode_init
ldflags-y += --wrap=cxl_dvsec_rr_decode
-ldflags-y += --wrap=cxl_rcrb_to_component
+ldflags-y += --wrap=devm_cxl_add_rch_dport
+ldflags-y += --wrap=cxl_rcd_component_reg_phys
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
@@ -57,6 +57,7 @@ cxl_core-y += $(CXL_CORE_SRC)/memdev.o
cxl_core-y += $(CXL_CORE_SRC)/mbox.o
cxl_core-y += $(CXL_CORE_SRC)/pci.o
cxl_core-y += $(CXL_CORE_SRC)/hdm.o
+cxl_core-y += $(CXL_CORE_SRC)/pmu.o
cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
cxl_core-y += config_check.o
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index bf00dc52fe96..0e78d8e19895 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -713,7 +713,7 @@ static void default_mock_decoder(struct cxl_decoder *cxld)
cxld->interleave_ways = 1;
cxld->interleave_granularity = 256;
- cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->commit = mock_decoder_commit;
cxld->reset = mock_decoder_reset;
}
@@ -754,7 +754,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
/* check if the endpoint is attached to host-bridge0 */
port = cxled_to_port(cxled);
do {
- if (port->uport == &cxl_host_bridge[0]->dev) {
+ if (port->uport_dev == &cxl_host_bridge[0]->dev) {
hb0 = true;
break;
}
@@ -787,7 +787,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->interleave_ways = 2;
eig_to_granularity(window->granularity, &cxld->interleave_granularity);
- cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->flags = CXL_DECODER_F_ENABLE;
cxled->state = CXL_DECODER_STATE_AUTO;
port->commit_end = cxld->id;
@@ -820,7 +820,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
} else
cxlsd->target[0] = dport;
cxld = &cxlsd->cxld;
- cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->flags = CXL_DECODER_F_ENABLE;
iter->commit_end = 0;
/*
@@ -889,7 +889,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
mock_init_hdm_decoder(cxld);
if (target_count) {
- rc = device_for_each_child(port->uport, &ctx,
+ rc = device_for_each_child(port->uport_dev, &ctx,
map_targets);
if (rc) {
put_device(&cxld->dev);
@@ -919,29 +919,29 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
int i, array_size;
if (port->depth == 1) {
- if (is_multi_bridge(port->uport)) {
+ if (is_multi_bridge(port->uport_dev)) {
array_size = ARRAY_SIZE(cxl_root_port);
array = cxl_root_port;
- } else if (is_single_bridge(port->uport)) {
+ } else if (is_single_bridge(port->uport_dev)) {
array_size = ARRAY_SIZE(cxl_root_single);
array = cxl_root_single;
} else {
dev_dbg(&port->dev, "%s: unknown bridge type\n",
- dev_name(port->uport));
+ dev_name(port->uport_dev));
return -ENXIO;
}
} else if (port->depth == 2) {
struct cxl_port *parent = to_cxl_port(port->dev.parent);
- if (is_multi_bridge(parent->uport)) {
+ if (is_multi_bridge(parent->uport_dev)) {
array_size = ARRAY_SIZE(cxl_switch_dport);
array = cxl_switch_dport;
- } else if (is_single_bridge(parent->uport)) {
+ } else if (is_single_bridge(parent->uport_dev)) {
array_size = ARRAY_SIZE(cxl_swd_single);
array = cxl_swd_single;
} else {
dev_dbg(&port->dev, "%s: unknown bridge type\n",
- dev_name(port->uport));
+ dev_name(port->uport_dev));
return -ENXIO;
}
} else {
@@ -954,9 +954,9 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
struct platform_device *pdev = array[i];
struct cxl_dport *dport;
- if (pdev->dev.parent != port->uport) {
+ if (pdev->dev.parent != port->uport_dev) {
dev_dbg(&port->dev, "%s: mismatch parent %s\n",
- dev_name(port->uport),
+ dev_name(port->uport_dev),
dev_name(pdev->dev.parent));
continue;
}
@@ -971,15 +971,6 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
return 0;
}
-resource_size_t mock_cxl_rcrb_to_component(struct device *dev,
- resource_size_t rcrb,
- enum cxl_rcrb which)
-{
- dev_dbg(dev, "rcrb: %pa which: %d\n", &rcrb, which);
-
- return (resource_size_t) which + 1;
-}
-
static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_adev = is_mock_adev,
.is_mock_bridge = is_mock_bridge,
@@ -988,7 +979,6 @@ static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_dev = is_mock_dev,
.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
.acpi_evaluate_integer = mock_acpi_evaluate_integer,
- .cxl_rcrb_to_component = mock_cxl_rcrb_to_component,
.acpi_pci_find_root = mock_acpi_pci_find_root,
.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 34b48027b3de..464fc39ed277 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -8,11 +8,14 @@
#include <linux/sizes.h>
#include <linux/bits.h>
#include <asm/unaligned.h>
+#include <crypto/sha2.h>
#include <cxlmem.h>
#include "trace.h"
#define LSA_SIZE SZ_128K
+#define FW_SIZE SZ_64M
+#define FW_SLOTS 3
#define DEV_SIZE SZ_2G
#define EFFECT(x) (1U << x)
@@ -21,42 +24,70 @@
static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
+enum cxl_command_effects {
+ CONF_CHANGE_COLD_RESET = 0,
+ CONF_CHANGE_IMMEDIATE,
+ DATA_CHANGE_IMMEDIATE,
+ POLICY_CHANGE_IMMEDIATE,
+ LOG_CHANGE_IMMEDIATE,
+ SECURITY_CHANGE_IMMEDIATE,
+ BACKGROUND_OP,
+ SECONDARY_MBOX_SUPPORTED,
+};
+
+#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
+
static struct cxl_cel_entry mock_cel[] = {
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
- .effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
+ EFFECT(DATA_CHANGE_IMMEDIATE)),
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
- .effect = cpu_to_le16(0),
+ .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
- .effect = cpu_to_le16(0),
+ .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
+ EFFECT(BACKGROUND_OP)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
+ EFFECT(CONF_CHANGE_IMMEDIATE)),
},
};
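
For CXL_MBOX_OP_SET_LSA the named effects encode the same mask as the old literal EFFECT(1) | EFFECT(2); a compile-time sketch (not part of the patch) of that equivalence:

	/* CONF_CHANGE_IMMEDIATE is bit 1, DATA_CHANGE_IMMEDIATE is bit 2 */
	_Static_assert((EFFECT(CONF_CHANGE_IMMEDIATE) | EFFECT(DATA_CHANGE_IMMEDIATE)) ==
		       (EFFECT(1) | EFFECT(2)),
		       "SET_LSA effect mask unchanged by the rename");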
@@ -102,13 +133,17 @@ struct mock_event_log {
};
struct mock_event_store {
- struct cxl_dev_state *cxlds;
+ struct cxl_memdev_state *mds;
struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
u32 ev_status;
};
struct cxl_mockmem_data {
void *lsa;
+ void *fw;
+ int fw_slot;
+ int fw_staged;
+ size_t fw_size;
u32 security_state;
u8 user_pass[NVDIMM_PASSPHRASE_LEN];
u8 master_pass[NVDIMM_PASSPHRASE_LEN];
@@ -180,8 +215,7 @@ static void mes_add_event(struct mock_event_store *mes,
log->nr_events++;
}
-static int mock_get_event(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
struct cxl_get_event_payload *pl;
struct mock_event_log *log;
@@ -201,7 +235,7 @@ static int mock_get_event(struct cxl_dev_state *cxlds,
memset(cmd->payload_out, 0, cmd->size_out);
- log = event_find_log(cxlds->dev, log_type);
+ log = event_find_log(dev, log_type);
if (!log || event_log_empty(log))
return 0;
@@ -234,8 +268,7 @@ static int mock_get_event(struct cxl_dev_state *cxlds,
return 0;
}
-static int mock_clear_event(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
struct mock_event_log *log;
@@ -246,7 +279,7 @@ static int mock_clear_event(struct cxl_dev_state *cxlds,
if (log_type >= CXL_EVENT_TYPE_MAX)
return -EINVAL;
- log = event_find_log(cxlds->dev, log_type);
+ log = event_find_log(dev, log_type);
if (!log)
return 0; /* No mock data in this log */
@@ -256,7 +289,7 @@ static int mock_clear_event(struct cxl_dev_state *cxlds,
* However, this is not good behavior for the host so test it.
*/
if (log->clear_idx + pl->nr_recs > log->cur_idx) {
- dev_err(cxlds->dev,
+ dev_err(dev,
"Attempting to clear more events than returned!\n");
return -EINVAL;
}
@@ -266,7 +299,7 @@ static int mock_clear_event(struct cxl_dev_state *cxlds,
nr < pl->nr_recs;
nr++, handle++) {
if (handle != le16_to_cpu(pl->handles[nr])) {
- dev_err(cxlds->dev, "Clearing events out of order\n");
+ dev_err(dev, "Clearing events out of order\n");
return -EINVAL;
}
}
@@ -293,7 +326,7 @@ static void cxl_mock_event_trigger(struct device *dev)
event_reset_log(log);
}
- cxl_mem_get_event_records(mes->cxlds, mes->ev_status);
+ cxl_mem_get_event_records(mes->mds, mes->ev_status);
}
struct cxl_event_record_raw maint_needed = {
@@ -453,7 +486,7 @@ static int mock_gsl(struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_log *gl = cmd->payload_in;
u32 offset = le32_to_cpu(gl->offset);
@@ -463,7 +496,7 @@ static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
if (cmd->size_in < sizeof(*gl))
return -EINVAL;
- if (length > cxlds->payload_size)
+ if (length > mds->payload_size)
return -EINVAL;
if (offset + length > sizeof(mock_cel))
return -EINVAL;
@@ -477,7 +510,7 @@ static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_identify id = {
.fw_revision = { "mock fw v1 " },
@@ -495,7 +528,7 @@ static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_id(struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_identify id = {
.fw_revision = { "mock fw v1 " },
@@ -517,8 +550,7 @@ static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_partition_info(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_partition_info(struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_partition_info pi = {
.active_volatile_cap =
@@ -535,11 +567,52 @@ static int mock_partition_info(struct cxl_dev_state *cxlds,
return 0;
}
-static int mock_get_security_state(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_sanitize(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
+ if (cmd->size_in != 0)
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ return 0; /* assume less than 2 secs, no bg */
+}
+
+static int mock_secure_erase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ if (cmd->size_in != 0)
+ return -EINVAL;
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int mock_get_security_state(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
if (cmd->size_in)
return -EINVAL;
@@ -569,9 +642,9 @@ static void user_plimit_check(struct cxl_mockmem_data *mdata)
mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
}
-static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
struct cxl_set_pass *set_pass;
if (cmd->size_in != sizeof(*set_pass))
@@ -629,9 +702,9 @@ static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd
return -EINVAL;
}
-static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
struct cxl_disable_pass *dis_pass;
if (cmd->size_in != sizeof(*dis_pass))
@@ -700,10 +773,9 @@ static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_
return 0;
}
-static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_freeze_security(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
-
if (cmd->size_in != 0)
return -EINVAL;
@@ -717,10 +789,9 @@ static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd
return 0;
}
-static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_unlock_security(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
-
if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
return -EINVAL;
@@ -759,10 +830,9 @@ static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd
return 0;
}
-static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
+static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
struct cxl_pass_erase *erase;
if (cmd->size_in != sizeof(*erase))
@@ -858,10 +928,10 @@ static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
return 0;
}
-static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_get_lsa(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
void *lsa = mdata->lsa;
u32 offset, length;
@@ -878,10 +948,10 @@ static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_set_lsa(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
void *lsa = mdata->lsa;
u32 offset, length;
@@ -896,8 +966,7 @@ static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_health_info(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_health_info(struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_health_info health_info = {
/* set flags for maint needed, perf degraded, hw replacement */
@@ -1114,9 +1183,90 @@ static struct attribute *cxl_mock_mem_core_attrs[] = {
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
-static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_fw_info(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_get_fw_info fw_info = {
+ .num_slots = FW_SLOTS,
+ .slot_info = (mdata->fw_slot & 0x7) |
+ ((mdata->fw_staged & 0x7) << 3),
+ .activation_cap = 0,
+ };
+
+ strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
+ strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
+ strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
+ strcpy(fw_info.slot_4_revision, "");
+
+ if (cmd->size_out < sizeof(fw_info))
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
+ return 0;
+}
+
+static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
+ struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
+ void *fw = mdata->fw;
+ size_t offset, length;
+
+ offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
+ length = cmd->size_in - sizeof(*transfer);
+ if (offset + length > FW_SIZE)
+ return -EINVAL;
+
+ switch (transfer->action) {
+ case CXL_FW_TRANSFER_ACTION_FULL:
+ if (offset != 0)
+ return -EINVAL;
+ fallthrough;
+ case CXL_FW_TRANSFER_ACTION_END:
+ if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
+ return -EINVAL;
+ mdata->fw_size = offset + length;
+ break;
+ case CXL_FW_TRANSFER_ACTION_INITIATE:
+ case CXL_FW_TRANSFER_ACTION_CONTINUE:
+ break;
+ case CXL_FW_TRANSFER_ACTION_ABORT:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(fw + offset, transfer->data, length);
+ return 0;
+}
+
+static int mock_activate_fw(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_activate_fw *activate = cmd->payload_in;
+
+ if (activate->slot == 0 || activate->slot > FW_SLOTS)
+ return -EINVAL;
+
+ switch (activate->action) {
+ case CXL_FW_ACTIVATE_ONLINE:
+ mdata->fw_slot = activate->slot;
+ mdata->fw_staged = 0;
+ return 0;
+ case CXL_FW_ACTIVATE_OFFLINE:
+ mdata->fw_staged = activate->slot;
+ return 0;
+ }
+
+ return -EINVAL;
+}
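
A worked example (not part of the patch) of the slot_info byte that mock_fw_info() reports after an offline activation: the running slot sits in bits [2:0] and the staged slot in bits [5:3]:

	u8 fw_slot = 2, fw_staged = 3;	/* slot 2 running, slot 3 staged */
	u8 slot_info = (fw_slot & 0x7) | ((fw_staged & 0x7) << 3);	/* 0x1a */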
+
+static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_dev_state *cxlds = &mds->cxlds;
struct device *dev = cxlds->dev;
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
int rc = -EIO;
switch (cmd->opcode) {
@@ -1127,49 +1277,55 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
rc = mock_gsl(cmd);
break;
case CXL_MBOX_OP_GET_LOG:
- rc = mock_get_log(cxlds, cmd);
+ rc = mock_get_log(mds, cmd);
break;
case CXL_MBOX_OP_IDENTIFY:
if (cxlds->rcd)
- rc = mock_rcd_id(cxlds, cmd);
+ rc = mock_rcd_id(cmd);
else
- rc = mock_id(cxlds, cmd);
+ rc = mock_id(cmd);
break;
case CXL_MBOX_OP_GET_LSA:
- rc = mock_get_lsa(cxlds, cmd);
+ rc = mock_get_lsa(mdata, cmd);
break;
case CXL_MBOX_OP_GET_PARTITION_INFO:
- rc = mock_partition_info(cxlds, cmd);
+ rc = mock_partition_info(cmd);
break;
case CXL_MBOX_OP_GET_EVENT_RECORD:
- rc = mock_get_event(cxlds, cmd);
+ rc = mock_get_event(dev, cmd);
break;
case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
- rc = mock_clear_event(cxlds, cmd);
+ rc = mock_clear_event(dev, cmd);
break;
case CXL_MBOX_OP_SET_LSA:
- rc = mock_set_lsa(cxlds, cmd);
+ rc = mock_set_lsa(mdata, cmd);
break;
case CXL_MBOX_OP_GET_HEALTH_INFO:
- rc = mock_health_info(cxlds, cmd);
+ rc = mock_health_info(cmd);
+ break;
+ case CXL_MBOX_OP_SANITIZE:
+ rc = mock_sanitize(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_SECURE_ERASE:
+ rc = mock_secure_erase(mdata, cmd);
break;
case CXL_MBOX_OP_GET_SECURITY_STATE:
- rc = mock_get_security_state(cxlds, cmd);
+ rc = mock_get_security_state(mdata, cmd);
break;
case CXL_MBOX_OP_SET_PASSPHRASE:
- rc = mock_set_passphrase(cxlds, cmd);
+ rc = mock_set_passphrase(mdata, cmd);
break;
case CXL_MBOX_OP_DISABLE_PASSPHRASE:
- rc = mock_disable_passphrase(cxlds, cmd);
+ rc = mock_disable_passphrase(mdata, cmd);
break;
case CXL_MBOX_OP_FREEZE_SECURITY:
- rc = mock_freeze_security(cxlds, cmd);
+ rc = mock_freeze_security(mdata, cmd);
break;
case CXL_MBOX_OP_UNLOCK:
- rc = mock_unlock_security(cxlds, cmd);
+ rc = mock_unlock_security(mdata, cmd);
break;
case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
- rc = mock_passphrase_secure_erase(cxlds, cmd);
+ rc = mock_passphrase_secure_erase(mdata, cmd);
break;
case CXL_MBOX_OP_GET_POISON:
rc = mock_get_poison(cxlds, cmd);
@@ -1180,6 +1336,15 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
case CXL_MBOX_OP_CLEAR_POISON:
rc = mock_clear_poison(cxlds, cmd);
break;
+ case CXL_MBOX_OP_GET_FW_INFO:
+ rc = mock_fw_info(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_TRANSFER_FW:
+ rc = mock_transfer_fw(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_ACTIVATE_FW:
+ rc = mock_activate_fw(mdata, cmd);
+ break;
default:
break;
}
@@ -1195,6 +1360,11 @@ static void label_area_release(void *lsa)
vfree(lsa);
}
+static void fw_buf_release(void *buf)
+{
+ vfree(buf);
+}
+
static bool is_rcd(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
@@ -1215,6 +1385,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cxl_memdev *cxlmd;
+ struct cxl_memdev_state *mds;
struct cxl_dev_state *cxlds;
struct cxl_mockmem_data *mdata;
int rc;
@@ -1227,52 +1398,67 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
mdata->lsa = vmalloc(LSA_SIZE);
if (!mdata->lsa)
return -ENOMEM;
+ mdata->fw = vmalloc(FW_SIZE);
+ if (!mdata->fw)
+ return -ENOMEM;
+ mdata->fw_slot = 2;
+
rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
if (rc)
return rc;
- cxlds = cxl_dev_state_create(dev);
- if (IS_ERR(cxlds))
- return PTR_ERR(cxlds);
+ rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
+ if (rc)
+ return rc;
+
+ mds = cxl_memdev_state_create(dev);
+ if (IS_ERR(mds))
+ return PTR_ERR(mds);
+ mds->mbox_send = cxl_mock_mbox_send;
+ mds->payload_size = SZ_4K;
+ mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
+
+ cxlds = &mds->cxlds;
cxlds->serial = pdev->id;
- cxlds->mbox_send = cxl_mock_mbox_send;
- cxlds->payload_size = SZ_4K;
- cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
if (is_rcd(pdev)) {
cxlds->rcd = true;
cxlds->component_reg_phys = CXL_RESOURCE_NONE;
}
- rc = cxl_enumerate_cmds(cxlds);
+ rc = cxl_enumerate_cmds(mds);
if (rc)
return rc;
- rc = cxl_poison_state_init(cxlds);
+ rc = cxl_poison_state_init(mds);
if (rc)
return rc;
- rc = cxl_set_timestamp(cxlds);
+ rc = cxl_set_timestamp(mds);
if (rc)
return rc;
cxlds->media_ready = true;
- rc = cxl_dev_state_identify(cxlds);
+ rc = cxl_dev_state_identify(mds);
if (rc)
return rc;
- rc = cxl_mem_create_range_info(cxlds);
+ rc = cxl_mem_create_range_info(mds);
if (rc)
return rc;
- mdata->mes.cxlds = cxlds;
+ mdata->mes.mds = mds;
cxl_mock_add_event_logs(&mdata->mes);
cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
+ rc = cxl_memdev_setup_fw_upload(mds);
+ if (rc)
+ return rc;
+
+ cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
return 0;
}
@@ -1310,9 +1496,40 @@ static ssize_t security_lock_store(struct device *dev, struct device_attribute *
static DEVICE_ATTR_RW(security_lock);
+static ssize_t fw_buf_checksum_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ u8 hash[SHA256_DIGEST_SIZE];
+ unsigned char *hstr, *hptr;
+ struct sha256_state sctx;
+ ssize_t written = 0;
+ int i;
+
+ sha256_init(&sctx);
+ sha256_update(&sctx, mdata->fw, mdata->fw_size);
+ sha256_final(&sctx, hash);
+
+ hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
+ if (!hstr)
+ return -ENOMEM;
+
+ hptr = hstr;
+ for (i = 0; i < SHA256_DIGEST_SIZE; i++)
+ hptr += sprintf(hptr, "%02x", hash[i]);
+
+ written = sysfs_emit(buf, "%s\n", hstr);
+
+ kfree(hstr);
+ return written;
+}
+
+static DEVICE_ATTR_RO(fw_buf_checksum);
+
static struct attribute *cxl_mock_mem_attrs[] = {
&dev_attr_security_lock.attr,
&dev_attr_event_trigger.attr,
+ &dev_attr_fw_buf_checksum.attr,
NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index 284416527644..1a61e68e3095 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -139,7 +139,7 @@ struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port,
struct cxl_hdm *cxlhdm;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (ops && ops->is_mock_port(port->uport))
+ if (ops && ops->is_mock_port(port->uport_dev))
cxlhdm = ops->devm_cxl_setup_hdm(port, info);
else
cxlhdm = devm_cxl_setup_hdm(port, info);
@@ -149,27 +149,12 @@ struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_setup_hdm, CXL);
-int __wrap_devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
-{
- int index, rc;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_port(port->uport))
- rc = 0;
- else
- rc = devm_cxl_enable_hdm(port, cxlhdm);
- put_cxl_mock_ops(index);
-
- return rc;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_enable_hdm, CXL);
-
int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
int rc, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (ops && ops->is_mock_port(port->uport))
+ if (ops && ops->is_mock_port(port->uport_dev))
rc = ops->devm_cxl_add_passthrough_decoder(port);
else
rc = devm_cxl_add_passthrough_decoder(port);
@@ -186,7 +171,7 @@ int __wrap_devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_port *port = cxlhdm->port;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (ops && ops->is_mock_port(port->uport))
+ if (ops && ops->is_mock_port(port->uport_dev))
rc = ops->devm_cxl_enumerate_decoders(cxlhdm, info);
else
rc = devm_cxl_enumerate_decoders(cxlhdm, info);
@@ -201,7 +186,7 @@ int __wrap_devm_cxl_port_enumerate_dports(struct cxl_port *port)
int rc, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (ops && ops->is_mock_port(port->uport))
+ if (ops && ops->is_mock_port(port->uport_dev))
rc = ops->devm_cxl_port_enumerate_dports(port);
else
rc = devm_cxl_port_enumerate_dports(port);
@@ -259,24 +244,46 @@ int __wrap_cxl_dvsec_rr_decode(struct device *dev, int dvsec,
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dvsec_rr_decode, CXL);
-resource_size_t __wrap_cxl_rcrb_to_component(struct device *dev,
- resource_size_t rcrb,
- enum cxl_rcrb which)
+struct cxl_dport *__wrap_devm_cxl_add_rch_dport(struct cxl_port *port,
+ struct device *dport_dev,
+ int port_id,
+ resource_size_t rcrb)
+{
+ int index;
+ struct cxl_dport *dport;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_port(dport_dev)) {
+ dport = devm_cxl_add_dport(port, dport_dev, port_id,
+ CXL_RESOURCE_NONE);
+ if (!IS_ERR(dport)) {
+ dport->rcrb.base = rcrb;
+ dport->rch = true;
+ }
+ } else
+ dport = devm_cxl_add_rch_dport(port, dport_dev, port_id, rcrb);
+ put_cxl_mock_ops(index);
+
+ return dport;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_rch_dport, CXL);
+
+resource_size_t __wrap_cxl_rcd_component_reg_phys(struct device *dev,
+ struct cxl_dport *dport)
{
int index;
resource_size_t component_reg_phys;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(dev))
- component_reg_phys =
- ops->cxl_rcrb_to_component(dev, rcrb, which);
+ component_reg_phys = CXL_RESOURCE_NONE;
else
- component_reg_phys = cxl_rcrb_to_component(dev, rcrb, which);
+ component_reg_phys = cxl_rcd_component_reg_phys(dev, dport);
put_cxl_mock_ops(index);
return component_reg_phys;
}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcrb_to_component, CXL);
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcd_component_reg_phys, CXL);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
index bef8817b01f2..a94223750346 100644
--- a/tools/testing/cxl/test/mock.h
+++ b/tools/testing/cxl/test/mock.h
@@ -15,9 +15,6 @@ struct cxl_mock_ops {
acpi_string pathname,
struct acpi_object_list *arguments,
unsigned long long *data);
- resource_size_t (*cxl_rcrb_to_component)(struct device *dev,
- resource_size_t rcrb,
- enum cxl_rcrb which);
struct acpi_pci_root *(*acpi_pci_find_root)(acpi_handle handle);
bool (*is_mock_bus)(struct pci_bus *bus);
bool (*is_mock_port)(struct device *dev);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index e4e2d1650dd5..005043bd9623 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -3240,11 +3240,6 @@ static int nfit_test_probe(struct platform_device *pdev)
return 0;
}
-static int nfit_test_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static void nfit_test_release(struct device *dev)
{
struct nfit_test *nfit_test = to_nfit_test(dev);
@@ -3259,7 +3254,6 @@ static const struct platform_device_id nfit_test_id[] = {
static struct platform_driver nfit_test_driver = {
.probe = nfit_test_probe,
- .remove = nfit_test_remove,
.driver = {
.name = KBUILD_MODNAME,
},
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index b5f7a996c4d0..b00583d1eace 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -207,7 +207,36 @@ typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
const guid_t *guid, u64 rev, u64 func,
union acpi_object *argv4);
+void __iomem *__wrap_devm_ioremap(struct device *dev,
+ resource_size_t offset, unsigned long size);
+void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags);
+void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
+void *__wrap_memremap(resource_size_t offset, size_t size,
+ unsigned long flags);
+void __wrap_devm_memunmap(struct device *dev, void *addr);
+void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size);
+void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size);
void __wrap_iounmap(volatile void __iomem *addr);
+void __wrap_memunmap(void *addr);
+struct resource *__wrap___request_region(struct resource *parent,
+ resource_size_t start, resource_size_t n, const char *name,
+ int flags);
+int __wrap_insert_resource(struct resource *parent, struct resource *res);
+int __wrap_remove_resource(struct resource *res);
+struct resource *__wrap___devm_request_region(struct device *dev,
+ struct resource *parent, resource_size_t start,
+ resource_size_t n, const char *name);
+void __wrap___release_region(struct resource *parent, resource_size_t start,
+ resource_size_t n);
+void __wrap___devm_release_region(struct device *dev, struct resource *parent,
+ resource_size_t start, resource_size_t n);
+acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
+ struct acpi_object_list *p, struct acpi_buffer *buf);
+union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 func, union acpi_object *argv4);
+
void nfit_test_setup(nfit_test_lookup_fn lookup,
nfit_test_evaluate_dsm_fn evaluate);
void nfit_test_teardown(void);
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 6b456c5ecec1..666b56f22a41 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -87,6 +87,7 @@ TARGETS += timers
endif
TARGETS += tmpfs
TARGETS += tpm2
+TARGETS += tty
TARGETS += user
TARGETS += vDSO
TARGETS += mm
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 4761b768b773..c692cc86e7da 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -61,6 +61,7 @@ TEST_PROGS_x86_64 += x86_64/nx_huge_pages_test.sh
# Compiled test targets
TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test
TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
+TEST_GEN_PROGS_x86_64 += x86_64/dirty_log_page_splitting_test
TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
TEST_GEN_PROGS_x86_64 += x86_64/exit_on_emulation_failure_test
TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test
@@ -164,6 +165,7 @@ TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/resets
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += s390x/tprot
+TEST_GEN_PROGS_s390x += s390x/cmma_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
@@ -184,6 +186,8 @@ TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR))
TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR))
LIBKVM += $(LIBKVM_$(ARCH_DIR))
+OVERRIDE_TARGETS = 1
+
# lib.mak defines $(OUTPUT), prepends $(OUTPUT)/ to $(TEST_GEN_PROGS), and most
# importantly defines, i.e. overwrites, $(CC) (unless `make -e` or `make CC=`,
# which causes the environment variable to override the makefile).
@@ -198,7 +202,7 @@ else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
- -Wno-gnu-variable-sized-type-not-at-end \
+ -Wno-gnu-variable-sized-type-not-at-end -MD\
-fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
@@ -225,7 +229,18 @@ LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
-EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
+TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS))
+TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
+TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ))
+TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS))
+-include $(TEST_DEP_FILES)
+
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $< $(LIBKVM_OBJS) $(LDLIBS) -o $@
+$(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
+
+EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) cscope.*
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 2439c4043fed..09c116a82a84 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -128,6 +128,7 @@ static void prefault_mem(void *alias, uint64_t len)
static void run_test(enum vm_guest_mode mode, void *arg)
{
+ struct memstress_vcpu_args *vcpu_args;
struct test_params *p = arg;
struct uffd_desc **uffd_descs = NULL;
struct timespec start;
@@ -145,24 +146,24 @@ static void run_test(enum vm_guest_mode mode, void *arg)
"Failed to allocate buffer for guest data pattern");
memset(guest_data_prototype, 0xAB, demand_paging_size);
+ if (p->uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
+ for (i = 0; i < nr_vcpus; i++) {
+ vcpu_args = &memstress_args.vcpu_args[i];
+ prefault_mem(addr_gpa2alias(vm, vcpu_args->gpa),
+ vcpu_args->pages * memstress_args.guest_page_size);
+ }
+ }
+
if (p->uffd_mode) {
uffd_descs = malloc(nr_vcpus * sizeof(struct uffd_desc *));
TEST_ASSERT(uffd_descs, "Memory allocation failed");
-
for (i = 0; i < nr_vcpus; i++) {
- struct memstress_vcpu_args *vcpu_args;
void *vcpu_hva;
- void *vcpu_alias;
vcpu_args = &memstress_args.vcpu_args[i];
/* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
- vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa);
-
- prefault_mem(vcpu_alias,
- vcpu_args->pages * memstress_args.guest_page_size);
-
/*
* Set up user fault fd to handle demand paging
* requests.
@@ -207,10 +208,11 @@ static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
- " [-b memory] [-s type] [-v vcpus] [-o]\n", name);
+ " [-b memory] [-s type] [-v vcpus] [-c cpu_list] [-o]\n", name);
guest_modes_help();
printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
" UFFD registration mode: 'MISSING' or 'MINOR'.\n");
+ kvm_print_vcpu_pinning_help();
printf(" -d: add a delay in usec to the User Fault\n"
" FD handler to simulate demand paging\n"
" overheads. Ignored without -u.\n");
@@ -228,6 +230,7 @@ static void help(char *name)
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+ const char *cpulist = NULL;
struct test_params p = {
.src_type = DEFAULT_VM_MEM_SRC,
.partition_vcpu_memory_access = true,
@@ -236,7 +239,7 @@ int main(int argc, char *argv[])
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {
+ while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:c:o")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
@@ -263,6 +266,9 @@ int main(int argc, char *argv[])
TEST_ASSERT(nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
+ case 'c':
+ cpulist = optarg;
+ break;
case 'o':
p.partition_vcpu_memory_access = false;
break;
@@ -278,6 +284,12 @@ int main(int argc, char *argv[])
TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
}
+ if (cpulist) {
+ kvm_parse_vcpu_pinning(cpulist, memstress_args.vcpu_to_pcpu,
+ nr_vcpus);
+ memstress_args.pin_vcpus = true;
+ }
+
for_each_guest_mode(run_test, &p);
return 0;
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index e9d6d1aecf89..d374dbcf9a53 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -136,77 +136,6 @@ struct test_params {
bool random_access;
};
-static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
-{
- int i;
-
- for (i = 0; i < slots; i++) {
- int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
- int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
-
- vm_mem_region_set_flags(vm, slot, flags);
- }
-}
-
-static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
-{
- toggle_dirty_logging(vm, slots, true);
-}
-
-static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
-{
- toggle_dirty_logging(vm, slots, false);
-}
-
-static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
-{
- int i;
-
- for (i = 0; i < slots; i++) {
- int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
-
- kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
- }
-}
-
-static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
- int slots, uint64_t pages_per_slot)
-{
- int i;
-
- for (i = 0; i < slots; i++) {
- int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
-
- kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
- }
-}
-
-static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
-{
- unsigned long **bitmaps;
- int i;
-
- bitmaps = malloc(slots * sizeof(bitmaps[0]));
- TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
-
- for (i = 0; i < slots; i++) {
- bitmaps[i] = bitmap_zalloc(pages_per_slot);
- TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
- }
-
- return bitmaps;
-}
-
-static void free_bitmaps(unsigned long *bitmaps[], int slots)
-{
- int i;
-
- for (i = 0; i < slots; i++)
- free(bitmaps[i]);
-
- free(bitmaps);
-}
-
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
@@ -236,7 +165,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
pages_per_slot = host_num_pages / p->slots;
- bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
+ bitmaps = memstress_alloc_bitmaps(p->slots, pages_per_slot);
if (dirty_log_manual_caps)
vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
@@ -277,7 +206,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Enable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
- enable_dirty_logging(vm, p->slots);
+ memstress_enable_dirty_logging(vm, p->slots);
ts_diff = timespec_elapsed(start);
pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
@@ -306,7 +235,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
clock_gettime(CLOCK_MONOTONIC, &start);
- get_dirty_log(vm, bitmaps, p->slots);
+ memstress_get_dirty_log(vm, bitmaps, p->slots);
ts_diff = timespec_elapsed(start);
get_dirty_log_total = timespec_add(get_dirty_log_total,
ts_diff);
@@ -315,7 +244,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
if (dirty_log_manual_caps) {
clock_gettime(CLOCK_MONOTONIC, &start);
- clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
+ memstress_clear_dirty_log(vm, bitmaps, p->slots,
+ pages_per_slot);
ts_diff = timespec_elapsed(start);
clear_dirty_log_total = timespec_add(clear_dirty_log_total,
ts_diff);
@@ -334,7 +264,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Disable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
- disable_dirty_logging(vm, p->slots);
+ memstress_disable_dirty_logging(vm, p->slots);
ts_diff = timespec_elapsed(start);
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
@@ -359,7 +289,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
- free_bitmaps(bitmaps, p->slots);
+ memstress_free_bitmaps(bitmaps, p->slots);
arch_cleanup_vm(vm);
memstress_destroy_vm(vm);
}
@@ -402,17 +332,7 @@ static void help(char *name)
" so -w X means each page has an X%% chance of writing\n"
" and a (100-X)%% chance of reading.\n"
" (default: 100 i.e. all pages are written to.)\n");
- printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
- " values (target pCPU), one for each vCPU, plus an optional\n"
- " entry for the main application task (specified via entry\n"
- " <nr_vcpus + 1>). If used, entries must be provided for all\n"
- " vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
- " E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
- " vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
- " ./dirty_log_perf_test -v 3 -c 22,23,24,50\n\n"
- " To leave the application task unpinned, drop the final entry:\n\n"
- " ./dirty_log_perf_test -v 3 -c 22,23,24\n\n"
- " (default: no pinning)\n");
+ kvm_print_vcpu_pinning_help();
puts("");
exit(0);
}
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index a089c356f354..07732a157ccd 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -733,6 +733,7 @@ static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
+void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
int nr_vcpus);
diff --git a/tools/testing/selftests/kvm/include/memstress.h b/tools/testing/selftests/kvm/include/memstress.h
index 72e3e358ef7b..ce4e603050ea 100644
--- a/tools/testing/selftests/kvm/include/memstress.h
+++ b/tools/testing/selftests/kvm/include/memstress.h
@@ -72,4 +72,12 @@ void memstress_guest_code(uint32_t vcpu_id);
uint64_t memstress_nested_pages(int nr_vcpus);
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
+void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots);
+void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots);
+void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots);
+void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
+ int slots, uint64_t pages_per_slot);
+unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot);
+void memstress_free_bitmaps(unsigned long *bitmaps[], int slots);
+
#endif /* SELFTEST_KVM_MEMSTRESS_H */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 298c4372fb1a..9741a7ff6380 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -494,6 +494,23 @@ static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
return pcpu;
}
+void kvm_print_vcpu_pinning_help(void)
+{
+ const char *name = program_invocation_name;
+
+ printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
+ " values (target pCPU), one for each vCPU, plus an optional\n"
+ " entry for the main application task (specified via entry\n"
+ " <nr_vcpus + 1>). If used, entries must be provided for all\n"
+ " vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
+ " E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
+ " vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
+ " %s -v 3 -c 22,23,24,50\n\n"
+ " To leave the application task unpinned, drop the final entry:\n\n"
+ " %s -v 3 -c 22,23,24\n\n"
+ " (default: no pinning)\n", name, name);
+}
+
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
int nr_vcpus)
{
diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c
index 5f1d3173c238..df457452d146 100644
--- a/tools/testing/selftests/kvm/lib/memstress.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -5,6 +5,7 @@
#define _GNU_SOURCE
#include <inttypes.h>
+#include <linux/bitmap.h>
#include "kvm_util.h"
#include "memstress.h"
@@ -64,6 +65,9 @@ void memstress_guest_code(uint32_t vcpu_idx)
GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);
while (true) {
+ for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size)
+ (void) *((volatile char *)args + i);
+
for (i = 0; i < pages; i++) {
if (args->random_access)
page = guest_random_u32(&rand_state) % pages;
@@ -320,3 +324,74 @@ void memstress_join_vcpu_threads(int nr_vcpus)
for (i = 0; i < nr_vcpus; i++)
pthread_join(vcpu_threads[i].thread, NULL);
}
+
+static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
+{
+ int i;
+
+ for (i = 0; i < slots; i++) {
+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
+ int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
+
+ vm_mem_region_set_flags(vm, slot, flags);
+ }
+}
+
+void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots)
+{
+ toggle_dirty_logging(vm, slots, true);
+}
+
+void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots)
+{
+ toggle_dirty_logging(vm, slots, false);
+}
+
+void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
+{
+ int i;
+
+ for (i = 0; i < slots; i++) {
+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
+
+ kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
+ }
+}
+
+void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
+ int slots, uint64_t pages_per_slot)
+{
+ int i;
+
+ for (i = 0; i < slots; i++) {
+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
+
+ kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
+ }
+}
+
+unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
+{
+ unsigned long **bitmaps;
+ int i;
+
+ bitmaps = malloc(slots * sizeof(bitmaps[0]));
+ TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
+
+ for (i = 0; i < slots; i++) {
+ bitmaps[i] = bitmap_zalloc(pages_per_slot);
+ TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
+ }
+
+ return bitmaps;
+}
+
+void memstress_free_bitmaps(unsigned long *bitmaps[], int slots)
+{
+ int i;
+
+ for (i = 0; i < slots; i++)
+ free(bitmaps[i]);
+
+ free(bitmaps);
+}
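
For reference, a sketch (not part of the patch) of the call sequence the relocated helpers are meant for, mirroring how dirty_log_perf_test uses them above:

	unsigned long **bitmaps = memstress_alloc_bitmaps(slots, pages_per_slot);

	memstress_enable_dirty_logging(vm, slots);
	/* ... run dirty-memory iterations ... */
	memstress_get_dirty_log(vm, bitmaps, slots);
	if (dirty_log_manual_caps)
		memstress_clear_dirty_log(vm, bitmaps, slots, pages_per_slot);
	memstress_disable_dirty_logging(vm, slots);
	memstress_free_bitmaps(bitmaps, slots);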
diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
index 92cef20902f1..271f63891581 100644
--- a/tools/testing/selftests/kvm/lib/userfaultfd_util.c
+++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
@@ -70,7 +70,7 @@ static void *uffd_handler_thread_fn(void *arg)
r = read(pollfd[1].fd, &tmp_chr, 1);
TEST_ASSERT(r == 1,
"Error reading pipefd in UFFD thread\n");
- return NULL;
+ break;
}
if (!(pollfd[0].revents & POLLIN))
@@ -103,7 +103,7 @@ static void *uffd_handler_thread_fn(void *arg)
ts_diff = timespec_elapsed(start);
PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
pages, ts_diff.tv_sec, ts_diff.tv_nsec,
- pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
+ pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / NSEC_PER_SEC));
return NULL;
}
diff --git a/tools/testing/selftests/kvm/s390x/cmma_test.c b/tools/testing/selftests/kvm/s390x/cmma_test.c
new file mode 100644
index 000000000000..1d73e78e8fa7
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/cmma_test.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test for s390x CMMA migration
+ *
+ * Copyright IBM Corp. 2023
+ *
+ * Authors:
+ * Nico Boehr <nrb@linux.ibm.com>
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "kselftest.h"
+
+#define MAIN_PAGE_COUNT 512
+
+#define TEST_DATA_PAGE_COUNT 512
+#define TEST_DATA_MEMSLOT 1
+#define TEST_DATA_START_GFN 4096
+
+#define TEST_DATA_TWO_PAGE_COUNT 256
+#define TEST_DATA_TWO_MEMSLOT 2
+#define TEST_DATA_TWO_START_GFN 8192
+
+static char cmma_value_buf[MAIN_PAGE_COUNT + TEST_DATA_PAGE_COUNT];
+
+/**
+ * Dirty CMMA attributes of exactly one page in the TEST_DATA memslot,
+ * so that use_cmma is enabled and the CMMA-related ioctls do something.
+ */
+static void guest_do_one_essa(void)
+{
+ asm volatile(
+ /* load TEST_DATA_START_GFN into r1 */
+ " llilf 1,%[start_gfn]\n"
+ /* calculate the address from the gfn */
+ " sllg 1,1,12(0)\n"
+ /* set the first page in TEST_DATA memslot to STABLE */
+ " .insn rrf,0xb9ab0000,2,1,1,0\n"
+ /* hypercall */
+ " diag 0,0,0x501\n"
+ "0: j 0b"
+ :
+ : [start_gfn] "L"(TEST_DATA_START_GFN)
+ : "r1", "r2", "memory", "cc"
+ );
+}
+
+/**
+ * Touch CMMA attributes of all pages in TEST_DATA memslot. Set them to stable
+ * state.
+ */
+static void guest_dirty_test_data(void)
+{
+ asm volatile(
+ /* r1 = TEST_DATA_START_GFN */
+ " xgr 1,1\n"
+ " llilf 1,%[start_gfn]\n"
+ /* r5 = TEST_DATA_PAGE_COUNT */
+ " lghi 5,%[page_count]\n"
+ /* r5 += r1 */
+ "2: agfr 5,1\n"
+ /* r2 = r1 << 12 */
+ "1: sllg 2,1,12(0)\n"
+ /* essa(r4, r2, SET_STABLE) */
+ " .insn rrf,0xb9ab0000,4,2,1,0\n"
+ /* i++ */
+ " agfi 1,1\n"
+ /* if r1 < r5 goto 1 */
+ " cgrjl 1,5,1b\n"
+ /* hypercall */
+ " diag 0,0,0x501\n"
+ "0: j 0b"
+ :
+ : [start_gfn] "L"(TEST_DATA_START_GFN),
+ [page_count] "L"(TEST_DATA_PAGE_COUNT)
+ :
+ /* the counter in our loop over the pages */
+ "r1",
+ /* the calculated page physical address */
+ "r2",
+ /* ESSA output register */
+ "r4",
+ /* last page */
+ "r5",
+ "cc", "memory"
+ );
+}
+
+static struct kvm_vm *create_vm(void)
+{
+ return ____vm_create(VM_MODE_DEFAULT);
+}
+
+static void create_main_memslot(struct kvm_vm *vm)
+{
+ int i;
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0);
+ /* set the array of memslots to zero like __vm_create does */
+ for (i = 0; i < NR_MEM_REGIONS; i++)
+ vm->memslots[i] = 0;
+}
+
+static void create_test_memslot(struct kvm_vm *vm)
+{
+ vm_userspace_mem_region_add(vm,
+ VM_MEM_SRC_ANONYMOUS,
+ TEST_DATA_START_GFN << vm->page_shift,
+ TEST_DATA_MEMSLOT,
+ TEST_DATA_PAGE_COUNT,
+ 0
+ );
+ vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
+}
+
+static void create_memslots(struct kvm_vm *vm)
+{
+ /*
+ * Our VM has the following memory layout:
+ * +------+---------------------------+
+ * | GFN | Memslot |
+ * +------+---------------------------+
+ * | 0 | |
+ * | ... | MAIN (Code, Stack, ...) |
+ * | 511 | |
+ * +------+---------------------------+
+ * | 4096 | |
+ * | ... | TEST_DATA |
+ * | 4607 | |
+ * +------+---------------------------+
+ */
+ create_main_memslot(vm);
+ create_test_memslot(vm);
+}
+
+static void finish_vm_setup(struct kvm_vm *vm)
+{
+ struct userspace_mem_region *slot0;
+
+ kvm_vm_elf_load(vm, program_invocation_name);
+
+ slot0 = memslot2region(vm, 0);
+ ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
+
+ kvm_arch_vm_post_create(vm);
+}
+
+static struct kvm_vm *create_vm_two_memslots(void)
+{
+ struct kvm_vm *vm;
+
+ vm = create_vm();
+
+ create_memslots(vm);
+
+ finish_vm_setup(vm);
+
+ return vm;
+}
+
+static void enable_cmma(struct kvm_vm *vm)
+{
+ int r;
+
+ r = __kvm_device_attr_set(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA, NULL);
+ TEST_ASSERT(!r, "enabling cmma failed r=%d errno=%d", r, errno);
+}
+
+static void enable_dirty_tracking(struct kvm_vm *vm)
+{
+ vm_mem_region_set_flags(vm, 0, KVM_MEM_LOG_DIRTY_PAGES);
+ vm_mem_region_set_flags(vm, TEST_DATA_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+static int __enable_migration_mode(struct kvm_vm *vm)
+{
+ return __kvm_device_attr_set(vm->fd,
+ KVM_S390_VM_MIGRATION,
+ KVM_S390_VM_MIGRATION_START,
+ NULL
+ );
+}
+
+static void enable_migration_mode(struct kvm_vm *vm)
+{
+ int r = __enable_migration_mode(vm);
+
+ TEST_ASSERT(!r, "enabling migration mode failed r=%d errno=%d", r, errno);
+}
+
+static bool is_migration_mode_on(struct kvm_vm *vm)
+{
+ u64 out;
+ int r;
+
+ r = __kvm_device_attr_get(vm->fd,
+ KVM_S390_VM_MIGRATION,
+ KVM_S390_VM_MIGRATION_STATUS,
+ &out
+ );
+ TEST_ASSERT(!r, "getting migration mode status failed r=%d errno=%d", r, errno);
+ return out;
+}
+
+static int vm_get_cmma_bits(struct kvm_vm *vm, u64 flags, int *errno_out)
+{
+ struct kvm_s390_cmma_log args;
+ int rc;
+
+ errno = 0;
+
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = 0,
+ .count = sizeof(cmma_value_buf),
+ .flags = flags,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ rc = __vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+
+ *errno_out = errno;
+ return rc;
+}
+
+static void test_get_cmma_basic(void)
+{
+ struct kvm_vm *vm = create_vm_two_memslots();
+ struct kvm_vcpu *vcpu;
+ int rc, errno_out;
+
+ /* GET_CMMA_BITS without CMMA enabled should fail */
+ rc = vm_get_cmma_bits(vm, 0, &errno_out);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_out, ENXIO);
+
+ enable_cmma(vm);
+ vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
+
+ vcpu_run(vcpu);
+
+ /* GET_CMMA_BITS without migration mode and without peeking should fail */
+ rc = vm_get_cmma_bits(vm, 0, &errno_out);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_out, EINVAL);
+
+ /* GET_CMMA_BITS without migration mode and with peeking should work */
+ rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out);
+ ASSERT_EQ(rc, 0);
+ ASSERT_EQ(errno_out, 0);
+
+ enable_dirty_tracking(vm);
+ enable_migration_mode(vm);
+
+ /* GET_CMMA_BITS with invalid flags */
+ rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_out, EINVAL);
+
+ kvm_vm_free(vm);
+}
+
+static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
+{
+ ASSERT_EQ(vcpu->run->exit_reason, 13); /* KVM_EXIT_S390_SIEIC */
+ ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4); /* instruction interception */
+ ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300); /* DIAG */
+ ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000); /* operand 0x501 */
+}
+
+static void test_migration_mode(void)
+{
+ struct kvm_vm *vm = create_vm();
+ struct kvm_vcpu *vcpu;
+ u64 orig_psw;
+ int rc;
+
+ /* enabling migration mode on a VM without memory should fail */
+ rc = __enable_migration_mode(vm);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno, EINVAL);
+ TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
+ errno = 0;
+
+ create_memslots(vm);
+ finish_vm_setup(vm);
+
+ enable_cmma(vm);
+ vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
+ orig_psw = vcpu->run->psw_addr;
+
+ /*
+ * Execute one essa instruction in the guest. Otherwise the guest will
+ * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
+ */
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ /* migration mode when memslots have dirty tracking off should fail */
+ rc = __enable_migration_mode(vm);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno, EINVAL);
+ TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
+ errno = 0;
+
+ /* enable dirty tracking */
+ enable_dirty_tracking(vm);
+
+ /* enabling migration mode should work now */
+ rc = __enable_migration_mode(vm);
+ ASSERT_EQ(rc, 0);
+ TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
+ errno = 0;
+
+ /* execute another ESSA instruction to see this goes fine */
+ vcpu->run->psw_addr = orig_psw;
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ /*
+ * With migration mode on, create a new memslot with dirty tracking off.
+ * This should turn off migration mode.
+ */
+ TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
+ vm_userspace_mem_region_add(vm,
+ VM_MEM_SRC_ANONYMOUS,
+ TEST_DATA_TWO_START_GFN << vm->page_shift,
+ TEST_DATA_TWO_MEMSLOT,
+ TEST_DATA_TWO_PAGE_COUNT,
+ 0
+ );
+ TEST_ASSERT(!is_migration_mode_on(vm),
+ "creating memslot without dirty tracking turns off migration mode"
+ );
+
+ /* ESSA instructions should still execute fine */
+ vcpu->run->psw_addr = orig_psw;
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ /*
+ * Turn on dirty tracking on the new memslot.
+ * It should be possible to turn migration mode back on again.
+ */
+ vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
+ rc = __enable_migration_mode(vm);
+ ASSERT_EQ(rc, 0);
+ TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
+ errno = 0;
+
+ /*
+ * Turn off dirty tracking again, this time with just a flag change.
+ * Again, migration mode should turn off.
+ */
+ TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
+ vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, 0);
+ TEST_ASSERT(!is_migration_mode_on(vm),
+ "disabling dirty tracking should turn off migration mode"
+ );
+
+ /* ESSA instructions should still execute fine */
+ vcpu->run->psw_addr = orig_psw;
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ kvm_vm_free(vm);
+}
+
+/**
+ * Given a VM with the MAIN and TEST_DATA memslots, assert that GET_CMMA_BITS
+ * reports the CMMA attributes of all pages in both memslots and that nothing
+ * else is dirty.
+ * This has the useful side effect of ensuring nothing is CMMA dirty after this
+ * function.
+ */
+static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
+{
+ struct kvm_s390_cmma_log args;
+
+ /*
+ * First iteration - everything should be dirty.
+ * Start at the main memslot...
+ */
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = 0,
+ .count = sizeof(cmma_value_buf),
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+ ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
+ ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
+ ASSERT_EQ(args.start_gfn, 0);
+
+ /* ...and then - after a hole - the TEST_DATA memslot should follow */
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = MAIN_PAGE_COUNT,
+ .count = sizeof(cmma_value_buf),
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+ ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
+ ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
+ ASSERT_EQ(args.remaining, 0);
+
+ /* ...and nothing else should be there */
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT,
+ .count = sizeof(cmma_value_buf),
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+ ASSERT_EQ(args.count, 0);
+ ASSERT_EQ(args.start_gfn, 0);
+ ASSERT_EQ(args.remaining, 0);
+}
+
+/**
+ * Given a VM, assert no pages are CMMA dirty.
+ */
+static void assert_no_pages_cmma_dirty(struct kvm_vm *vm)
+{
+ struct kvm_s390_cmma_log args;
+
+ /* If we start from GFN 0 again, nothing should be dirty. */
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = 0,
+ .count = sizeof(cmma_value_buf),
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+ if (args.count || args.remaining || args.start_gfn)
+ TEST_FAIL("pages are still dirty start_gfn=0x%llx count=%u remaining=%llu",
+ args.start_gfn,
+ args.count,
+ args.remaining
+ );
+}
+
+static void test_get_initial_dirty(void)
+{
+ struct kvm_vm *vm = create_vm_two_memslots();
+ struct kvm_vcpu *vcpu;
+
+ enable_cmma(vm);
+ vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
+
+ /*
+ * Execute one essa instruction in the guest. Otherwise the guest will
+ * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
+ */
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ enable_dirty_tracking(vm);
+ enable_migration_mode(vm);
+
+ assert_all_slots_cmma_dirty(vm);
+
+ /* Start from the beginning again and make sure nothing else is dirty */
+ assert_no_pages_cmma_dirty(vm);
+
+ kvm_vm_free(vm);
+}
+
+static void query_cmma_range(struct kvm_vm *vm,
+ u64 start_gfn, u64 gfn_count,
+ struct kvm_s390_cmma_log *res_out)
+{
+ *res_out = (struct kvm_s390_cmma_log){
+ .start_gfn = start_gfn,
+ .count = gfn_count,
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, res_out);
+}
+
+/**
+ * Assert the given cmma_log struct that was executed by query_cmma_range()
+ * indicates the first dirty gfn is at first_dirty_gfn and contains exactly
+ * dirty_gfn_count CMMA values.
+ */
+static void assert_cmma_dirty(u64 first_dirty_gfn,
+ u64 dirty_gfn_count,
+ const struct kvm_s390_cmma_log *res)
+{
+ ASSERT_EQ(res->start_gfn, first_dirty_gfn);
+ ASSERT_EQ(res->count, dirty_gfn_count);
+ for (size_t i = 0; i < dirty_gfn_count; i++)
+ ASSERT_EQ(cmma_value_buf[i], 0x0); /* stable state */
+ ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
+}
+
+static void test_get_skip_holes(void)
+{
+ size_t gfn_offset;
+ struct kvm_vm *vm = create_vm_two_memslots();
+ struct kvm_s390_cmma_log log;
+ struct kvm_vcpu *vcpu;
+ u64 orig_psw;
+
+ enable_cmma(vm);
+ vcpu = vm_vcpu_add(vm, 1, guest_dirty_test_data);
+
+ orig_psw = vcpu->run->psw_addr;
+
+ /*
+ * Execute some essa instructions in the guest. Otherwise the guest will
+ * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
+ */
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ enable_dirty_tracking(vm);
+ enable_migration_mode(vm);
+
+ /* un-dirty all pages */
+ assert_all_slots_cmma_dirty(vm);
+
+ /* Then, dirty just the TEST_DATA memslot */
+ vcpu->run->psw_addr = orig_psw;
+ vcpu_run(vcpu);
+
+ gfn_offset = TEST_DATA_START_GFN;
+ /**
+ * Query CMMA attributes of one page, starting at page 0. Since the
+ * main memslot was not touched by the VM, this should yield the first
+ * page of the TEST_DATA memslot.
+ * The dirty bitmap should now look like this:
+ * 0: not dirty
+ * [0x1, 0x200): dirty
+ */
+ query_cmma_range(vm, 0, 1, &log);
+ assert_cmma_dirty(gfn_offset, 1, &log);
+ gfn_offset++;
+
+ /**
+ * Query CMMA attributes of 32 (0x20) pages past the end of the TEST_DATA
+ * memslot. This should wrap back to the beginning of the TEST_DATA
+ * memslot, page 1.
+ * The dirty bitmap should now look like this:
+ * [0, 0x21): not dirty
+ * [0x21, 0x200): dirty
+ */
+ query_cmma_range(vm, TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT, 0x20, &log);
+ assert_cmma_dirty(gfn_offset, 0x20, &log);
+ gfn_offset += 0x20;
+
+ /* Skip 32 pages */
+ gfn_offset += 0x20;
+
+ /**
+ * After skipping 32 pages, query the next 32 (0x20) pages.
+ * The dirty bitmap should now look like this:
+ * [0, 0x21): not dirty
+ * [0x21, 0x41): dirty
+ * [0x41, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ query_cmma_range(vm, gfn_offset, 0x20, &log);
+ assert_cmma_dirty(gfn_offset, 0x20, &log);
+ gfn_offset += 0x20;
+
+ /**
+ * Query 1 page from the beginning of the TEST_DATA memslot. This should
+ * yield page 0x21.
+ * The dirty bitmap should now look like this:
+ * [0, 0x22): not dirty
+ * [0x22, 0x41): dirty
+ * [0x41, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ query_cmma_range(vm, TEST_DATA_START_GFN, 1, &log);
+ assert_cmma_dirty(TEST_DATA_START_GFN + 0x21, 1, &log);
+ gfn_offset++;
+
+ /**
+ * Query 15 (0xF) pages from page 0x23 in TEST_DATA memslot.
+ * This should yield pages [0x23, 0x32).
+ * The dirty bitmap should now look like this:
+ * [0, 0x22): not dirty
+ * 0x22: dirty
+ * [0x23, 0x32): not dirty
+ * [0x32, 0x41): dirty
+ * [0x41, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ gfn_offset = TEST_DATA_START_GFN + 0x23;
+ query_cmma_range(vm, gfn_offset, 15, &log);
+ assert_cmma_dirty(gfn_offset, 15, &log);
+
+ /**
+ * Query 17 (0x11) pages from page 0x22 in TEST_DATA memslot.
+ * This should yield pages [0x22, 0x33).
+ * The dirty bitmap should now look like this:
+ * [0, 0x33): not dirty
+ * [0x33, 0x41): dirty
+ * [0x41, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ gfn_offset = TEST_DATA_START_GFN + 0x22;
+ query_cmma_range(vm, gfn_offset, 17, &log);
+ assert_cmma_dirty(gfn_offset, 17, &log);
+
+ /**
+ * Query 25 (0x19) pages from page 0x40 in TEST_DATA memslot.
+ * This should yield page 0x40 and nothing more, since there are more
+ * than 16 non-dirty pages after page 0x40.
+ * The dirty bitmap should now look like this:
+ * [0, 0x33): not dirty
+ * [0x33, 0x40): dirty
+ * [0x40, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ gfn_offset = TEST_DATA_START_GFN + 0x40;
+ query_cmma_range(vm, gfn_offset, 25, &log);
+ assert_cmma_dirty(gfn_offset, 1, &log);
+
+ /**
+ * Query pages [0x33, 0x40).
+ * The dirty bitmap should now look like this:
+ * [0, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ gfn_offset = TEST_DATA_START_GFN + 0x33;
+ query_cmma_range(vm, gfn_offset, 0x40 - 0x33, &log);
+ assert_cmma_dirty(gfn_offset, 0x40 - 0x33, &log);
+
+ /**
+ * Query the remaining pages [0x61, 0x200).
+ */
+ gfn_offset = TEST_DATA_START_GFN;
+ query_cmma_range(vm, gfn_offset, TEST_DATA_PAGE_COUNT - 0x61, &log);
+ assert_cmma_dirty(TEST_DATA_START_GFN + 0x61, TEST_DATA_PAGE_COUNT - 0x61, &log);
+
+ assert_no_pages_cmma_dirty(vm);
+}
+
+struct testdef {
+ const char *name;
+ void (*test)(void);
+} testlist[] = {
+ { "migration mode and dirty tracking", test_migration_mode },
+ { "GET_CMMA_BITS: basic calls", test_get_cmma_basic },
+ { "GET_CMMA_BITS: all pages are dirty initally", test_get_inital_dirty },
+ { "GET_CMMA_BITS: holes are skipped", test_get_skip_holes },
+};
+
+/**
+ * The kernel may support CMMA, but the machine may not (e.g. when running as
+ * a guest-3).
+ *
+ * In this case, the CMMA capabilities are all there, but the CMMA-related
+ * ioctls fail. To find out whether the machine supports CMMA, create a
+ * temporary VM and then query the CMMA feature of the VM.
+ */
+static int machine_has_cmma(void)
+{
+ struct kvm_vm *vm = create_vm();
+ int r;
+
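+ /* __kvm_has_device_attr() returns 0 if the attribute is supported */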
+ r = !__kvm_has_device_attr(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA);
+ kvm_vm_free(vm);
+
+ return r;
+}
+
+int main(int argc, char *argv[])
+{
+ int idx;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_CMMA_MIGRATION));
+ TEST_REQUIRE(machine_has_cmma());
+
+ ksft_print_header();
+
+ ksft_set_plan(ARRAY_SIZE(testlist));
+
+ for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
+ testlist[idx].test();
+ ksft_test_result_pass("%s\n", testlist[idx].name);
+ }
+
+ ksft_finished(); /* Print results and exit() accordingly */
+}
diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
index 2fc3ad9c887e..d3c3aa93f090 100644
--- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
@@ -163,6 +163,25 @@ static void set_cpuid_after_run(struct kvm_vcpu *vcpu)
ent->eax = eax;
}
+static void test_get_cpuid2(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent + 1);
+ int i, r;
+
+ vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
+ TEST_ASSERT(cpuid->nent == vcpu->cpuid->nent,
+ "KVM didn't update nent on success, wanted %u, got %u\n",
+ vcpu->cpuid->nent, cpuid->nent);
+
+ for (i = 0; i < vcpu->cpuid->nent; i++) {
+ cpuid->nent = i;
+ r = __vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
+ TEST_ASSERT(r && errno == E2BIG, KVM_IOCTL_ERROR(KVM_GET_CPUID2, r));
+ TEST_ASSERT(cpuid->nent == i, "KVM modified nent on failure");
+ }
+ free(cpuid);
+}
+
int main(void)
{
struct kvm_vcpu *vcpu;
@@ -183,5 +202,7 @@ int main(void)
set_cpuid_after_run(vcpu);
+ test_get_cpuid2(vcpu);
+
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
new file mode 100644
index 000000000000..beb7e2c10211
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM dirty logging page splitting test
+ *
+ * Based on dirty_log_perf.c
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ * Copyright (C) 2023, Google, Inc.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <linux/bitmap.h>
+
+#include "kvm_util.h"
+#include "test_util.h"
+#include "memstress.h"
+#include "guest_modes.h"
+
+#define VCPUS 2
+#define SLOTS 2
+#define ITERATIONS 2
+
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+
+static enum vm_mem_backing_src_type backing_src = VM_MEM_SRC_ANONYMOUS_HUGETLB;
+
+static u64 dirty_log_manual_caps;
+static bool host_quit;
+static int iteration;
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+
+struct kvm_page_stats {
+ uint64_t pages_4k;
+ uint64_t pages_2m;
+ uint64_t pages_1g;
+ uint64_t hugepages;
+};
+
+static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage)
+{
+ stats->pages_4k = vm_get_stat(vm, "pages_4k");
+ stats->pages_2m = vm_get_stat(vm, "pages_2m");
+ stats->pages_1g = vm_get_stat(vm, "pages_1g");
+ stats->hugepages = stats->pages_2m + stats->pages_1g;
+
+ pr_debug("\nPage stats after %s: 4K: %ld 2M: %ld 1G: %ld huge: %ld\n",
+ stage, stats->pages_4k, stats->pages_2m, stats->pages_1g,
+ stats->hugepages);
+}
+
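+/* Kick off the next iteration and wait until every vCPU has completed it. */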
+static void run_vcpu_iteration(struct kvm_vm *vm)
+{
+ int i;
+
+ iteration++;
+ for (i = 0; i < VCPUS; i++) {
+ while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
+ iteration)
+ ;
+ }
+}
+
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
+{
+ struct kvm_vcpu *vcpu = vcpu_args->vcpu;
+ int vcpu_idx = vcpu_args->vcpu_idx;
+
+ while (!READ_ONCE(host_quit)) {
+ int current_iteration = READ_ONCE(iteration);
+
+ vcpu_run(vcpu);
+
+ ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+
+ vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
+
+ /* Wait for the start of the next iteration to be signaled. */
+ while (current_iteration == READ_ONCE(iteration) &&
+ READ_ONCE(iteration) >= 0 &&
+ !READ_ONCE(host_quit))
+ ;
+ }
+}
+
+static void run_test(enum vm_guest_mode mode, void *unused)
+{
+ struct kvm_vm *vm;
+ unsigned long **bitmaps;
+ uint64_t guest_num_pages;
+ uint64_t host_num_pages;
+ uint64_t pages_per_slot;
+ int i;
+ uint64_t total_4k_pages;
+ struct kvm_page_stats stats_populated;
+ struct kvm_page_stats stats_dirty_logging_enabled;
+ struct kvm_page_stats stats_dirty_pass[ITERATIONS];
+ struct kvm_page_stats stats_clear_pass[ITERATIONS];
+ struct kvm_page_stats stats_dirty_logging_disabled;
+ struct kvm_page_stats stats_repopulated;
+
+ vm = memstress_create_vm(mode, VCPUS, guest_percpu_mem_size,
+ SLOTS, backing_src, false);
+
+ guest_num_pages = (VCPUS * guest_percpu_mem_size) >> vm->page_shift;
+ guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
+ host_num_pages = vm_num_host_pages(mode, guest_num_pages);
+ pages_per_slot = host_num_pages / SLOTS;
+
+ bitmaps = memstress_alloc_bitmaps(SLOTS, pages_per_slot);
+
+ if (dirty_log_manual_caps)
+ vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
+ dirty_log_manual_caps);
+
+ /* Start the iterations */
+ iteration = -1;
+ host_quit = false;
+
+ for (i = 0; i < VCPUS; i++)
+ vcpu_last_completed_iteration[i] = -1;
+
+ memstress_start_vcpu_threads(VCPUS, vcpu_worker);
+
+ run_vcpu_iteration(vm);
+ get_page_stats(vm, &stats_populated, "populating memory");
+
+ /* Enable dirty logging */
+ memstress_enable_dirty_logging(vm, SLOTS);
+
+ get_page_stats(vm, &stats_dirty_logging_enabled, "enabling dirty logging");
+
+ while (iteration < ITERATIONS) {
+ run_vcpu_iteration(vm);
+ get_page_stats(vm, &stats_dirty_pass[iteration - 1],
+ "dirtying memory");
+
+ memstress_get_dirty_log(vm, bitmaps, SLOTS);
+
+ if (dirty_log_manual_caps) {
+ memstress_clear_dirty_log(vm, bitmaps, SLOTS, pages_per_slot);
+
+ get_page_stats(vm, &stats_clear_pass[iteration - 1], "clearing dirty log");
+ }
+ }
+
+ /* Disable dirty logging */
+ memstress_disable_dirty_logging(vm, SLOTS);
+
+ get_page_stats(vm, &stats_dirty_logging_disabled, "disabling dirty logging");
+
+ /* Run vCPUs again to fault pages back in. */
+ run_vcpu_iteration(vm);
+ get_page_stats(vm, &stats_repopulated, "repopulating memory");
+
+ /*
+ * Tell the vCPU threads to quit. No need to manually check that vCPUs
+ * have stopped running after disabling dirty logging; the join will
+ * wait for them to exit.
+ */
+ host_quit = true;
+ memstress_join_vcpu_threads(VCPUS);
+
+ memstress_free_bitmaps(bitmaps, SLOTS);
+ memstress_destroy_vm(vm);
+
+ /* Make assertions about the page counts. */
+ total_4k_pages = stats_populated.pages_4k;
+ total_4k_pages += stats_populated.pages_2m * 512; /* 2M == 512 * 4K */
+ total_4k_pages += stats_populated.pages_1g * 512 * 512; /* 1G == 512 * 512 * 4K */
+
+ /*
+ * Check that all huge pages were split. Since large pages can only
+ * exist in the data slot, and the vCPUs should have dirtied all pages
+ * in the data slot, there should be no huge pages left after splitting.
+ * Splitting happens at dirty log enable time without
+ * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and after the first clear pass
+ * with that capability.
+ */
+ if (dirty_log_manual_caps) {
+ ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
+ ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
+ ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
+ } else {
+ ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
+ ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
+ }
+
+ /*
+ * Once dirty logging is disabled and the vCPUs have touched all their
+ * memory again, the page counts should be the same as they were
+ * right after initial population of memory.
+ */
+ ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
+ ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
+ ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
+}
+
+static void help(char *name)
+{
+ puts("");
+ printf("usage: %s [-h] [-b vcpu bytes] [-s mem type]\n",
+ name);
+ puts("");
+ printf(" -b: specify the size of the memory region which should be\n"
+ " dirtied by each vCPU. e.g. 10M or 3G.\n"
+ " (default: 1G)\n");
+ backing_src_help("-s");
+ puts("");
+}
+
+int main(int argc, char *argv[])
+{
+ int opt;
+
+ TEST_REQUIRE(get_kvm_param_bool("eager_page_split"));
+ TEST_REQUIRE(get_kvm_param_bool("tdp_mmu"));
+
+ while ((opt = getopt(argc, argv, "b:hs:")) != -1) {
+ switch (opt) {
+ case 'b':
+ guest_percpu_mem_size = parse_size(optarg);
+ break;
+ case 'h':
+ help(argv[0]);
+ exit(0);
+ case 's':
+ backing_src = parse_backing_src_type(optarg);
+ break;
+ default:
+ help(argv[0]);
+ exit(1);
+ }
+ }
+
+ if (!is_backing_src_hugetlb(backing_src)) {
+ pr_info("This test will only work reliably with HugeTLB memory. "
+ "It can work with THP, but that is best effort.\n");
+ }
+
+ guest_modes_append_default();
+
+ dirty_log_manual_caps = 0;
+ for_each_guest_mode(run_test, NULL);
+
+ dirty_log_manual_caps =
+ kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+
+ if (dirty_log_manual_caps) {
+ dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
+ KVM_DIRTY_LOG_INITIALLY_SET);
+ for_each_guest_mode(run_test, NULL);
+ } else {
+ pr_info("Skipping testing with MANUAL_PROTECT as it is not supported");
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index 251794f83719..7f36c32fa760 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -226,7 +226,7 @@ static void help(char *name)
puts("");
printf("usage: %s [-h] [-p period_ms] [-t token]\n", name);
puts("");
- printf(" -p: The NX reclaim period in miliseconds.\n");
+ printf(" -p: The NX reclaim period in milliseconds.\n");
printf(" -t: The magic token to indicate environment setup is done.\n");
printf(" -r: The test has reboot permissions and can disable NX huge pages.\n");
puts("");
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
index fa03c8d1ce4e..e710b6e7fb38 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
@@ -116,29 +116,21 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE();
}
-static void stable_tsc_check_supported(void)
+static bool system_has_stable_tsc(void)
{
+ bool tsc_is_stable;
FILE *fp;
char buf[4];
fp = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");
if (fp == NULL)
- goto skip_test;
+ return false;
- if (fgets(buf, sizeof(buf), fp) == NULL)
- goto close_fp;
+ tsc_is_stable = fgets(buf, sizeof(buf), fp) &&
+ !strncmp(buf, "tsc", sizeof(buf));
- if (strncmp(buf, "tsc", sizeof(buf)))
- goto close_fp;
-
- fclose(fp);
- return;
-
-close_fp:
fclose(fp);
-skip_test:
- print_skip("Kernel does not use TSC clocksource - assuming that host TSC is not stable");
- exit(KSFT_SKIP);
+ return tsc_is_stable;
}
int main(int argc, char *argv[])
@@ -156,7 +148,7 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
- stable_tsc_check_supported();
+ TEST_REQUIRE(system_has_stable_tsc());
/*
* We set L1's scale factor to be a random number from 2 to 10.
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index d1d421ec10a3..cd3cc52c59b4 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -50,3 +50,4 @@ CONFIG_CRYPTO_SM4_GENERIC=y
CONFIG_AMT=m
CONFIG_VXLAN=m
CONFIG_IP_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index 6032f9b23c4c..e317c2e44dae 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -6,6 +6,7 @@ CONFIG_INET_DIAG=m
CONFIG_INET_MPTCP_DIAG=m
CONFIG_VETH=y
CONFIG_NET_SCH_NETEM=m
+CONFIG_SYN_COOKIES=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NETFILTER_NETLINK=m
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 13561e5bc0cd..bbae40882bfa 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -718,6 +718,7 @@ table inet mangle {
EOF
if [ $? -ne 0 ]; then
echo "SKIP: $msg, could not load nft ruleset"
+ mptcp_lib_fail_if_expected_feature "nft rules"
return
fi
@@ -733,6 +734,7 @@ EOF
if [ $? -ne 0 ]; then
ip netns exec "$listener_ns" nft flush ruleset
echo "SKIP: $msg, ip $r6flag rule failed"
+ mptcp_lib_fail_if_expected_feature "ip rule"
return
fi
@@ -741,6 +743,7 @@ EOF
ip netns exec "$listener_ns" nft flush ruleset
ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100
echo "SKIP: $msg, ip route add local $local_addr failed"
+ mptcp_lib_fail_if_expected_feature "ip route"
return
fi
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index f295a371ff14..dc8d473fc82c 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -12,6 +12,8 @@ ksft_skip=4
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
mptcp_connect=""
+iptables="iptables"
+ip6tables="ip6tables"
sec=$(date +%s)
rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
@@ -25,7 +27,7 @@ add_mark_rules()
local m=$2
local t
- for t in iptables ip6tables; do
+ for t in ${iptables} ${ip6tables}; do
# just to debug: check we have multiple subflows connection requests
ip netns exec $ns $t -A OUTPUT -p tcp --syn -m mark --mark $m -j ACCEPT
@@ -95,14 +97,14 @@ if [ $? -ne 0 ];then
exit $ksft_skip
fi
-iptables -V > /dev/null 2>&1
-if [ $? -ne 0 ];then
+# Use the legacy version if available to support old kernel versions
+if iptables-legacy -V &> /dev/null; then
+ iptables="iptables-legacy"
+ ip6tables="ip6tables-legacy"
+elif ! iptables -V &> /dev/null; then
echo "SKIP: Could not run all tests without iptables tool"
exit $ksft_skip
-fi
-
-ip6tables -V > /dev/null 2>&1
-if [ $? -ne 0 ];then
+elif ! ip6tables -V &> /dev/null; then
echo "SKIP: Could not run all tests without ip6tables tool"
exit $ksft_skip
fi
@@ -112,10 +114,10 @@ check_mark()
local ns=$1
local af=$2
- local tables=iptables
+ local tables=${iptables}
if [ $af -eq 6 ];then
- tables=ip6tables
+ tables=${ip6tables}
fi
local counters values
@@ -126,6 +128,7 @@ check_mark()
for v in $values; do
if [ $v -ne 0 ]; then
echo "FAIL: got $tables $values in ns $ns , not 0 - not all expected packets marked" 1>&2
+ ret=1
return 1
fi
done
@@ -225,11 +228,11 @@ do_transfer()
fi
if [ $local_addr = "::" ];then
- check_mark $listener_ns 6
- check_mark $connector_ns 6
+ check_mark $listener_ns 6 || retc=1
+ check_mark $connector_ns 6 || retc=1
else
- check_mark $listener_ns 4
- check_mark $connector_ns 4
+ check_mark $listener_ns 4 || retc=1
+ check_mark $connector_ns 4 || retc=1
fi
check_transfer $cin $sout "file received by server"
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index abddf4c63e79..1887bd61bd9a 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -425,7 +425,7 @@ int dsf(int fd, int pm_family, int argc, char *argv[])
}
/* token */
- token = atoi(params[4]);
+ token = strtoul(params[4], NULL, 10);
rta = (void *)(data + off);
rta->rta_type = MPTCP_PM_ATTR_TOKEN;
rta->rta_len = RTA_LENGTH(4);
@@ -551,7 +551,7 @@ int csf(int fd, int pm_family, int argc, char *argv[])
}
/* token */
- token = atoi(params[4]);
+ token = strtoul(params[4], NULL, 10);
rta = (void *)(data + off);
rta->rta_type = MPTCP_PM_ATTR_TOKEN;
rta->rta_len = RTA_LENGTH(4);
@@ -598,7 +598,7 @@ int remove_addr(int fd, int pm_family, int argc, char *argv[])
if (++arg >= argc)
error(1, 0, " missing token value");
- token = atoi(argv[arg]);
+ token = strtoul(argv[arg], NULL, 10);
rta = (void *)(data + off);
rta->rta_type = MPTCP_PM_ATTR_TOKEN;
rta->rta_len = RTA_LENGTH(4);
@@ -710,7 +710,7 @@ int announce_addr(int fd, int pm_family, int argc, char *argv[])
if (++arg >= argc)
error(1, 0, " missing token value");
- token = atoi(argv[arg]);
+ token = strtoul(argv[arg], NULL, 10);
} else
error(1, 0, "unknown keyword %s", argv[arg]);
}
@@ -1347,7 +1347,7 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
error(1, 0, " missing token value");
/* token */
- token = atoi(argv[arg]);
+ token = strtoul(argv[arg], NULL, 10);
} else if (!strcmp(argv[arg], "flags")) {
char *tok, *str;
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 98d9e4d2d3fc..b180133a30af 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -423,6 +423,7 @@ test_remove()
stdbuf -o0 -e0 printf "[OK]\n"
else
stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
fi
# RM_ADDR using an invalid addr id should result in no action
@@ -437,6 +438,7 @@ test_remove()
stdbuf -o0 -e0 printf "[OK]\n"
else
stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
fi
# RM_ADDR from the client to server machine
@@ -848,7 +850,7 @@ test_prio()
local count
# Send MP_PRIO signal from client to server machine
- ip netns exec "$ns2" ./pm_nl_ctl set 10.0.1.2 port "$client4_port" flags backup token "$client4_token" rip 10.0.1.1 rport "$server4_port"
+ ip netns exec "$ns2" ./pm_nl_ctl set 10.0.1.2 port "$client4_port" flags backup token "$client4_token" rip 10.0.1.1 rport "$app4_port"
sleep 0.5
# Check TX
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 62f3b0f56e4d..d3cdc2d33d4b 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -655,4 +655,4 @@ fi
# Control buffer size: --bootargs trace_buf_size=3k
# Get trace-buffer dumps on all oopses: --bootargs ftrace_dump_on_oops
# Ditto, but dump only the oopsing CPU: --bootargs ftrace_dump_on_oops=orig_cpu
-# Heavy-handed way to also dump on warnings: --bootargs panic_on_warn
+# Heavy-handed way to also dump on warnings: --bootargs panic_on_warn=1
diff --git a/tools/testing/selftests/riscv/vector/.gitignore b/tools/testing/selftests/riscv/vector/.gitignore
index 4f2b4e8a3b08..9ae7964491d5 100644
--- a/tools/testing/selftests/riscv/vector/.gitignore
+++ b/tools/testing/selftests/riscv/vector/.gitignore
@@ -1,2 +1,3 @@
vstate_exec_nolibc
vstate_prctl
+v_initval_nolibc
diff --git a/tools/testing/selftests/riscv/vector/Makefile b/tools/testing/selftests/riscv/vector/Makefile
index cd6e80bf995d..bfff0ff4f3be 100644
--- a/tools/testing/selftests/riscv/vector/Makefile
+++ b/tools/testing/selftests/riscv/vector/Makefile
@@ -2,7 +2,7 @@
# Copyright (C) 2021 ARM Limited
# Originally tools/testing/arm64/abi/Makefile
-TEST_GEN_PROGS := vstate_prctl
+TEST_GEN_PROGS := vstate_prctl v_initval_nolibc
TEST_GEN_PROGS_EXTENDED := vstate_exec_nolibc
include ../../lib.mk
@@ -13,3 +13,7 @@ $(OUTPUT)/vstate_prctl: vstate_prctl.c ../hwprobe/sys_hwprobe.S
$(OUTPUT)/vstate_exec_nolibc: vstate_exec_nolibc.c
$(CC) -nostdlib -static -include ../../../../include/nolibc/nolibc.h \
-Wall $(CFLAGS) $(LDFLAGS) $^ -o $@ -lgcc
+
+$(OUTPUT)/v_initval_nolibc: v_initval_nolibc.c
+ $(CC) -nostdlib -static -include ../../../../include/nolibc/nolibc.h \
+ -Wall $(CFLAGS) $(LDFLAGS) $^ -o $@ -lgcc
diff --git a/tools/testing/selftests/riscv/vector/v_initval_nolibc.c b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
new file mode 100644
index 000000000000..66764edb0d52
--- /dev/null
+++ b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "../../kselftest.h"
+#define MAX_VSIZE (8192 * 32)
+
+void dump(char *ptr, int size)
+{
+ int i = 0;
+
+ for (i = 0; i < size; i++) {
+ if (i != 0) {
+ if (i % 16 == 0)
+ printf("\n");
+ else if (i % 8 == 0)
+ printf(" ");
+ }
+ printf("%02x ", ptr[i]);
+ }
+ printf("\n");
+}
+
+int main(void)
+{
+ int i;
+ unsigned long vl;
+ char *datap, *tmp;
+
+ datap = malloc(MAX_VSIZE);
+ if (!datap) {
+ ksft_test_result_fail("fail to allocate memory for size = %lu\n", MAX_VSIZE);
+ exit(-1);
+ }
+
+ tmp = datap;
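+ /* store v0-v31 (four LMUL=8 register groups) so their initial values can be checked */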
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vse8.v v0, (%2)\n\t"
+ "add %1, %2, %0\n\t"
+ "vse8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl), "=r" (tmp) : "r" (datap) : "memory");
+
+ ksft_print_msg("vl = %lu\n", vl);
+
+ if (datap[0] != 0x00 && datap[0] != 0xff) {
+ ksft_test_result_fail("v-regesters are not properly initialized\n");
+ dump(datap, vl * 4);
+ exit(-1);
+ }
+
+ for (i = 1; i < vl * 4; i++) {
+ if (datap[i] != datap[0]) {
+ ksft_test_result_fail("detect stale values on v-regesters\n");
+ dump(datap, vl * 4);
+ exit(-2);
+ }
+ }
+
+ free(datap);
+ ksft_exit_pass();
+ return 0;
+}
diff --git a/tools/testing/selftests/tty/.gitignore b/tools/testing/selftests/tty/.gitignore
new file mode 100644
index 000000000000..fe70462a4aad
--- /dev/null
+++ b/tools/testing/selftests/tty/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+tty_tstamp_update
diff --git a/tools/testing/selftests/tty/Makefile b/tools/testing/selftests/tty/Makefile
new file mode 100644
index 000000000000..50d7027b2ae3
--- /dev/null
+++ b/tools/testing/selftests/tty/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS = -O2 -Wall
+TEST_GEN_PROGS := tty_tstamp_update
+
+include ../lib.mk
diff --git a/tools/testing/selftests/tty/tty_tstamp_update.c b/tools/testing/selftests/tty/tty_tstamp_update.c
new file mode 100644
index 000000000000..0ee97943dccc
--- /dev/null
+++ b/tools/testing/selftests/tty/tty_tstamp_update.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <linux/limits.h>
+
+#include "../kselftest.h"
+
+#define MIN_TTY_PATH_LEN 8
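+/* strlen("/dev/tty") == strlen("/dev/pts") == 8; both prefixes are checked below */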
+
+static bool tty_valid(char *tty)
+{
+ if (strlen(tty) < MIN_TTY_PATH_LEN)
+ return false;
+
+ if (strncmp(tty, "/dev/tty", MIN_TTY_PATH_LEN) == 0 ||
+ strncmp(tty, "/dev/pts", MIN_TTY_PATH_LEN) == 0)
+ return true;
+
+ return false;
+}
+
+static int write_dev_tty(void)
+{
+ FILE *f;
+ int r = 0;
+
+ f = fopen("/dev/tty", "r+");
+ if (!f)
+ return -errno;
+
+ r = fprintf(f, "hello, world!\n");
+ if (r != strlen("hello, world!\n"))
+ r = -EIO;
+
+ fclose(f);
+ return r;
+}
+
+int main(int argc, char **argv)
+{
+ int r;
+ char tty[PATH_MAX] = {};
+ struct stat st1, st2;
+
+ ksft_print_header();
+ ksft_set_plan(1);
+
+ r = readlink("/proc/self/fd/0", tty, PATH_MAX);
+ if (r < 0)
+ ksft_exit_fail_msg("readlink on /proc/self/fd/0 failed: %m\n");
+
+ if (!tty_valid(tty))
+ ksft_exit_skip("invalid tty path '%s'\n", tty);
+
+ r = stat(tty, &st1);
+ if (r < 0)
+ ksft_exit_fail_msg("stat failed on tty path '%s': %m\n", tty);
+
+ /* We need to wait at least 8 seconds in order to observe timestamp change */
+ /* https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=fbf47635315ab308c9b58a1ea0906e711a9228de */
+ sleep(10);
+
+ r = write_dev_tty();
+ if (r < 0)
+ ksft_exit_fail_msg("failed to write to /dev/tty: %s\n",
+ strerror(-r));
+
+ r = stat(tty, &st2);
+ if (r < 0)
+ ksft_exit_fail_msg("stat failed on tty path '%s': %m\n", tty);
+
+ /* We wrote to the terminal so timestamps should have been updated */
+ if (st1.st_atim.tv_sec == st2.st_atim.tv_sec &&
+ st1.st_mtim.tv_sec == st2.st_mtim.tv_sec) {
+ ksft_test_result_fail("tty timestamps not updated\n");
+ ksft_exit_fail();
+ }
+
+ ksft_test_result_pass(
+ "timestamps of terminal '%s' updated after write to /dev/tty\n", tty);
+ return EXIT_SUCCESS;
+}
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index 69c7796c7ca9..405ff262ca93 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -514,10 +514,32 @@ n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter'
n1 ping -W 1 -c 1 192.168.241.2
[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.3:1" ]]
-ip1 link del veth1
-ip1 link del veth3
-ip1 link del wg0
-ip2 link del wg0
+ip1 link del dev veth3
+ip1 link del dev wg0
+ip2 link del dev wg0
+
+# Make sure persistent keepalives are sent when an adapter comes up
+ip1 link add dev wg0 type wireguard
+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
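+# "wg show wg0 transfer" prints "<peer> <rx-bytes> <tx-bytes>", so the third field is tx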
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -eq 0 ]]
+ip1 link set dev wg0 up
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -gt 0 ]]
+ip1 link del dev wg0
+# This should also happen even if the private key is set later
+ip1 link add dev wg0 type wireguard
+n1 wg set wg0 peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -eq 0 ]]
+ip1 link set dev wg0 up
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -eq 0 ]]
+n1 wg set wg0 private-key <(echo "$key1")
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -gt 0 ]]
+ip1 link del dev veth1
+ip1 link del dev wg0
# We test that Netlink/IPC is working properly by doing things that usually cause split responses
ip0 link add dev wg0 type wireguard
diff --git a/tools/tracing/rtla/src/osnoise.c b/tools/tracing/rtla/src/osnoise.c
index 3ca7a3853943..245e9344932b 100644
--- a/tools/tracing/rtla/src/osnoise.c
+++ b/tools/tracing/rtla/src/osnoise.c
@@ -841,6 +841,67 @@ static void osnoise_put_irq_disable(struct osnoise_context *context)
context->orig_opt_irq_disable = OSNOISE_OPTION_INIT_VAL;
}
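+/*
+ * Read the OSNOISE_WORKLOAD option, caching the original value so it can be
+ * restored when the osnoise context is released.
+ */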
+static int osnoise_get_workload(struct osnoise_context *context)
+{
+ if (context->opt_workload != OSNOISE_OPTION_INIT_VAL)
+ return context->opt_workload;
+
+ if (context->orig_opt_workload != OSNOISE_OPTION_INIT_VAL)
+ return context->orig_opt_workload;
+
+ context->orig_opt_workload = osnoise_options_get_option("OSNOISE_WORKLOAD");
+
+ return context->orig_opt_workload;
+}
+
+int osnoise_set_workload(struct osnoise_context *context, bool onoff)
+{
+ int opt_workload = osnoise_get_workload(context);
+ int retval;
+
+ if (opt_workload == OSNOISE_OPTION_INIT_VAL)
+ return -1;
+
+ if (opt_workload == onoff)
+ return 0;
+
+ retval = osnoise_options_set_option("OSNOISE_WORKLOAD", onoff);
+ if (retval < 0)
+ return -1;
+
+ context->opt_workload = onoff;
+
+ return 0;
+}
+
+static void osnoise_restore_workload(struct osnoise_context *context)
+{
+ int retval;
+
+ if (context->orig_opt_workload == OSNOISE_OPTION_INIT_VAL)
+ return;
+
+ if (context->orig_opt_workload == context->opt_workload)
+ goto out_done;
+
+ retval = osnoise_options_set_option("OSNOISE_WORKLOAD", context->orig_opt_workload);
+ if (retval < 0)
+ err_msg("Could not restore original OSNOISE_WORKLOAD option\n");
+
+out_done:
+ context->orig_opt_workload = OSNOISE_OPTION_INIT_VAL;
+}
+
+static void osnoise_put_workload(struct osnoise_context *context)
+{
+ osnoise_restore_workload(context);
+
+ if (context->orig_opt_workload == OSNOISE_OPTION_INIT_VAL)
+ return;
+
+ context->orig_opt_workload = OSNOISE_OPTION_INIT_VAL;
+}
+
/*
* enable_osnoise - enable osnoise tracer in the trace_instance
*/
@@ -908,6 +969,9 @@ struct osnoise_context *osnoise_context_alloc(void)
context->orig_opt_irq_disable = OSNOISE_OPTION_INIT_VAL;
context->opt_irq_disable = OSNOISE_OPTION_INIT_VAL;
+ context->orig_opt_workload = OSNOISE_OPTION_INIT_VAL;
+ context->opt_workload = OSNOISE_OPTION_INIT_VAL;
+
osnoise_get_context(context);
return context;
@@ -935,6 +999,7 @@ void osnoise_put_context(struct osnoise_context *context)
osnoise_put_print_stack(context);
osnoise_put_tracing_thresh(context);
osnoise_put_irq_disable(context);
+ osnoise_put_workload(context);
free(context);
}
diff --git a/tools/tracing/rtla/src/osnoise.h b/tools/tracing/rtla/src/osnoise.h
index 4dcf22ccd704..555f4f4903cc 100644
--- a/tools/tracing/rtla/src/osnoise.h
+++ b/tools/tracing/rtla/src/osnoise.h
@@ -42,6 +42,10 @@ struct osnoise_context {
/* -1 as init value because 0 is off */
int orig_opt_irq_disable;
int opt_irq_disable;
+
+ /* -1 as init value because 0 is off */
+ int orig_opt_workload;
+ int opt_workload;
};
/*
@@ -84,6 +88,7 @@ int osnoise_set_print_stack(struct osnoise_context *context,
long long print_stack);
int osnoise_set_irq_disable(struct osnoise_context *context, bool onoff);
+int osnoise_set_workload(struct osnoise_context *context, bool onoff);
/*
* osnoise_tool - osnoise based tool definition.
diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
index 13e1233690bb..8f81fa007364 100644
--- a/tools/tracing/rtla/src/osnoise_hist.c
+++ b/tools/tracing/rtla/src/osnoise_hist.c
@@ -3,6 +3,7 @@
* Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
*/
+#define _GNU_SOURCE
#include <getopt.h>
#include <stdlib.h>
#include <string.h>
@@ -11,14 +12,16 @@
#include <errno.h>
#include <stdio.h>
#include <time.h>
+#include <sched.h>
#include "utils.h"
#include "osnoise.h"
struct osnoise_hist_params {
char *cpus;
- char *monitored_cpus;
+ cpu_set_t monitored_cpus;
char *trace_output;
+ char *cgroup_name;
unsigned long long runtime;
unsigned long long period;
long long threshold;
@@ -28,6 +31,9 @@ struct osnoise_hist_params {
int duration;
int set_sched;
int output_divisor;
+ int cgroup;
+ int hk_cpus;
+ cpu_set_t hk_cpu_set;
struct sched_attr sched_param;
struct trace_events *events;
@@ -268,7 +274,7 @@ static void osnoise_hist_header(struct osnoise_tool *tool)
trace_seq_printf(s, "Index");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].count)
@@ -299,7 +305,7 @@ osnoise_print_summary(struct osnoise_hist_params *params,
trace_seq_printf(trace->seq, "count:");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].count)
@@ -313,7 +319,7 @@ osnoise_print_summary(struct osnoise_hist_params *params,
trace_seq_printf(trace->seq, "min: ");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].count)
@@ -328,7 +334,7 @@ osnoise_print_summary(struct osnoise_hist_params *params,
trace_seq_printf(trace->seq, "avg: ");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].count)
@@ -346,7 +352,7 @@ osnoise_print_summary(struct osnoise_hist_params *params,
trace_seq_printf(trace->seq, "max: ");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].count)
@@ -381,7 +387,7 @@ osnoise_print_stats(struct osnoise_hist_params *params, struct osnoise_tool *too
bucket * data->bucket_size);
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].count)
@@ -405,7 +411,7 @@ osnoise_print_stats(struct osnoise_hist_params *params, struct osnoise_tool *too
trace_seq_printf(trace->seq, "over: ");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].count)
@@ -432,8 +438,8 @@ static void osnoise_hist_usage(char *usage)
"",
" usage: rtla osnoise hist [-h] [-D] [-d s] [-a us] [-p us] [-r us] [-s us] [-S us] \\",
" [-T us] [-t[=file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] \\",
- " [-c cpu-list] [-P priority] [-b N] [-E N] [--no-header] [--no-summary] [--no-index] \\",
- " [--with-zeros]",
+ " [-c cpu-list] [-H cpu-list] [-P priority] [-b N] [-E N] [--no-header] [--no-summary] \\",
+ " [--no-index] [--with-zeros] [-C[=cgroup_name]]",
"",
" -h/--help: print this menu",
" -a/--auto: set automatic trace mode, stopping the session if argument in us sample is hit",
@@ -443,6 +449,8 @@ static void osnoise_hist_usage(char *usage)
" -S/--stop-total us: stop trace if the total sample is higher than the argument in us",
" -T/--threshold us: the minimum delta to be considered a noise",
" -c/--cpus cpu-list: list of cpus to run osnoise threads",
+ " -H/--house-keeping cpus: run rtla control threads only on the given cpus",
+ " -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited",
" -d/--duration time[s|m|h|d]: duration of the session",
" -D/--debug: print debug info",
" -t/--trace[=file]: save the stopped trace to [file|osnoise_trace.txt]",
@@ -501,8 +509,10 @@ static struct osnoise_hist_params
{"bucket-size", required_argument, 0, 'b'},
{"entries", required_argument, 0, 'E'},
{"cpus", required_argument, 0, 'c'},
+ {"cgroup", optional_argument, 0, 'C'},
{"debug", no_argument, 0, 'D'},
{"duration", required_argument, 0, 'd'},
+ {"house-keeping", required_argument, 0, 'H'},
{"help", no_argument, 0, 'h'},
{"period", required_argument, 0, 'p'},
{"priority", required_argument, 0, 'P'},
@@ -524,7 +534,7 @@ static struct osnoise_hist_params
/* getopt_long stores the option index here. */
int option_index = 0;
- c = getopt_long(argc, argv, "a:c:b:d:e:E:Dhp:P:r:s:S:t::T:01234:5:",
+ c = getopt_long(argc, argv, "a:c:C::b:d:e:E:DhH:p:P:r:s:S:t::T:01234:5:",
long_options, &option_index);
/* detect the end of the options. */
@@ -549,11 +559,21 @@ static struct osnoise_hist_params
osnoise_hist_usage("Bucket size needs to be > 0 and <= 1000000\n");
break;
case 'c':
- retval = parse_cpu_list(optarg, &params->monitored_cpus);
+ retval = parse_cpu_set(optarg, &params->monitored_cpus);
if (retval)
osnoise_hist_usage("\nInvalid -c cpu list\n");
params->cpus = optarg;
break;
+ case 'C':
+ params->cgroup = 1;
+ if (!optarg) {
+ /* will inherit this cgroup */
+ params->cgroup_name = NULL;
+ } else if (*optarg == '=') {
+ /* skip the = */
+ params->cgroup_name = ++optarg;
+ }
+ break;
case 'D':
config_debug = 1;
break;
@@ -583,6 +603,14 @@ static struct osnoise_hist_params
case '?':
osnoise_hist_usage(NULL);
break;
+ case 'H':
+ params->hk_cpus = 1;
+ retval = parse_cpu_set(optarg, &params->hk_cpu_set);
+ if (retval) {
+ err_msg("Error parsing house keeping CPUs\n");
+ exit(EXIT_FAILURE);
+ }
+ break;
case 'p':
params->period = get_llong_from_str(optarg);
if (params->period > 10000000)
@@ -718,6 +746,24 @@ osnoise_hist_apply_config(struct osnoise_tool *tool, struct osnoise_hist_params
}
}
+ if (params->hk_cpus) {
+ retval = sched_setaffinity(getpid(), sizeof(params->hk_cpu_set),
+ &params->hk_cpu_set);
+ if (retval == -1) {
+ err_msg("Failed to set rtla to the house keeping CPUs\n");
+ goto out_err;
+ }
+ } else if (params->cpus) {
+ /*
+ * Even if the user does not set a house-keeping CPU, try to
+ * move rtla to a CPU set different from the one where the user
+ * set the workload to run.
+ *
+ * No need to check results as this is an automatic attempt.
+ */
+ auto_house_keeping(&params->monitored_cpus);
+ }
+
return 0;
out_err:
@@ -816,7 +862,13 @@ int osnoise_hist_main(int argc, char *argv[])
}
}
- trace_instance_start(trace);
+ if (params->cgroup) {
+ retval = set_comm_cgroup("timerlat/", params->cgroup_name);
+ if (!retval) {
+ err_msg("Failed to move threads to cgroup\n");
+ goto out_free;
+ }
+ }
if (params->trace_output) {
record = osnoise_init_trace_tool("osnoise");
@@ -831,9 +883,19 @@ int osnoise_hist_main(int argc, char *argv[])
goto out_hist;
}
- trace_instance_start(&record->trace);
}
+ /*
+ * Start the tracer here, after having set all instances.
+ *
+ * Let the trace instance start first, in case a stop-tracing condition
+ * is hit while the other instances are being enabled. The trace instance
+ * is the one with the most valuable information.
+ */
+ if (params->trace_output)
+ trace_instance_start(&record->trace);
+ trace_instance_start(trace);
+
tool->start_time = time(NULL);
osnoise_hist_set_signals(params);
diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
index 562f2e4b18c5..f7c959be8677 100644
--- a/tools/tracing/rtla/src/osnoise_top.c
+++ b/tools/tracing/rtla/src/osnoise_top.c
@@ -3,6 +3,7 @@
* Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
*/
+#define _GNU_SOURCE
#include <getopt.h>
#include <stdlib.h>
#include <string.h>
@@ -10,6 +11,7 @@
#include <unistd.h>
#include <stdio.h>
#include <time.h>
+#include <sched.h>
#include "osnoise.h"
#include "utils.h"
@@ -24,8 +26,9 @@ enum osnoise_mode {
*/
struct osnoise_top_params {
char *cpus;
- char *monitored_cpus;
+ cpu_set_t monitored_cpus;
char *trace_output;
+ char *cgroup_name;
unsigned long long runtime;
unsigned long long period;
long long threshold;
@@ -35,6 +38,9 @@ struct osnoise_top_params {
int duration;
int quiet;
int set_sched;
+ int cgroup;
+ int hk_cpus;
+ cpu_set_t hk_cpu_set;
struct sched_attr sched_param;
struct trace_events *events;
enum osnoise_mode mode;
@@ -257,7 +263,7 @@ osnoise_print_stats(struct osnoise_top_params *params, struct osnoise_tool *top)
osnoise_top_header(top);
for (i = 0; i < nr_cpus; i++) {
- if (params->cpus && !params->monitored_cpus[i])
+ if (params->cpus && !CPU_ISSET(i, &params->monitored_cpus))
continue;
osnoise_top_print(top, i);
}
@@ -276,7 +282,7 @@ static void osnoise_top_usage(struct osnoise_top_params *params, char *usage)
static const char * const msg[] = {
" [-h] [-q] [-D] [-d s] [-a us] [-p us] [-r us] [-s us] [-S us] \\",
" [-T us] [-t[=file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] \\",
- " [-c cpu-list] [-P priority]",
+ " [-c cpu-list] [-H cpu-list] [-P priority] [-C[=cgroup_name]]",
"",
" -h/--help: print this menu",
" -a/--auto: set automatic trace mode, stopping the session if argument in us sample is hit",
@@ -286,6 +292,8 @@ static void osnoise_top_usage(struct osnoise_top_params *params, char *usage)
" -S/--stop-total us: stop trace if the total sample is higher than the argument in us",
" -T/--threshold us: the minimum delta to be considered a noise",
" -c/--cpus cpu-list: list of cpus to run osnoise threads",
+ " -H/--house-keeping cpus: run rtla control threads only on the given cpus",
+ " -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited",
" -d/--duration time[s|m|h|d]: duration of the session",
" -D/--debug: print debug info",
" -t/--trace[=file]: save the stopped trace to [file|osnoise_trace.txt]",
@@ -340,16 +348,24 @@ struct osnoise_top_params *osnoise_top_parse_args(int argc, char **argv)
if (!params)
exit(1);
- if (strcmp(argv[0], "hwnoise") == 0)
+ if (strcmp(argv[0], "hwnoise") == 0) {
params->mode = MODE_HWNOISE;
+ /*
+ * Reduce CPU usage for 75% to avoid killing the system.
+ */
+ params->runtime = 750000;
+ params->period = 1000000;
+ }
while (1) {
static struct option long_options[] = {
{"auto", required_argument, 0, 'a'},
{"cpus", required_argument, 0, 'c'},
+ {"cgroup", optional_argument, 0, 'C'},
{"debug", no_argument, 0, 'D'},
{"duration", required_argument, 0, 'd'},
{"event", required_argument, 0, 'e'},
+ {"house-keeping", required_argument, 0, 'H'},
{"help", no_argument, 0, 'h'},
{"period", required_argument, 0, 'p'},
{"priority", required_argument, 0, 'P'},
@@ -367,7 +383,7 @@ struct osnoise_top_params *osnoise_top_parse_args(int argc, char **argv)
/* getopt_long stores the option index here. */
int option_index = 0;
- c = getopt_long(argc, argv, "a:c:d:De:hp:P:qr:s:S:t::T:0:1:",
+ c = getopt_long(argc, argv, "a:c:C::d:De:hH:p:P:qr:s:S:t::T:0:1:",
long_options, &option_index);
/* Detect the end of the options. */
@@ -387,11 +403,21 @@ struct osnoise_top_params *osnoise_top_parse_args(int argc, char **argv)
break;
case 'c':
- retval = parse_cpu_list(optarg, &params->monitored_cpus);
+ retval = parse_cpu_set(optarg, &params->monitored_cpus);
if (retval)
osnoise_top_usage(params, "\nInvalid -c cpu list\n");
params->cpus = optarg;
break;
+ case 'C':
+ params->cgroup = 1;
+ if (!optarg) {
+ /* will inherit this cgroup */
+ params->cgroup_name = NULL;
+ } else if (*optarg == '=') {
+ /* skip the = */
+ params->cgroup_name = ++optarg;
+ }
+ break;
case 'D':
config_debug = 1;
break;
@@ -416,6 +442,14 @@ struct osnoise_top_params *osnoise_top_parse_args(int argc, char **argv)
case '?':
osnoise_top_usage(params, NULL);
break;
+ case 'H':
+ params->hk_cpus = 1;
+ retval = parse_cpu_set(optarg, &params->hk_cpu_set);
+ if (retval) {
+ err_msg("Error parsing house keeping CPUs\n");
+ exit(EXIT_FAILURE);
+ }
+ break;
case 'p':
params->period = get_llong_from_str(optarg);
if (params->period > 10000000)
@@ -547,6 +581,24 @@ osnoise_top_apply_config(struct osnoise_tool *tool, struct osnoise_top_params *p
}
}
+ if (params->hk_cpus) {
+ retval = sched_setaffinity(getpid(), sizeof(params->hk_cpu_set),
+ &params->hk_cpu_set);
+ if (retval == -1) {
+ err_msg("Failed to set rtla to the house keeping CPUs\n");
+ goto out_err;
+ }
+ } else if (params->cpus) {
+ /*
+ * Even if the user does not set a house-keeping CPU, try to
+ * move rtla to a CPU set different from the one where the user
+ * set the workload to run.
+ *
+ * No need to check results as this is an automatic attempt.
+ */
+ auto_house_keeping(&params->monitored_cpus);
+ }
+
return 0;
out_err:
@@ -643,7 +695,13 @@ int osnoise_top_main(int argc, char **argv)
}
}
- trace_instance_start(trace);
+ if (params->cgroup) {
+ retval = set_comm_cgroup("osnoise/", params->cgroup_name);
+ if (!retval) {
+ err_msg("Failed to move threads to cgroup\n");
+ goto out_free;
+ }
+ }
if (params->trace_output) {
record = osnoise_init_trace_tool("osnoise");
@@ -657,9 +715,18 @@ int osnoise_top_main(int argc, char **argv)
if (retval)
goto out_top;
}
+ }
+ /*
+ * Start the tracer here, after having set all instances.
+ *
+ * Let the trace instance start first, in case a stop-tracing condition
+ * is hit while the other instances are being enabled. The trace instance
+ * is the one with the most valuable information.
+ */
+ if (params->trace_output)
trace_instance_start(&record->trace);
- }
+ trace_instance_start(trace);
tool->start_time = time(NULL);
osnoise_top_set_signals(params);
diff --git a/tools/tracing/rtla/src/timerlat_aa.c b/tools/tracing/rtla/src/timerlat_aa.c
index 1843fff66da5..e0ffe69c271c 100644
--- a/tools/tracing/rtla/src/timerlat_aa.c
+++ b/tools/tracing/rtla/src/timerlat_aa.c
@@ -8,6 +8,7 @@
#include "utils.h"
#include "osnoise.h"
#include "timerlat.h"
+#include <unistd.h>
enum timelat_state {
TIMERLAT_INIT = 0,
@@ -233,7 +234,7 @@ static int timerlat_aa_thread_latency(struct timerlat_aa_data *taa_data,
*
* Returns 0 on success, -1 otherwise.
*/
-int timerlat_aa_handler(struct trace_seq *s, struct tep_record *record,
+static int timerlat_aa_handler(struct trace_seq *s, struct tep_record *record,
struct tep_event *event, void *context)
{
struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx();
@@ -665,6 +666,25 @@ print_total:
ns_to_usf(total));
}
+static int timerlat_auto_analysis_collect_trace(struct timerlat_aa_context *taa_ctx)
+{
+ struct trace_instance *trace = &taa_ctx->tool->trace;
+ int retval;
+
+ retval = tracefs_iterate_raw_events(trace->tep,
+ trace->inst,
+ NULL,
+ 0,
+ collect_registered_events,
+ trace);
+ if (retval < 0) {
+ err_msg("Error iterating on events\n");
+ return 0;
+ }
+
+ return 1;
+}
+
/**
* timerlat_auto_analysis - Analyze the collected data
*/
@@ -677,6 +697,8 @@ void timerlat_auto_analysis(int irq_thresh, int thread_thresh)
struct tep_handle *tep;
int cpu;
+ timerlat_auto_analysis_collect_trace(taa_ctx);
+
/* bring stop tracing to the ns scale */
irq_thresh = irq_thresh * 1000;
thread_thresh = thread_thresh * 1000;
@@ -838,6 +860,10 @@ out_err:
*/
static void timerlat_aa_unregister_events(struct osnoise_tool *tool, int dump_tasks)
{
+
+ tep_unregister_event_handler(tool->trace.tep, -1, "ftrace", "timerlat",
+ timerlat_aa_handler, tool);
+
tracefs_event_disable(tool->trace.inst, "osnoise", NULL);
tep_unregister_event_handler(tool->trace.tep, -1, "osnoise", "nmi_noise",
@@ -875,6 +901,10 @@ static int timerlat_aa_register_events(struct osnoise_tool *tool, int dump_tasks
{
int retval;
+ tep_register_event_handler(tool->trace.tep, -1, "ftrace", "timerlat",
+ timerlat_aa_handler, tool);
+
+
/*
* register auto-analysis handlers.
*/
@@ -955,8 +985,9 @@ out_ctx:
*
* Returns 0 on success, -1 otherwise.
*/
-int timerlat_aa_init(struct osnoise_tool *tool, int nr_cpus, int dump_tasks)
+int timerlat_aa_init(struct osnoise_tool *tool, int dump_tasks)
{
+ int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
struct timerlat_aa_context *taa_ctx;
int retval;
diff --git a/tools/tracing/rtla/src/timerlat_aa.h b/tools/tracing/rtla/src/timerlat_aa.h
index d4f6ca7e342a..cea4bb1531a8 100644
--- a/tools/tracing/rtla/src/timerlat_aa.h
+++ b/tools/tracing/rtla/src/timerlat_aa.h
@@ -3,10 +3,7 @@
* Copyright (C) 2023 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
*/
-int timerlat_aa_init(struct osnoise_tool *tool, int nr_cpus, int dump_task);
+int timerlat_aa_init(struct osnoise_tool *tool, int dump_task);
void timerlat_aa_destroy(void);
-int timerlat_aa_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event *event, void *context);
-
void timerlat_auto_analysis(int irq_thresh, int thread_thresh);
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index 4b48af8a8309..47d3d8b53cb2 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -3,6 +3,7 @@
* Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
*/
+#define _GNU_SOURCE
#include <getopt.h>
#include <stdlib.h>
#include <string.h>
@@ -10,15 +11,20 @@
#include <unistd.h>
#include <stdio.h>
#include <time.h>
+#include <sched.h>
+#include <pthread.h>
#include "utils.h"
#include "osnoise.h"
#include "timerlat.h"
+#include "timerlat_aa.h"
+#include "timerlat_u.h"
struct timerlat_hist_params {
char *cpus;
- char *monitored_cpus;
+ cpu_set_t monitored_cpus;
char *trace_output;
+ char *cgroup_name;
unsigned long long runtime;
long long stop_us;
long long stop_total_us;
@@ -29,9 +35,14 @@ struct timerlat_hist_params {
int duration;
int set_sched;
int dma_latency;
+ int cgroup;
+ int hk_cpus;
+ int no_aa;
+ int dump_tasks;
+ int user_hist;
+ cpu_set_t hk_cpu_set;
struct sched_attr sched_param;
struct trace_events *events;
-
char no_irq;
char no_thread;
char no_header;
@@ -45,9 +56,11 @@ struct timerlat_hist_params {
struct timerlat_hist_cpu {
int *irq;
int *thread;
+ int *user;
int irq_count;
int thread_count;
+ int user_count;
unsigned long long min_irq;
unsigned long long sum_irq;
@@ -56,6 +69,10 @@ struct timerlat_hist_cpu {
unsigned long long min_thread;
unsigned long long sum_thread;
unsigned long long max_thread;
+
+ unsigned long long min_user;
+ unsigned long long sum_user;
+ unsigned long long max_user;
};
struct timerlat_hist_data {
@@ -80,6 +97,10 @@ timerlat_free_histogram(struct timerlat_hist_data *data)
if (data->hist[cpu].thread)
free(data->hist[cpu].thread);
+
+ if (data->hist[cpu].user)
+ free(data->hist[cpu].user);
+
}
/* one set of histograms per CPU */
@@ -116,15 +137,21 @@ static struct timerlat_hist_data
data->hist[cpu].irq = calloc(1, sizeof(*data->hist->irq) * (entries + 1));
if (!data->hist[cpu].irq)
goto cleanup;
+
data->hist[cpu].thread = calloc(1, sizeof(*data->hist->thread) * (entries + 1));
if (!data->hist[cpu].thread)
goto cleanup;
+
+ data->hist[cpu].user = calloc(1, sizeof(*data->hist->user) * (entries + 1));
+ if (!data->hist[cpu].user)
+ goto cleanup;
}
/* set the min to max */
for (cpu = 0; cpu < nr_cpus; cpu++) {
data->hist[cpu].min_irq = ~0;
data->hist[cpu].min_thread = ~0;
+ data->hist[cpu].min_user = ~0;
}
return data;
@@ -139,7 +166,7 @@ cleanup:
*/
static void
timerlat_hist_update(struct osnoise_tool *tool, int cpu,
- unsigned long long thread,
+ unsigned long long context,
unsigned long long latency)
{
struct timerlat_hist_params *params = tool->params;
@@ -154,18 +181,24 @@ timerlat_hist_update(struct osnoise_tool *tool, int cpu,
if (data->bucket_size)
bucket = latency / data->bucket_size;
- if (!thread) {
+ if (!context) {
hist = data->hist[cpu].irq;
data->hist[cpu].irq_count++;
update_min(&data->hist[cpu].min_irq, &latency);
update_sum(&data->hist[cpu].sum_irq, &latency);
update_max(&data->hist[cpu].max_irq, &latency);
- } else {
+ } else if (context == 1) {
hist = data->hist[cpu].thread;
data->hist[cpu].thread_count++;
update_min(&data->hist[cpu].min_thread, &latency);
update_sum(&data->hist[cpu].sum_thread, &latency);
update_max(&data->hist[cpu].max_thread, &latency);
+ } else { /* user */
+ hist = data->hist[cpu].user;
+ data->hist[cpu].user_count++;
+ update_min(&data->hist[cpu].min_user, &latency);
+ update_sum(&data->hist[cpu].sum_user, &latency);
+ update_max(&data->hist[cpu].max_user, &latency);
}
if (bucket < entries)
@@ -182,16 +215,16 @@ timerlat_hist_handler(struct trace_seq *s, struct tep_record *record,
struct tep_event *event, void *data)
{
struct trace_instance *trace = data;
- unsigned long long thread, latency;
+ unsigned long long context, latency;
struct osnoise_tool *tool;
int cpu = record->cpu;
tool = container_of(trace, struct osnoise_tool, trace);
- tep_get_field_val(s, event, "context", record, &thread, 1);
+ tep_get_field_val(s, event, "context", record, &context, 1);
tep_get_field_val(s, event, "timer_latency", record, &latency, 1);
- timerlat_hist_update(tool, cpu, thread, latency);
+ timerlat_hist_update(tool, cpu, context, latency);
return 0;
}
@@ -222,7 +255,7 @@ static void timerlat_hist_header(struct osnoise_tool *tool)
trace_seq_printf(s, "Index");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
@@ -233,6 +266,9 @@ static void timerlat_hist_header(struct osnoise_tool *tool)
if (!params->no_thread)
trace_seq_printf(s, " Thr-%03d", cpu);
+
+ if (params->user_hist)
+ trace_seq_printf(s, " Usr-%03d", cpu);
}
trace_seq_printf(s, "\n");
@@ -258,7 +294,7 @@ timerlat_print_summary(struct timerlat_hist_params *params,
trace_seq_printf(trace->seq, "count:");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
@@ -271,6 +307,10 @@ timerlat_print_summary(struct timerlat_hist_params *params,
if (!params->no_thread)
trace_seq_printf(trace->seq, "%9d ",
data->hist[cpu].thread_count);
+
+ if (params->user_hist)
+ trace_seq_printf(trace->seq, "%9d ",
+ data->hist[cpu].user_count);
}
trace_seq_printf(trace->seq, "\n");
@@ -278,7 +318,7 @@ timerlat_print_summary(struct timerlat_hist_params *params,
trace_seq_printf(trace->seq, "min: ");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
@@ -291,6 +331,10 @@ timerlat_print_summary(struct timerlat_hist_params *params,
if (!params->no_thread)
trace_seq_printf(trace->seq, "%9llu ",
data->hist[cpu].min_thread);
+
+ if (params->user_hist)
+ trace_seq_printf(trace->seq, "%9llu ",
+ data->hist[cpu].min_user);
}
trace_seq_printf(trace->seq, "\n");
@@ -298,7 +342,7 @@ timerlat_print_summary(struct timerlat_hist_params *params,
trace_seq_printf(trace->seq, "avg: ");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
@@ -315,7 +359,15 @@ timerlat_print_summary(struct timerlat_hist_params *params,
if (!params->no_thread) {
if (data->hist[cpu].thread_count)
trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].sum_thread / data->hist[cpu].thread_count);
+ data->hist[cpu].sum_thread / data->hist[cpu].thread_count);
+ else
+ trace_seq_printf(trace->seq, " - ");
+ }
+
+ if (params->user_hist) {
+ if (data->hist[cpu].user_count)
+ trace_seq_printf(trace->seq, "%9llu ",
+ data->hist[cpu].sum_user / data->hist[cpu].user_count);
else
trace_seq_printf(trace->seq, " - ");
}
@@ -326,7 +378,7 @@ timerlat_print_summary(struct timerlat_hist_params *params,
trace_seq_printf(trace->seq, "max: ");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
@@ -339,6 +391,10 @@ timerlat_print_summary(struct timerlat_hist_params *params,
if (!params->no_thread)
trace_seq_printf(trace->seq, "%9llu ",
data->hist[cpu].max_thread);
+
+ if (params->user_hist)
+ trace_seq_printf(trace->seq, "%9llu ",
+ data->hist[cpu].max_user);
}
trace_seq_printf(trace->seq, "\n");
trace_seq_do_printf(trace->seq);
@@ -366,7 +422,7 @@ timerlat_print_stats(struct timerlat_hist_params *params, struct osnoise_tool *t
bucket * data->bucket_size);
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
@@ -384,6 +440,12 @@ timerlat_print_stats(struct timerlat_hist_params *params, struct osnoise_tool *t
data->hist[cpu].thread[bucket]);
}
+ if (params->user_hist) {
+ total += data->hist[cpu].user[bucket];
+ trace_seq_printf(trace->seq, "%9d ",
+ data->hist[cpu].user[bucket]);
+ }
+
}
if (total == 0 && !params->with_zeros) {
@@ -400,7 +462,7 @@ timerlat_print_stats(struct timerlat_hist_params *params, struct osnoise_tool *t
trace_seq_printf(trace->seq, "over: ");
for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->cpus && !params->monitored_cpus[cpu])
+ if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
continue;
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
@@ -413,6 +475,10 @@ timerlat_print_stats(struct timerlat_hist_params *params, struct osnoise_tool *t
if (!params->no_thread)
trace_seq_printf(trace->seq, "%9d ",
data->hist[cpu].thread[data->entries]);
+
+ if (params->user_hist)
+ trace_seq_printf(trace->seq, "%9d ",
+ data->hist[cpu].user[data->entries]);
}
trace_seq_printf(trace->seq, "\n");
trace_seq_do_printf(trace->seq);
@@ -431,9 +497,9 @@ static void timerlat_hist_usage(char *usage)
char *msg[] = {
"",
" usage: [rtla] timerlat hist [-h] [-q] [-d s] [-D] [-n] [-a us] [-p us] [-i us] [-T us] [-s us] \\",
- " [-t[=file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] [-c cpu-list] \\",
+ " [-t[=file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] [-c cpu-list] [-H cpu-list]\\",
" [-P priority] [-E N] [-b N] [--no-irq] [--no-thread] [--no-header] [--no-summary] \\",
- " [--no-index] [--with-zeros] [--dma-latency us]",
+ " [--no-index] [--with-zeros] [--dma-latency us] [-C[=cgroup_name]] [--no-aa] [--dump-task] [-u]",
"",
" -h/--help: print this menu",
" -a/--auto: set automatic trace mode, stopping the session if argument in us latency is hit",
@@ -442,13 +508,17 @@ static void timerlat_hist_usage(char *usage)
" -T/--thread us: stop trace if the thread latency is higher than the argument in us",
" -s/--stack us: save the stack trace at the IRQ if a thread latency is higher than the argument in us",
" -c/--cpus cpus: run the tracer only on the given cpus",
+ " -H/--house-keeping cpus: run rtla control threads only on the given cpus",
+ " -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited",
" -d/--duration time[m|h|d]: duration of the session in seconds",
+ " --dump-tasks: prints the task running on all CPUs if stop conditions are met (depends on !--no-aa)",
" -D/--debug: print debug info",
" -t/--trace[=file]: save the stopped trace to [file|timerlat_trace.txt]",
" -e/--event <sys:event>: enable the <sys:event> in the trace instance, multiple -e are allowed",
" --filter <filter>: enable a trace event filter to the previous -e event",
" --trigger <trigger>: enable a trace event trigger to the previous -e event",
" -n/--nano: display data in nanoseconds",
+ " --no-aa: disable auto-analysis, reducing rtla timerlat cpu usage",
" -b/--bucket-size N: set the histogram bucket size (default 1)",
" -E/--entries N: set the number of entries of the histogram (default 256)",
" --no-irq: ignore IRQ latencies",
@@ -464,6 +534,7 @@ static void timerlat_hist_usage(char *usage)
" f:prio - use SCHED_FIFO with prio",
" d:runtime[us|ms|s]:period[us|ms|s] - use SCHED_DEADLINE with runtime and period",
" in nanoseconds",
+ " -u/--user-threads: use rtla user-space threads instead of in-kernel timerlat threads",
NULL,
};
@@ -506,10 +577,12 @@ static struct timerlat_hist_params
static struct option long_options[] = {
{"auto", required_argument, 0, 'a'},
{"cpus", required_argument, 0, 'c'},
+ {"cgroup", optional_argument, 0, 'C'},
{"bucket-size", required_argument, 0, 'b'},
{"debug", no_argument, 0, 'D'},
{"entries", required_argument, 0, 'E'},
{"duration", required_argument, 0, 'd'},
+ {"house-keeping", required_argument, 0, 'H'},
{"help", no_argument, 0, 'h'},
{"irq", required_argument, 0, 'i'},
{"nano", no_argument, 0, 'n'},
@@ -518,6 +591,7 @@ static struct timerlat_hist_params
{"stack", required_argument, 0, 's'},
{"thread", required_argument, 0, 'T'},
{"trace", optional_argument, 0, 't'},
+ {"user-threads", no_argument, 0, 'u'},
{"event", required_argument, 0, 'e'},
{"no-irq", no_argument, 0, '0'},
{"no-thread", no_argument, 0, '1'},
@@ -528,13 +602,15 @@ static struct timerlat_hist_params
{"trigger", required_argument, 0, '6'},
{"filter", required_argument, 0, '7'},
{"dma-latency", required_argument, 0, '8'},
+ {"no-aa", no_argument, 0, '9'},
+ {"dump-task", no_argument, 0, '\1'},
{0, 0, 0, 0}
};
/* getopt_long stores the option index here. */
int option_index = 0;
- c = getopt_long(argc, argv, "a:c:b:d:e:E:Dhi:np:P:s:t::T:0123456:7:8:",
+ c = getopt_long(argc, argv, "a:c:C::b:d:e:E:DhH:i:np:P:s:t::T:u0123456:7:8:9\1",
long_options, &option_index);
/* detect the end of the options. */
@@ -547,6 +623,7 @@ static struct timerlat_hist_params
/* set thread stop to auto_thresh */
params->stop_total_us = auto_thresh;
+ params->stop_us = auto_thresh;
/* get stack trace */
params->print_stack = auto_thresh;
@@ -556,11 +633,21 @@ static struct timerlat_hist_params
break;
case 'c':
- retval = parse_cpu_list(optarg, &params->monitored_cpus);
+ retval = parse_cpu_set(optarg, &params->monitored_cpus);
if (retval)
timerlat_hist_usage("\nInvalid -c cpu list\n");
params->cpus = optarg;
break;
+ case 'C':
+ params->cgroup = 1;
+ if (!optarg) {
+ /* will inherit this cgroup */
+ params->cgroup_name = NULL;
+ } else if (*optarg == '=') {
+ /* skip the = */
+ params->cgroup_name = ++optarg;
+ }
+ break;
case 'b':
params->bucket_size = get_llong_from_str(optarg);
if ((params->bucket_size == 0) || (params->bucket_size >= 1000000))
@@ -595,6 +682,14 @@ static struct timerlat_hist_params
case '?':
timerlat_hist_usage(NULL);
break;
+ case 'H':
+ params->hk_cpus = 1;
+ retval = parse_cpu_set(optarg, &params->hk_cpu_set);
+ if (retval) {
+ err_msg("Error parsing house keeping CPUs\n");
+ exit(EXIT_FAILURE);
+ }
+ break;
case 'i':
params->stop_us = get_llong_from_str(optarg);
break;
@@ -625,6 +720,9 @@ static struct timerlat_hist_params
else
params->trace_output = "timerlat_trace.txt";
break;
+ case 'u':
+ params->user_hist = 1;
+ break;
case '0': /* no irq */
params->no_irq = 1;
break;
@@ -672,6 +770,12 @@ static struct timerlat_hist_params
exit(EXIT_FAILURE);
}
break;
+ case '9':
+ params->no_aa = 1;
+ break;
+ case '\1':
+ params->dump_tasks = 1;
+ break;
default:
timerlat_hist_usage("Invalid option");
}
@@ -688,6 +792,12 @@ static struct timerlat_hist_params
if (params->no_index && !params->with_zeros)
timerlat_hist_usage("no-index set with with-zeros is not set - it does not make sense");
+ /*
+ * Auto analysis only happens if stop tracing, thus:
+ */
+ if (!params->stop_us && !params->stop_total_us)
+ params->no_aa = 1;
+
return params;
}
@@ -697,7 +807,7 @@ static struct timerlat_hist_params
static int
timerlat_hist_apply_config(struct osnoise_tool *tool, struct timerlat_hist_params *params)
{
- int retval;
+ int retval, i;
if (!params->sleep_time)
params->sleep_time = 1;
@@ -708,6 +818,9 @@ timerlat_hist_apply_config(struct osnoise_tool *tool, struct timerlat_hist_param
err_msg("Failed to apply CPUs config\n");
goto out_err;
}
+ } else {
+ for (i = 0; i < sysconf(_SC_NPROCESSORS_CONF); i++)
+ CPU_SET(i, &params->monitored_cpus);
}
if (params->stop_us) {
@@ -742,6 +855,32 @@ timerlat_hist_apply_config(struct osnoise_tool *tool, struct timerlat_hist_param
}
}
+ if (params->hk_cpus) {
+ retval = sched_setaffinity(getpid(), sizeof(params->hk_cpu_set),
+ &params->hk_cpu_set);
+ if (retval == -1) {
+ err_msg("Failed to set rtla to the house keeping CPUs\n");
+ goto out_err;
+ }
+ } else if (params->cpus) {
+ /*
+ * Even if the user does not set a house-keeping CPU, try to
+ * move rtla to a CPU set different from the one where the user
+ * set the workload to run.
+ *
+ * No need to check results as this is an automatic attempt.
+ */
+ auto_house_keeping(&params->monitored_cpus);
+ }
+
+ if (params->user_hist) {
+ retval = osnoise_set_workload(tool->context, 0);
+ if (retval) {
+ err_msg("Failed to set OSNOISE_WORKLOAD option\n");
+ goto out_err;
+ }
+ }
+
return 0;
out_err:
@@ -802,10 +941,13 @@ int timerlat_hist_main(int argc, char *argv[])
{
struct timerlat_hist_params *params;
struct osnoise_tool *record = NULL;
+ struct timerlat_u_params params_u;
struct osnoise_tool *tool = NULL;
+ struct osnoise_tool *aa = NULL;
struct trace_instance *trace;
int dma_latency_fd = -1;
int return_value = 1;
+ pthread_t timerlat_u;
int retval;
params = timerlat_hist_parse_args(argc, argv);
@@ -840,6 +982,14 @@ int timerlat_hist_main(int argc, char *argv[])
}
}
+ if (params->cgroup && !params->user_hist) {
+ retval = set_comm_cgroup("timerlat/", params->cgroup_name);
+ if (!retval) {
+ err_msg("Failed to move threads to cgroup\n");
+ goto out_free;
+ }
+ }
+
if (params->dma_latency >= 0) {
dma_latency_fd = set_cpu_dma_latency(params->dma_latency);
if (dma_latency_fd < 0) {
@@ -848,8 +998,6 @@ int timerlat_hist_main(int argc, char *argv[])
}
}
- trace_instance_start(trace);
-
if (params->trace_output) {
record = osnoise_init_trace_tool("timerlat");
if (!record) {
@@ -862,13 +1010,61 @@ int timerlat_hist_main(int argc, char *argv[])
if (retval)
goto out_hist;
}
+ }
- trace_instance_start(&record->trace);
+ if (!params->no_aa) {
+ aa = osnoise_init_tool("timerlat_aa");
+ if (!aa)
+ goto out_hist;
+
+ retval = timerlat_aa_init(aa, params->dump_tasks);
+ if (retval) {
+ err_msg("Failed to enable the auto analysis instance\n");
+ goto out_hist;
+ }
+
+ retval = enable_timerlat(&aa->trace);
+ if (retval) {
+ err_msg("Failed to enable timerlat tracer\n");
+ goto out_hist;
+ }
}
+ /*
+ * Start the tracers here, after having set all instances.
+ *
+ * Let the trace instance start first, in case a stop-tracing
+ * condition is hit while the other instances are being enabled. The
+ * trace instance is the one with the most valuable information.
+ */
+ if (params->trace_output)
+ trace_instance_start(&record->trace);
+ if (!params->no_aa)
+ trace_instance_start(&aa->trace);
+ trace_instance_start(trace);
+
tool->start_time = time(NULL);
timerlat_hist_set_signals(params);
+ if (params->user_hist) {
+ /* rtla asked to stop */
+ params_u.should_run = 1;
+ /* all threads left */
+ params_u.stopped_running = 0;
+
+ params_u.set = &params->monitored_cpus;
+ if (params->set_sched)
+ params_u.sched_param = &params->sched_param;
+ else
+ params_u.sched_param = NULL;
+
+ params_u.cgroup_name = params->cgroup_name;
+
+ retval = pthread_create(&timerlat_u, NULL, timerlat_u_dispatcher, &params_u);
+ if (retval)
+ err_msg("Error creating timerlat user-space threads\n");
+ }
+
while (!stop_tracing) {
sleep(params->sleep_time);
@@ -885,6 +1081,18 @@ int timerlat_hist_main(int argc, char *argv[])
if (trace_is_off(&tool->trace, &record->trace))
break;
+
+ /* are there still any user-space threads? */
+ if (params->user_hist) {
+ if (params_u.stopped_running) {
+ debug_msg("timerlat user-space threads stopped!\n");
+ break;
+ }
+ }
+ }
+ if (params->user_hist && !params_u.stopped_running) {
+ params_u.should_run = 0;
+ sleep(1);
}
timerlat_print_stats(params, tool);
@@ -893,6 +1101,10 @@ int timerlat_hist_main(int argc, char *argv[])
if (trace_is_off(&tool->trace, &record->trace)) {
printf("rtla timerlat hit stop tracing\n");
+
+ if (!params->no_aa)
+ timerlat_auto_analysis(params->stop_us, params->stop_total_us);
+
if (params->trace_output) {
printf(" Saving trace to %s\n", params->trace_output);
save_trace_to_file(record->trace.inst, params->trace_output);
@@ -900,12 +1112,14 @@ int timerlat_hist_main(int argc, char *argv[])
}
out_hist:
+ timerlat_aa_destroy();
if (dma_latency_fd >= 0)
close(dma_latency_fd);
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
out_free:
timerlat_free_histogram(tool->data);
+ osnoise_destroy_tool(aa);
osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
free(params);
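
The histogram update above now dispatches on the timerlat event's "context" field instead of a boolean thread flag: 0 selects the IRQ statistics, 1 the kernel-thread statistics, and any other value the new user-space ("Usr") statistics behind -u. A standalone sketch of that three-way dispatch, with hypothetical stats helpers and made-up sample values, could look like this:

#include <stdio.h>

struct stats {			/* hypothetical per-context statistics */
	unsigned long long count, min, sum, max;
};

static void stats_update(struct stats *s, unsigned long long latency)
{
	s->count++;
	s->sum += latency;
	if (latency < s->min)
		s->min = latency;
	if (latency > s->max)
		s->max = latency;
}

int main(void)
{
	/* context: 0 = IRQ, 1 = kernel thread, anything else = user-space return */
	struct stats stats[3];
	unsigned long long samples[][2] = { {0, 3}, {1, 7}, {2, 12}, {2, 9} };
	size_t i, ctx;

	for (i = 0; i < 3; i++) {
		stats[i].count = stats[i].sum = stats[i].max = 0;
		stats[i].min = ~0ULL;	/* "set the min to max", as in the patch */
	}

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		ctx = samples[i][0] > 2 ? 2 : samples[i][0];
		stats_update(&stats[ctx], samples[i][1]);
	}

	printf("usr: count=%llu min=%llu avg=%llu max=%llu\n",
	       stats[2].count, stats[2].min,
	       stats[2].sum / stats[2].count, stats[2].max);
	return 0;
}
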
diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
index 92c658c64f28..1640f121baca 100644
--- a/tools/tracing/rtla/src/timerlat_top.c
+++ b/tools/tracing/rtla/src/timerlat_top.c
@@ -3,6 +3,7 @@
* Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
*/
+#define _GNU_SOURCE
#include <getopt.h>
#include <stdlib.h>
#include <string.h>
@@ -11,16 +12,20 @@
#include <stdio.h>
#include <time.h>
#include <errno.h>
+#include <sched.h>
+#include <pthread.h>
#include "utils.h"
#include "osnoise.h"
#include "timerlat.h"
#include "timerlat_aa.h"
+#include "timerlat_u.h"
struct timerlat_top_params {
char *cpus;
- char *monitored_cpus;
+ cpu_set_t monitored_cpus;
char *trace_output;
+ char *cgroup_name;
unsigned long long runtime;
long long stop_us;
long long stop_total_us;
@@ -35,6 +40,10 @@ struct timerlat_top_params {
int no_aa;
int aa_only;
int dump_tasks;
+ int cgroup;
+ int hk_cpus;
+ int user_top;
+ cpu_set_t hk_cpu_set;
struct sched_attr sched_param;
struct trace_events *events;
};
@@ -42,6 +51,7 @@ struct timerlat_top_params {
struct timerlat_top_cpu {
int irq_count;
int thread_count;
+ int user_count;
unsigned long long cur_irq;
unsigned long long min_irq;
@@ -52,6 +62,11 @@ struct timerlat_top_cpu {
unsigned long long min_thread;
unsigned long long sum_thread;
unsigned long long max_thread;
+
+ unsigned long long cur_user;
+ unsigned long long min_user;
+ unsigned long long sum_user;
+ unsigned long long max_user;
};
struct timerlat_top_data {
@@ -92,6 +107,7 @@ static struct timerlat_top_data *timerlat_alloc_top(int nr_cpus)
for (cpu = 0; cpu < nr_cpus; cpu++) {
data->cpu_data[cpu].min_irq = ~0;
data->cpu_data[cpu].min_thread = ~0;
+ data->cpu_data[cpu].min_user = ~0;
}
return data;
@@ -118,12 +134,18 @@ timerlat_top_update(struct osnoise_tool *tool, int cpu,
update_min(&cpu_data->min_irq, &latency);
update_sum(&cpu_data->sum_irq, &latency);
update_max(&cpu_data->max_irq, &latency);
- } else {
+ } else if (thread == 1) {
cpu_data->thread_count++;
cpu_data->cur_thread = latency;
update_min(&cpu_data->min_thread, &latency);
update_sum(&cpu_data->sum_thread, &latency);
update_max(&cpu_data->max_thread, &latency);
+ } else {
+ cpu_data->user_count++;
+ cpu_data->cur_user = latency;
+ update_min(&cpu_data->min_user, &latency);
+ update_sum(&cpu_data->sum_user, &latency);
+ update_max(&cpu_data->max_user, &latency);
}
}
@@ -150,9 +172,6 @@ timerlat_top_handler(struct trace_seq *s, struct tep_record *record,
timerlat_top_update(top, cpu, thread, latency);
}
- if (!params->no_aa)
- timerlat_aa_handler(s, record, event, context);
-
return 0;
}
@@ -169,15 +188,25 @@ static void timerlat_top_header(struct osnoise_tool *top)
trace_seq_printf(s, "\033[2;37;40m");
trace_seq_printf(s, " Timer Latency ");
+ if (params->user_top)
+ trace_seq_printf(s, " ");
trace_seq_printf(s, "\033[0;0;0m");
trace_seq_printf(s, "\n");
- trace_seq_printf(s, "%-6s | IRQ Timer Latency (%s) | Thread Timer Latency (%s)\n", duration,
+ trace_seq_printf(s, "%-6s | IRQ Timer Latency (%s) | Thread Timer Latency (%s)", duration,
params->output_divisor == 1 ? "ns" : "us",
params->output_divisor == 1 ? "ns" : "us");
+ if (params->user_top) {
+ trace_seq_printf(s, " | Ret user Timer Latency (%s)",
+ params->output_divisor == 1 ? "ns" : "us");
+ }
+
+ trace_seq_printf(s, "\n");
trace_seq_printf(s, "\033[2;30;47m");
trace_seq_printf(s, "CPU COUNT | cur min avg max | cur min avg max");
+ if (params->user_top)
+ trace_seq_printf(s, " | cur min avg max");
trace_seq_printf(s, "\033[0;0;0m");
trace_seq_printf(s, "\n");
}
@@ -230,7 +259,27 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
trace_seq_printf(s, "%9llu ", cpu_data->min_thread / divisor);
trace_seq_printf(s, "%9llu ",
(cpu_data->sum_thread / cpu_data->thread_count) / divisor);
- trace_seq_printf(s, "%9llu\n", cpu_data->max_thread / divisor);
+ trace_seq_printf(s, "%9llu", cpu_data->max_thread / divisor);
+ }
+
+ if (!params->user_top) {
+ trace_seq_printf(s, "\n");
+ return;
+ }
+
+ trace_seq_printf(s, " |");
+
+ if (!cpu_data->user_count) {
+ trace_seq_printf(s, " - ");
+ trace_seq_printf(s, " - ");
+ trace_seq_printf(s, " - ");
+ trace_seq_printf(s, " -\n");
+ } else {
+ trace_seq_printf(s, "%9llu ", cpu_data->cur_user / divisor);
+ trace_seq_printf(s, "%9llu ", cpu_data->min_user / divisor);
+ trace_seq_printf(s, "%9llu ",
+ (cpu_data->sum_user / cpu_data->user_count) / divisor);
+ trace_seq_printf(s, "%9llu\n", cpu_data->max_user / divisor);
}
}
@@ -265,7 +314,7 @@ timerlat_print_stats(struct timerlat_top_params *params, struct osnoise_tool *to
timerlat_top_header(top);
for (i = 0; i < nr_cpus; i++) {
- if (params->cpus && !params->monitored_cpus[i])
+ if (params->cpus && !CPU_ISSET(i, &params->monitored_cpus))
continue;
timerlat_top_print(top, i);
}
@@ -284,8 +333,8 @@ static void timerlat_top_usage(char *usage)
static const char *const msg[] = {
"",
" usage: rtla timerlat [top] [-h] [-q] [-a us] [-d s] [-D] [-n] [-p us] [-i us] [-T us] [-s us] \\",
- " [[-t[=file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] [-c cpu-list] \\",
- " [-P priority] [--dma-latency us] [--aa-only us]",
+ " [[-t[=file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] [-c cpu-list] [-H cpu-list]\\",
+ " [-P priority] [--dma-latency us] [--aa-only us] [-C[=cgroup_name]] [-u]",
"",
" -h/--help: print this menu",
" -a/--auto: set automatic trace mode, stopping the session if argument in us latency is hit",
@@ -295,6 +344,8 @@ static void timerlat_top_usage(char *usage)
" -T/--thread us: stop trace if the thread latency is higher than the argument in us",
" -s/--stack us: save the stack trace at the IRQ if a thread latency is higher than the argument in us",
" -c/--cpus cpus: run the tracer only on the given cpus",
+ " -H/--house-keeping cpus: run rtla control threads only on the given cpus",
+ " -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited",
" -d/--duration time[m|h|d]: duration of the session in seconds",
" -D/--debug: print debug info",
" --dump-tasks: prints the task running on all CPUs if stop conditions are met (depends on !--no-aa)",
@@ -312,6 +363,7 @@ static void timerlat_top_usage(char *usage)
" f:prio - use SCHED_FIFO with prio",
" d:runtime[us|ms|s]:period[us|ms|s] - use SCHED_DEADLINE with runtime and period",
" in nanoseconds",
+ " -u/--user-threads: use rtla user-space threads instead of in-kernel timerlat threads",
NULL,
};
@@ -352,10 +404,12 @@ static struct timerlat_top_params
static struct option long_options[] = {
{"auto", required_argument, 0, 'a'},
{"cpus", required_argument, 0, 'c'},
+ {"cgroup", optional_argument, 0, 'C'},
{"debug", no_argument, 0, 'D'},
{"duration", required_argument, 0, 'd'},
{"event", required_argument, 0, 'e'},
{"help", no_argument, 0, 'h'},
+ {"house-keeping", required_argument, 0, 'H'},
{"irq", required_argument, 0, 'i'},
{"nano", no_argument, 0, 'n'},
{"period", required_argument, 0, 'p'},
@@ -364,6 +418,7 @@ static struct timerlat_top_params
{"stack", required_argument, 0, 's'},
{"thread", required_argument, 0, 'T'},
{"trace", optional_argument, 0, 't'},
+ {"user-threads", no_argument, 0, 'u'},
{"trigger", required_argument, 0, '0'},
{"filter", required_argument, 0, '1'},
{"dma-latency", required_argument, 0, '2'},
@@ -376,7 +431,7 @@ static struct timerlat_top_params
/* getopt_long stores the option index here. */
int option_index = 0;
- c = getopt_long(argc, argv, "a:c:d:De:hi:np:P:qs:t::T:0:1:2:345:",
+ c = getopt_long(argc, argv, "a:c:C::d:De:hH:i:np:P:qs:t::T:u0:1:2:345:",
long_options, &option_index);
/* detect the end of the options. */
@@ -412,11 +467,21 @@ static struct timerlat_top_params
params->aa_only = 1;
break;
case 'c':
- retval = parse_cpu_list(optarg, &params->monitored_cpus);
+ retval = parse_cpu_set(optarg, &params->monitored_cpus);
if (retval)
timerlat_top_usage("\nInvalid -c cpu list\n");
params->cpus = optarg;
break;
+ case 'C':
+ params->cgroup = 1;
+ if (!optarg) {
+ /* will inherit this cgroup */
+ params->cgroup_name = NULL;
+ } else if (*optarg == '=') {
+ /* skip the = */
+ params->cgroup_name = ++optarg;
+ }
+ break;
case 'D':
config_debug = 1;
break;
@@ -440,6 +505,14 @@ static struct timerlat_top_params
case '?':
timerlat_top_usage(NULL);
break;
+ case 'H':
+ params->hk_cpus = 1;
+ retval = parse_cpu_set(optarg, &params->hk_cpu_set);
+ if (retval) {
+ err_msg("Error parsing house keeping CPUs\n");
+ exit(EXIT_FAILURE);
+ }
+ break;
case 'i':
params->stop_us = get_llong_from_str(optarg);
break;
@@ -474,6 +547,9 @@ static struct timerlat_top_params
params->trace_output = "timerlat_trace.txt";
break;
+ case 'u':
+ params->user_top = true;
+ break;
case '0': /* trigger */
if (params->events) {
retval = trace_event_add_trigger(params->events, optarg);
@@ -538,6 +614,7 @@ static int
timerlat_top_apply_config(struct osnoise_tool *top, struct timerlat_top_params *params)
{
int retval;
+ int i;
if (!params->sleep_time)
params->sleep_time = 1;
@@ -548,6 +625,9 @@ timerlat_top_apply_config(struct osnoise_tool *top, struct timerlat_top_params *
err_msg("Failed to apply CPUs config\n");
goto out_err;
}
+ } else {
+ for (i = 0; i < sysconf(_SC_NPROCESSORS_CONF); i++)
+ CPU_SET(i, &params->monitored_cpus);
}
if (params->stop_us) {
@@ -584,6 +664,32 @@ timerlat_top_apply_config(struct osnoise_tool *top, struct timerlat_top_params *
}
}
+ if (params->hk_cpus) {
+ retval = sched_setaffinity(getpid(), sizeof(params->hk_cpu_set),
+ &params->hk_cpu_set);
+ if (retval == -1) {
+ err_msg("Failed to set rtla to the house keeping CPUs\n");
+ goto out_err;
+ }
+ } else if (params->cpus) {
+ /*
+ * Even if the user does not set a house-keeping CPU, try to
+ * move rtla to a CPU set different from the one where the user
+ * set the workload to run.
+ *
+ * No need to check results as this is an automatic attempt.
+ */
+ auto_house_keeping(&params->monitored_cpus);
+ }
+
+ if (params->user_top) {
+ retval = osnoise_set_workload(top->context, 0);
+ if (retval) {
+ err_msg("Failed to set OSNOISE_WORKLOAD option\n");
+ goto out_err;
+ }
+ }
+
return 0;
out_err:
@@ -598,7 +704,6 @@ static struct osnoise_tool
{
struct osnoise_tool *top;
int nr_cpus;
- int retval;
nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
@@ -615,16 +720,6 @@ static struct osnoise_tool
tep_register_event_handler(top->trace.tep, -1, "ftrace", "timerlat",
timerlat_top_handler, top);
- /*
- * If no auto analysis, we are ready.
- */
- if (params->no_aa)
- return top;
-
- retval = timerlat_aa_init(top, nr_cpus, params->dump_tasks);
- if (retval)
- goto out_err;
-
return top;
out_err:
@@ -655,9 +750,12 @@ int timerlat_top_main(int argc, char *argv[])
{
struct timerlat_top_params *params;
struct osnoise_tool *record = NULL;
+ struct timerlat_u_params params_u;
struct osnoise_tool *top = NULL;
+ struct osnoise_tool *aa = NULL;
struct trace_instance *trace;
int dma_latency_fd = -1;
+ pthread_t timerlat_u;
int return_value = 1;
char *max_lat;
int retval;
@@ -694,6 +792,14 @@ int timerlat_top_main(int argc, char *argv[])
}
}
+ if (params->cgroup && !params->user_top) {
+ retval = set_comm_cgroup("timerlat/", params->cgroup_name);
+ if (!retval) {
+ err_msg("Failed to move threads to cgroup\n");
+ goto out_free;
+ }
+ }
+
if (params->dma_latency >= 0) {
dma_latency_fd = set_cpu_dma_latency(params->dma_latency);
if (dma_latency_fd < 0) {
@@ -702,8 +808,6 @@ int timerlat_top_main(int argc, char *argv[])
}
}
- trace_instance_start(trace);
-
if (params->trace_output) {
record = osnoise_init_trace_tool("timerlat");
if (!record) {
@@ -716,13 +820,70 @@ int timerlat_top_main(int argc, char *argv[])
if (retval)
goto out_top;
}
+ }
- trace_instance_start(&record->trace);
+ if (!params->no_aa) {
+ if (params->aa_only) {
+ /* as top is not used for display, use it for aa */
+ aa = top;
+ } else {
+ /* otherwise, a new instance is needed */
+ aa = osnoise_init_tool("timerlat_aa");
+ if (!aa)
+ goto out_top;
+ }
+
+ retval = timerlat_aa_init(aa, params->dump_tasks);
+ if (retval) {
+ err_msg("Failed to enable the auto analysis instance\n");
+ goto out_top;
+ }
+
+ /* if it is re-using the main instance, there is no need to start it */
+ if (aa != top) {
+ retval = enable_timerlat(&aa->trace);
+ if (retval) {
+ err_msg("Failed to enable timerlat tracer\n");
+ goto out_top;
+ }
+ }
}
+ /*
+ * Start the tracers here, after having set all instances.
+ *
+ * Let the trace instance start first, in case a stop-tracing
+ * condition is hit while the other instances are being enabled. The
+ * trace instance is the one with the most valuable information.
+ */
+ if (params->trace_output)
+ trace_instance_start(&record->trace);
+ if (!params->no_aa && aa != top)
+ trace_instance_start(&aa->trace);
+ trace_instance_start(trace);
+
top->start_time = time(NULL);
timerlat_top_set_signals(params);
+ if (params->user_top) {
+ /* rtla asked to stop */
+ params_u.should_run = 1;
+ /* all threads left */
+ params_u.stopped_running = 0;
+
+ params_u.set = &params->monitored_cpus;
+ if (params->set_sched)
+ params_u.sched_param = &params->sched_param;
+ else
+ params_u.sched_param = NULL;
+
+ params_u.cgroup_name = params->cgroup_name;
+
+ retval = pthread_create(&timerlat_u, NULL, timerlat_u_dispatcher, &params_u);
+ if (retval)
+ err_msg("Error creating timerlat user-space threads\n");
+ }
+
while (!stop_tracing) {
sleep(params->sleep_time);
@@ -746,6 +907,18 @@ int timerlat_top_main(int argc, char *argv[])
if (trace_is_off(&top->trace, &record->trace))
break;
+ /* are there still any user-space threads? */
+ if (params->user_top) {
+ if (params_u.stopped_running) {
+ debug_msg("timerlat user space threads stopped!\n");
+ break;
+ }
+ }
+ }
+
+ if (params->user_top && !params_u.stopped_running) {
+ params_u.should_run = 0;
+ sleep(1);
}
timerlat_print_stats(params, top);
@@ -775,13 +948,15 @@ int timerlat_top_main(int argc, char *argv[])
}
out_top:
+ timerlat_aa_destroy();
if (dma_latency_fd >= 0)
close(dma_latency_fd);
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
out_free:
timerlat_free_top(top->data);
- timerlat_aa_destroy();
+ if (aa && aa != top)
+ osnoise_destroy_tool(aa);
osnoise_destroy_tool(record);
osnoise_destroy_tool(top);
free(params);
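
Both timerlat tools (and osnoise top) now pin the rtla control process itself to the -H/--house-keeping CPU set with sched_setaffinity(), falling back to auto_house_keeping() when only -c is given. A minimal sketch of the explicit pinning step, assuming CPU 0 as the house-keeping CPU, is shown below; it is not part of the patch:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t hk_cpu_set;

	CPU_ZERO(&hk_cpu_set);
	CPU_SET(0, &hk_cpu_set);	/* assumed example: CPU 0 is the house-keeping CPU */

	/* move this process (and its future children) to the house-keeping set */
	if (sched_setaffinity(getpid(), sizeof(hk_cpu_set), &hk_cpu_set) == -1) {
		perror("sched_setaffinity");
		return 1;
	}

	printf("control process now pinned to CPU 0\n");
	return 0;
}
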
diff --git a/tools/tracing/rtla/src/timerlat_u.c b/tools/tracing/rtla/src/timerlat_u.c
new file mode 100644
index 000000000000..05e310696dd5
--- /dev/null
+++ b/tools/tracing/rtla/src/timerlat_u.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
+ */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <tracefs.h>
+#include <pthread.h>
+#include <sys/wait.h>
+#include <sys/prctl.h>
+
+#include "utils.h"
+#include "timerlat_u.h"
+
+/*
+ * This is the user-space main for the tool timerlatu/ threads.
+ *
+ * It is as simple as this:
+ * - set affinity
+ * - set priority
+ * - open tracer fd
+ * - spin
+ * - close
+ */
+static int timerlat_u_main(int cpu, struct timerlat_u_params *params)
+{
+ struct sched_param sp = { .sched_priority = 95 };
+ char buffer[1024];
+ int timerlat_fd;
+ cpu_set_t set;
+ int retval;
+
+ /*
+ * This all is only setting up the tool.
+ */
+ CPU_ZERO(&set);
+ CPU_SET(cpu, &set);
+
+ retval = sched_setaffinity(gettid(), sizeof(set), &set);
+ if (retval == -1) {
+ err_msg("Error setting user thread affinity\n");
+ exit(1);
+ }
+
+ if (!params->sched_param) {
+ retval = sched_setscheduler(0, SCHED_FIFO, &sp);
+ if (retval < 0) {
+ err_msg("Error setting timerlat u default priority: %s\n", strerror(errno));
+ exit(1);
+ }
+ } else {
+ retval = __set_sched_attr(getpid(), params->sched_param);
+ if (retval) {
+ /* __set_sched_attr already prints an error message, so just exit */
+ exit(0);
+ }
+ }
+
+ if (params->cgroup_name) {
+ retval = set_pid_cgroup(gettid(), params->cgroup_name);
+ if (!retval) {
+ err_msg("Error setting timerlat u cgroup pid\n");
+ pthread_exit(&retval);
+ }
+ }
+
+ /*
+ * This is the tool's loop. If you want to use it as a base for your own tool...
+ * go ahead.
+ */
+ snprintf(buffer, sizeof(buffer), "osnoise/per_cpu/cpu%d/timerlat_fd", cpu);
+
+ timerlat_fd = tracefs_instance_file_open(NULL, buffer, O_RDONLY);
+ if (timerlat_fd < 0) {
+ err_msg("Error opening %s:%s\n", buffer, strerror(errno));
+ exit(1);
+ }
+
+ debug_msg("User-space timerlat pid %d on cpu %d\n", gettid(), cpu);
+
+ /* TODO: add a 'should continue' check driven by a signal handler */
+ while (true) {
+ retval = read(timerlat_fd, buffer, 1024);
+ if (retval < 0)
+ break;
+ }
+
+ close(timerlat_fd);
+
+ debug_msg("Leaving timerlat pid %d on cpu %d\n", gettid(), cpu);
+ exit(0);
+}
+
+/*
+ * timerlat_u_send_kill - send a kill signal for all processes
+ *
+ * Return the number of processes that received the kill.
+ */
+static int timerlat_u_send_kill(pid_t *procs, int nr_cpus)
+{
+ int killed = 0;
+ int i, retval;
+
+ for (i = 0; i < nr_cpus; i++) {
+ if (!procs[i])
+ continue;
+ retval = kill(procs[i], SIGKILL);
+ if (!retval)
+ killed++;
+ else
+ err_msg("Error killing child process %d\n", procs[i]);
+ }
+
+ return killed;
+}
+
+/**
+ * timerlat_u_dispatcher - dispatch one timerlatu/ process per monitored CPU
+ *
+ * This is a thread main that will fork one new process for each monitored
+ * CPU. It will wait for:
+ *
+ * - rtla telling it to kill the child processes
+ * - some child process dying, after which it cleans up all the processes
+ *
+ * whichever comes first.
+ *
+ */
+void *timerlat_u_dispatcher(void *data)
+{
+ int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ struct timerlat_u_params *params = data;
+ char proc_name[128];
+ int procs_count = 0;
+ int retval = 1;
+ pid_t *procs;
+ int wstatus;
+ pid_t pid;
+ int i;
+
+ debug_msg("Dispatching timerlat u procs\n");
+
+ procs = calloc(nr_cpus, sizeof(pid_t));
+ if (!procs)
+ pthread_exit(&retval);
+
+ for (i = 0; i < nr_cpus; i++) {
+ if (params->set && !CPU_ISSET(i, params->set))
+ continue;
+
+ pid = fork();
+
+ /* child */
+ if (!pid) {
+
+ /*
+ * rename the process
+ */
+ snprintf(proc_name, sizeof(proc_name), "timerlatu/%d", i);
+ pthread_setname_np(pthread_self(), proc_name);
+ prctl(PR_SET_NAME, (unsigned long)proc_name, 0, 0, 0);
+
+ timerlat_u_main(i, params);
+ /* timerlat_u_main should exit()! Anyways... */
+ pthread_exit(&retval);
+ }
+
+ /* parent */
+ if (pid == -1) {
+ timerlat_u_send_kill(procs, nr_cpus);
+ debug_msg("Failed to create child processes");
+ pthread_exit(&retval);
+ }
+
+ procs_count++;
+ procs[i] = pid;
+ }
+
+ while (params->should_run) {
+ /* check if processes died */
+ pid = waitpid(-1, &wstatus, WNOHANG);
+ if (pid != 0) {
+ for (i = 0; i < nr_cpus; i++) {
+ if (procs[i] == pid) {
+ procs[i] = 0;
+ procs_count--;
+ }
+ }
+ break;
+ }
+
+ sleep(1);
+ }
+
+ timerlat_u_send_kill(procs, nr_cpus);
+
+ while (procs_count) {
+ pid = waitpid(-1, &wstatus, 0);
+ if (pid == -1) {
+ err_msg("Failed to monitor child processes");
+ pthread_exit(&retval);
+ }
+ for (i = 0; i < nr_cpus; i++) {
+ if (procs[i] == pid) {
+ procs[i] = 0;
+ procs_count--;
+ }
+ }
+ }
+
+ params->stopped_running = 1;
+
+ free(procs);
+ retval = 0;
+ pthread_exit(&retval);
+
+}
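
timerlat_u_main() above is the whole user-space workload: pin to the target CPU, raise the priority, open the per-CPU timerlat_fd, and block in read() until the timer fires. A trimmed standalone sketch for one CPU follows; it is not part of the patch, it assumes tracefs is mounted at /sys/kernel/tracing (the patch resolves the path through libtracefs instead), and it requires a kernel that exposes osnoise/per_cpu/cpuN/timerlat_fd:

#define _GNU_SOURCE
#include <sched.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const int cpu = 0;			/* assumed example CPU */
	char path[1024], buffer[1024];
	cpu_set_t set;
	int fd;

	/* the workload must run on the CPU whose timerlat_fd it opens */
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set) == -1) {
		perror("sched_setaffinity");
		return 1;
	}

	snprintf(path, sizeof(path),
		 "/sys/kernel/tracing/osnoise/per_cpu/cpu%d/timerlat_fd", cpu);

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror(path);
		return 1;
	}

	/* each read() returns after the timerlat timer fires on this CPU */
	while (read(fd, buffer, sizeof(buffer)) >= 0)
		;

	close(fd);
	return 0;
}
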
diff --git a/tools/tracing/rtla/src/timerlat_u.h b/tools/tracing/rtla/src/timerlat_u.h
new file mode 100644
index 000000000000..661511908957
--- /dev/null
+++ b/tools/tracing/rtla/src/timerlat_u.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
+ */
+
+struct timerlat_u_params {
+ /* timerlat -> timerlat_u: user-space threads can keep running */
+ int should_run;
+ /* timerlat_u -> timerlat: all timerlat_u threads left, no reason to continue */
+ int stopped_running;
+
+ /* threads config */
+ cpu_set_t *set;
+ char *cgroup_name;
+ struct sched_attr *sched_param;
+};
+
+void *timerlat_u_dispatcher(void *data);
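
The two flags above form a simple handshake: the tool clears should_run to ask the dispatcher to wind down, and the dispatcher sets stopped_running once all of its children have exited. A minimal sketch of that pattern, not part of the patch, with a stub standing in for timerlat_u_dispatcher() (build with -pthread; names and the sleeping body are illustrative only):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct u_params {			/* stands in for struct timerlat_u_params */
	volatile int should_run;	/* tool -> dispatcher: keep running */
	volatile int stopped_running;	/* dispatcher -> tool: everything stopped */
};

static void *dispatcher(void *data)
{
	struct u_params *params = data;

	while (params->should_run)
		sleep(1);		/* the real dispatcher waits on child processes here */

	params->stopped_running = 1;
	return NULL;
}

int main(void)
{
	struct u_params params = { .should_run = 1, .stopped_running = 0 };
	pthread_t thread;

	if (pthread_create(&thread, NULL, dispatcher, &params))
		return 1;

	sleep(2);			/* the tool's measurement loop would run here */

	params.should_run = 0;		/* ask the dispatcher to stop... */
	pthread_join(thread, NULL);	/* ...and wait for it to finish */

	printf("stopped_running = %d\n", params.stopped_running);
	return 0;
}
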
diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
index 663a047f794d..623a38908ed5 100644
--- a/tools/tracing/rtla/src/utils.c
+++ b/tools/tracing/rtla/src/utils.c
@@ -3,6 +3,7 @@
* Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
*/
+#define _GNU_SOURCE
#include <dirent.h>
#include <stdarg.h>
#include <stdlib.h>
@@ -88,27 +89,24 @@ void get_duration(time_t start_time, char *output, int output_size)
}
/*
- * parse_cpu_list - parse a cpu_list filling a char vector with cpus set
+ * parse_cpu_set - parse a cpu_list filling cpu_set_t argument
*
- * Receives a cpu list, like 1-3,5 (cpus 1, 2, 3, 5), and then set the char
- * in the monitored_cpus.
+ * Receives a cpu list, like 1-3,5 (cpus 1, 2, 3, 5), and then sets
+ * the corresponding bits in the cpu_set_t argument.
*
- * XXX: convert to a bitmask.
+ * Returns 0 on success, 1 otherwise.
*/
-int parse_cpu_list(char *cpu_list, char **monitored_cpus)
+int parse_cpu_set(char *cpu_list, cpu_set_t *set)
{
- char *mon_cpus;
const char *p;
int end_cpu;
int nr_cpus;
int cpu;
int i;
- nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ CPU_ZERO(set);
- mon_cpus = calloc(nr_cpus, sizeof(char));
- if (!mon_cpus)
- goto err;
+ nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
for (p = cpu_list; *p; ) {
cpu = atoi(p);
@@ -128,12 +126,12 @@ int parse_cpu_list(char *cpu_list, char **monitored_cpus)
end_cpu = cpu;
if (cpu == end_cpu) {
- debug_msg("cpu_list: adding cpu %d\n", cpu);
- mon_cpus[cpu] = 1;
+ debug_msg("cpu_set: adding cpu %d\n", cpu);
+ CPU_SET(cpu, set);
} else {
for (i = cpu; i <= end_cpu; i++) {
- debug_msg("cpu_list: adding cpu %d\n", i);
- mon_cpus[i] = 1;
+ debug_msg("cpu_set: adding cpu %d\n", i);
+ CPU_SET(i, set);
}
}
@@ -141,12 +139,9 @@ int parse_cpu_list(char *cpu_list, char **monitored_cpus)
p++;
}
- *monitored_cpus = mon_cpus;
-
return 0;
-
err:
- debug_msg("Error parsing the cpu list %s", cpu_list);
+ debug_msg("Error parsing the cpu set %s\n", cpu_list);
return 1;
}
@@ -529,3 +524,296 @@ int set_cpu_dma_latency(int32_t latency)
return fd;
}
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
+/*
+ * find_mount - find the mount point of a given fs
+ *
+ * Returns 0 if the mount is not found, otherwise returns 1 and fills mp
+ * with the mount point.
+ */
+static const int find_mount(const char *fs, char *mp, int sizeof_mp)
+{
+ char mount_point[MAX_PATH];
+ char type[100];
+ int found = 0;
+ FILE *fp;
+
+ fp = fopen("/proc/mounts", "r");
+ if (!fp)
+ return 0;
+
+ while (fscanf(fp, "%*s %" STR(MAX_PATH) "s %99s %*s %*d %*d\n", mount_point, type) == 2) {
+ if (strcmp(type, fs) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ fclose(fp);
+
+ if (!found)
+ return 0;
+
+ memset(mp, 0, sizeof_mp);
+ strncpy(mp, mount_point, sizeof_mp - 1);
+
+ debug_msg("Fs %s found at %s\n", fs, mp);
+ return 1;
+}
+
+/*
+ * get_self_cgroup - get the current thread cgroup path
+ *
+ * Parse the /proc/$$/cgroup file to get the thread's cgroup. An example of a line to parse:
+ *
+ * 0::/user.slice/user-0.slice/session-3.scope'\n'
+ *
+ * This function is interested in the content after the second : and before the '\n'.
+ *
+ * Returns 1 if a string was found, 0 otherwise.
+ */
+static int get_self_cgroup(char *self_cg, int sizeof_self_cg)
+{
+ char path[MAX_PATH], *start;
+ int fd, retval;
+
+ snprintf(path, MAX_PATH, "/proc/%d/cgroup", getpid());
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return 0;
+
+ retval = read(fd, path, MAX_PATH);
+
+ close(fd);
+
+ if (retval <= 0)
+ return 0;
+
+ start = path;
+
+ start = strstr(start, ":");
+ if (!start)
+ return 0;
+
+ /* skip ":" */
+ start++;
+
+ start = strstr(start, ":");
+ if (!start)
+ return 0;
+
+ /* skip ":" */
+ start++;
+
+ if (strlen(start) >= sizeof_self_cg)
+ return 0;
+
+ snprintf(self_cg, sizeof_self_cg, "%s", start);
+
+ /* Swap '\n' with '\0' */
+ start = strstr(self_cg, "\n");
+
+ /* there must be '\n' */
+ if (!start)
+ return 0;
+
+ /* ok, it found a string after the second : and before the \n */
+ *start = '\0';
+
+ return 1;
+}
+
+/*
+ * set_pid_cgroup - Set the cgroup of a given pid
+ *
+ * If the cgroup argument is not NULL, the pid is moved to the given cgroup.
+ * Otherwise, the cgroup of the calling, i.e., rtla, thread will be used.
+ *
+ * Supports cgroup v2.
+ *
+ * Returns 1 on success, 0 otherwise.
+ */
+int set_pid_cgroup(pid_t pid, const char *cgroup)
+{
+ char cgroup_path[MAX_PATH - strlen("/cgroup.procs")];
+ char cgroup_procs[MAX_PATH];
+ char pid_str[24];
+ int retval;
+ int cg_fd;
+
+ retval = find_mount("cgroup2", cgroup_path, sizeof(cgroup_path));
+ if (!retval) {
+ err_msg("Did not find cgroupv2 mount point\n");
+ return 0;
+ }
+
+ if (!cgroup) {
+ retval = get_self_cgroup(&cgroup_path[strlen(cgroup_path)],
+ sizeof(cgroup_path) - strlen(cgroup_path));
+ if (!retval) {
+ err_msg("Did not find self cgroup\n");
+ return 0;
+ }
+ } else {
+ snprintf(&cgroup_path[strlen(cgroup_path)],
+ sizeof(cgroup_path) - strlen(cgroup_path), "%s/", cgroup);
+ }
+
+ snprintf(cgroup_procs, MAX_PATH, "%s/cgroup.procs", cgroup_path);
+
+ debug_msg("Using cgroup path at: %s\n", cgroup_procs);
+
+ cg_fd = open(cgroup_procs, O_RDWR);
+ if (cg_fd < 0)
+ return 0;
+
+ snprintf(pid_str, sizeof(pid_str), "%d\n", pid);
+
+ retval = write(cg_fd, pid_str, strlen(pid_str));
+ if (retval < 0)
+ err_msg("Error setting cgroup attributes for pid:%s - %s\n",
+ pid_str, strerror(errno));
+ else
+ debug_msg("Set cgroup attributes for pid:%s\n", pid_str);
+
+ close(cg_fd);
+
+ return (retval >= 0);
+}
+
+/**
+ * set_comm_cgroup - Set cgroup to threads starting with char *comm_prefix
+ *
+ * If cgroup argument is not NULL, the threads will move to the given cgroup.
+ * Otherwise, the cgroup of the calling, i.e., rtla, thread will be used.
+ *
+ * Supports cgroup v2.
+ *
+ * Returns 1 on success, 0 otherwise.
+ */
+int set_comm_cgroup(const char *comm_prefix, const char *cgroup)
+{
+ char cgroup_path[MAX_PATH - strlen("/cgroup.procs")];
+ char cgroup_procs[MAX_PATH];
+ struct dirent *proc_entry;
+ DIR *procfs;
+ int retval;
+ int cg_fd;
+
+ if (strlen(comm_prefix) >= MAX_PATH) {
+ err_msg("Command prefix is too long: %d < strlen(%s)\n",
+ MAX_PATH, comm_prefix);
+ return 0;
+ }
+
+ retval = find_mount("cgroup2", cgroup_path, sizeof(cgroup_path));
+ if (!retval) {
+ err_msg("Did not find cgroupv2 mount point\n");
+ return 0;
+ }
+
+ if (!cgroup) {
+ retval = get_self_cgroup(&cgroup_path[strlen(cgroup_path)],
+ sizeof(cgroup_path) - strlen(cgroup_path));
+ if (!retval) {
+ err_msg("Did not find self cgroup\n");
+ return 0;
+ }
+ } else {
+ snprintf(&cgroup_path[strlen(cgroup_path)],
+ sizeof(cgroup_path) - strlen(cgroup_path), "%s/", cgroup);
+ }
+
+ snprintf(cgroup_procs, MAX_PATH, "%s/cgroup.procs", cgroup_path);
+
+ debug_msg("Using cgroup path at: %s\n", cgroup_procs);
+
+ cg_fd = open(cgroup_procs, O_RDWR);
+ if (cg_fd < 0)
+ return 0;
+
+ procfs = opendir("/proc");
+ if (!procfs) {
+ err_msg("Could not open procfs\n");
+ goto out_cg;
+ }
+
+ while ((proc_entry = readdir(procfs))) {
+
+ retval = procfs_is_workload_pid(comm_prefix, proc_entry);
+ if (!retval)
+ continue;
+
+ retval = write(cg_fd, proc_entry->d_name, strlen(proc_entry->d_name));
+ if (retval < 0) {
+ err_msg("Error setting cgroup attributes for pid:%s - %s\n",
+ proc_entry->d_name, strerror(errno));
+ goto out_procfs;
+ }
+
+ debug_msg("Set cgroup attributes for pid:%s\n", proc_entry->d_name);
+ }
+
+ closedir(procfs);
+ close(cg_fd);
+ return 1;
+
+out_procfs:
+ closedir(procfs);
+out_cg:
+ close(cg_fd);
+ return 0;
+}
+
+/**
+ * auto_house_keeping - Automatically move rtla out of measurement threads
+ *
+ * Try to move rtla away from the monitored CPUs, if possible.
+ *
+ * Returns 1 on success, 0 otherwise.
+ */
+int auto_house_keeping(cpu_set_t *monitored_cpus)
+{
+ cpu_set_t rtla_cpus, house_keeping_cpus;
+ int retval;
+
+ /* first get the CPUs in which rtla can actually run. */
+ retval = sched_getaffinity(getpid(), sizeof(rtla_cpus), &rtla_cpus);
+ if (retval == -1) {
+ debug_msg("Could not get rtla affinity, rtla might run with the threads!\n");
+ return 0;
+ }
+
+ /* then check if the existing setup is already good. */
+ CPU_AND(&house_keeping_cpus, &rtla_cpus, monitored_cpus);
+ if (!CPU_COUNT(&house_keeping_cpus)) {
+ debug_msg("rtla and the monitored CPUs do not share CPUs.");
+ debug_msg("Skipping auto house-keeping\n");
+ return 1;
+ }
+
+ /* remove the intersection */
+ CPU_XOR(&house_keeping_cpus, &rtla_cpus, monitored_cpus);
+
+ /* get only those that rtla can run */
+ CPU_AND(&house_keeping_cpus, &house_keeping_cpus, &rtla_cpus);
+
+ /* is there any cpu left? */
+ if (!CPU_COUNT(&house_keeping_cpus)) {
+ debug_msg("Could not find any CPU for auto house-keeping\n");
+ return 0;
+ }
+
+ retval = sched_setaffinity(getpid(), sizeof(house_keeping_cpus), &house_keeping_cpus);
+ if (retval == -1) {
+ debug_msg("Could not set affinity for auto house-keeping\n");
+ return 0;
+ }
+
+ debug_msg("rtla automatically moved to an auto house-keeping cpu set\n");
+
+ return 1;
+}
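
auto_house_keeping() above derives the fallback CPU set with three cpu_set_t operations: CPU_AND to detect an overlap between rtla's affinity and the monitored CPUs, CPU_XOR to drop that overlap, and a final CPU_AND to keep only CPUs rtla may actually run on. A worked standalone example, not part of the patch, with assumed CPU numbers:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t rtla_cpus, monitored, house_keeping;
	int i;

	/* assumed example: rtla may run on CPUs 0-3, the workload is monitored on 2-3 */
	CPU_ZERO(&rtla_cpus);
	for (i = 0; i < 4; i++)
		CPU_SET(i, &rtla_cpus);

	CPU_ZERO(&monitored);
	CPU_SET(2, &monitored);
	CPU_SET(3, &monitored);

	/* if the two sets do not intersect, there is nothing to do */
	CPU_AND(&house_keeping, &rtla_cpus, &monitored);
	if (!CPU_COUNT(&house_keeping)) {
		printf("already disjoint, no auto house-keeping needed\n");
		return 0;
	}

	/* remove the intersection, then keep only CPUs rtla can actually run on */
	CPU_XOR(&house_keeping, &rtla_cpus, &monitored);
	CPU_AND(&house_keeping, &house_keeping, &rtla_cpus);

	for (i = 0; i < CPU_SETSIZE; i++)
		if (CPU_ISSET(i, &house_keeping))
			printf("house-keeping CPU: %d\n", i);	/* expect 0 and 1 */

	return 0;
}
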
diff --git a/tools/tracing/rtla/src/utils.h b/tools/tracing/rtla/src/utils.h
index 90e4f52a030b..04ed1e650495 100644
--- a/tools/tracing/rtla/src/utils.h
+++ b/tools/tracing/rtla/src/utils.h
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+
#include <stdint.h>
#include <time.h>
+#include <sched.h>
/*
* '18446744073709551615\0'
@@ -54,8 +56,13 @@ struct sched_attr {
};
int parse_prio(char *arg, struct sched_attr *sched_param);
+int parse_cpu_set(char *cpu_list, cpu_set_t *set);
+int __set_sched_attr(int pid, struct sched_attr *attr);
int set_comm_sched_attr(const char *comm_prefix, struct sched_attr *attr);
+int set_comm_cgroup(const char *comm_prefix, const char *cgroup);
+int set_pid_cgroup(pid_t pid, const char *cgroup);
int set_cpu_dma_latency(int32_t latency);
+int auto_house_keeping(cpu_set_t *monitored_cpus);
#define ns_to_usf(x) (((double)x/1000))
#define ns_to_per(total, part) ((part * 100) / (double)total)
diff --git a/tools/usb/usbip/configure.ac b/tools/usb/usbip/configure.ac
index 607d05c5ccfd..8debf934f8b7 100644
--- a/tools/usb/usbip/configure.ac
+++ b/tools/usb/usbip/configure.ac
@@ -94,11 +94,11 @@ AC_SUBST([USBIDS_DIR])
AC_MSG_CHECKING([whether to use fortify])
AC_ARG_WITH([fortify],
[AS_HELP_STRING([--with-fortify],
- [use _FORTIFY_SROUCE option when compiling)])],
+ [use _FORTIFY_SOURCE=2 option when compiling)])],
dnl [ACTION-IF-GIVEN]
[if test "$withval" = "yes"; then
AC_MSG_RESULT([yes])
- CFLAGS="$CFLAGS -D_FORTIFY_SOURCE -O"
+ CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=2 -O"
else
AC_MSG_RESULT([no])
CFLAGS="$CFLAGS -U_FORTIFY_SOURCE"
diff --git a/tools/usb/usbip/src/usbip_attach.c b/tools/usb/usbip/src/usbip_attach.c
index b4aeb9f1f493..531a415538f9 100644
--- a/tools/usb/usbip/src/usbip_attach.c
+++ b/tools/usb/usbip/src/usbip_attach.c
@@ -86,7 +86,7 @@ static int import_device(int sockfd, struct usbip_usb_device *udev)
rc = usbip_vhci_driver_open();
if (rc < 0) {
- err("open vhci_driver");
+ err("open vhci_driver (is vhci_hcd loaded?)");
goto err_out;
}
diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c
index aec993159036..b29101986b5a 100644
--- a/tools/usb/usbip/src/usbip_detach.c
+++ b/tools/usb/usbip/src/usbip_detach.c
@@ -50,7 +50,7 @@ static int detach_port(char *port)
ret = usbip_vhci_driver_open();
if (ret < 0) {
- err("open vhci_driver");
+ err("open vhci_driver (is vhci_hcd loaded?)");
return -1;
}
diff --git a/tools/usb/usbip/src/usbip_port.c b/tools/usb/usbip/src/usbip_port.c
index 4d14387df13d..21a20e378419 100644
--- a/tools/usb/usbip/src/usbip_port.c
+++ b/tools/usb/usbip/src/usbip_port.c
@@ -18,7 +18,7 @@ static int list_imported_devices(void)
ret = usbip_vhci_driver_open();
if (ret < 0) {
- err("open vhci_driver");
+ err("open vhci_driver (is vhci_hcd loaded?)");
goto err_names_free;
}
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
index 7b7139d97d74..d128925980e0 100644
--- a/tools/virtio/Makefile
+++ b/tools/virtio/Makefile
@@ -4,7 +4,18 @@ test: virtio_test vringh_test
virtio_test: virtio_ring.o virtio_test.o
vringh_test: vringh_test.o vringh.o virtio_ring.o
-CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h -mfunction-return=thunk -fcf-protection=none -mindirect-branch-register
+try-run = $(shell set -e; \
+ if ($(1)) >/dev/null 2>&1; \
+ then echo "$(2)"; \
+ else echo "$(3)"; \
+ fi)
+
+__cc-option = $(call try-run,\
+ $(1) -Werror $(2) -c -x c /dev/null -o /dev/null,$(2),)
+cc-option = $(call __cc-option, $(CC),$(1))
+
+CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h $(call cc-option,-mfunction-return=thunk) $(call cc-option,-fcf-protection=none) $(call cc-option,-mindirect-branch-register)
+
CFLAGS += -pthread
LDFLAGS += -pthread
vpath %.c ../../drivers/virtio ../../drivers/vhost
diff --git a/usr/Makefile b/usr/Makefile
index 59d9e8b07a01..f8e1ad19e05c 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -64,6 +64,7 @@ quiet_cmd_initfs = GEN $@
$(CONFIG_SHELL) $< -o $@ -l $(obj)/.initramfs_data.cpio.d \
$(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \
$(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID)) \
+ $(if $(KBUILD_BUILD_TIMESTAMP), -d "$(KBUILD_BUILD_TIMESTAMP)") \
$(ramfs-input)
# We rebuild initramfs_data.cpio if:
diff --git a/usr/gen_initramfs.sh b/usr/gen_initramfs.sh
index 63476bb70b41..14b5782f961a 100755
--- a/usr/gen_initramfs.sh
+++ b/usr/gen_initramfs.sh
@@ -23,6 +23,7 @@ $0 [-o <file>] [-l <dep_list>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
-g <gid> Group ID to map to group ID 0 (root).
<gid> is only meaningful if <cpio_source> is a
directory. "squash" forces all files to gid 0.
+ -d <date> Use date for all file mtime values
<cpio_source> File list or directory for cpio archive.
If <cpio_source> is a .cpio file it will be used
as direct input to initramfs.
@@ -190,6 +191,7 @@ prog=$0
root_uid=0
root_gid=0
dep_list=
+timestamp=
cpio_list=$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)
output="/dev/stdout"
@@ -218,6 +220,13 @@ while [ $# -gt 0 ]; do
[ "$root_gid" = "-1" ] && root_gid=$(id -g || echo 0)
shift
;;
+ "-d") # date for file mtimes
+ timestamp="$(date -d"$1" +%s || :)"
+ if test -n "$timestamp"; then
+ timestamp="-t $timestamp"
+ fi
+ shift
+ ;;
"-h")
usage
exit 0
@@ -237,11 +246,4 @@ done
# If output_file is set we will generate cpio archive
# we are careful to delete tmp files
-timestamp=
-if test -n "$KBUILD_BUILD_TIMESTAMP"; then
- timestamp="$(date -d"$KBUILD_BUILD_TIMESTAMP" +%s || :)"
- if test -n "$timestamp"; then
- timestamp="-t $timestamp"
- fi
-fi
usr/gen_init_cpio $timestamp $cpio_list > $output
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 5ef88f5a0864..1b90acb6e3fe 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -186,15 +186,10 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
r = kvm_io_bus_unregister_dev(kvm,
zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
-
- kvm_iodevice_destructor(&dev->dev);
-
/*
* On failure, unregister destroys all devices on the
- * bus _except_ the target device, i.e. coalesced_zones
- * has been modified. Bail after destroying the target
- * device, there's no need to restart the walk as there
- * aren't any zones left.
+ * bus, including the target device. There's no need
+ * to restart the walk as there aren't any zones left.
*/
if (r)
break;
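
The hunk above works because kvm_io_bus_unregister_dev() now invokes the device's destructor itself (see the kvm_main.c changes further down), so keeping the explicit kvm_iodevice_destructor(&dev->dev) call here would destroy the device twice. A minimal, self-contained sketch of that destructor-callback ownership pattern, with made-up names standing in for the kernel's structures:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel's io-device plumbing. */
struct io_device;

struct io_device_ops {
	/* Invoked exactly once by the bus code when the device is unregistered. */
	void (*destructor)(struct io_device *dev);
};

struct io_device {
	const struct io_device_ops *ops;
};

/* A concrete device that embeds struct io_device and frees itself. */
struct zone_device {
	struct io_device dev;	/* first member, so the cast below is valid */
	int zone_id;
};

static void zone_destructor(struct io_device *dev)
{
	struct zone_device *zd = (struct zone_device *)dev;

	printf("destroying zone %d\n", zd->zone_id);
	free(zd);
}

static const struct io_device_ops zone_ops = { .destructor = zone_destructor };

/* Unregistration owns destruction; callers must not destroy the device again. */
static void bus_unregister(struct io_device *dev)
{
	if (dev->ops->destructor)
		dev->ops->destructor(dev);
}

int main(void)
{
	struct zone_device *zd = calloc(1, sizeof(*zd));

	if (!zd)
		return 1;
	zd->dev.ops = &zone_ops;
	zd->zone_id = 42;
	bus_unregister(&zd->dev);	/* the only point where the device is freed */
	return 0;
}

Once unregistration takes ownership like this, an extra destructor call or free in the caller becomes a double free, which is why the call is dropped from coalesced_mmio.c.
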
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b0af834ffa95..89912a17f5d5 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -889,9 +889,9 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
unlock_fail:
mutex_unlock(&kvm->slots_lock);
+ kfree(p);
fail:
- kfree(p);
eventfd_ctx_put(eventfd);
return ret;
@@ -901,7 +901,7 @@ static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_ioeventfd *args)
{
- struct _ioeventfd *p, *tmp;
+ struct _ioeventfd *p;
struct eventfd_ctx *eventfd;
struct kvm_io_bus *bus;
int ret = -ENOENT;
@@ -915,8 +915,7 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
mutex_lock(&kvm->slots_lock);
- list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
-
+ list_for_each_entry(p, &kvm->ioeventfds, list) {
if (p->bus_idx != bus_idx ||
p->eventfd != eventfd ||
p->addr != args->addr ||
@@ -931,7 +930,6 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
bus = kvm_get_bus(kvm, bus_idx);
if (bus)
bus->ioeventfd_count--;
- ioeventfd_release(p);
ret = 0;
break;
}
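
The kfree(p) move in kvm_assign_ioeventfd_idx() keeps the function on the usual goto-unwind idiom, where each error label releases only what had been acquired before the corresponding jump; the deassign path no longer releases the entry itself, presumably because the ioeventfd device's destructor (not shown in this excerpt) now runs from kvm_io_bus_unregister_dev(). A generic, self-contained illustration of the unwind idiom, with all names made up:

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct watcher {
	char *name;
	int fd;
};

/*
 * Each label undoes exactly the steps that succeeded before the matching
 * goto, so nothing is leaked and nothing is freed twice.
 */
static struct watcher *watcher_create(const char *name, const char *path)
{
	struct watcher *w;

	w = calloc(1, sizeof(*w));
	if (!w)
		goto fail;

	w->name = strdup(name);
	if (!w->name)
		goto free_watcher;

	w->fd = open(path, O_RDONLY);
	if (w->fd < 0)
		goto free_name;

	return w;

free_name:
	free(w->name);
free_watcher:
	free(w);
fail:
	return NULL;
}

int main(void)
{
	struct watcher *w = watcher_create("demo", "/dev/null");

	if (!w)
		return 1;
	close(w->fd);
	free(w->name);
	free(w);
	return 0;
}
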
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 19f301ef23c9..dfbaafbe3a00 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -154,11 +154,6 @@ static unsigned long long kvm_active_vms;
static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
-__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
-{
-}
-
__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}
@@ -521,18 +516,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
return container_of(mn, struct kvm, mmu_notifier);
}
-static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int idx;
-
- idx = srcu_read_lock(&kvm->srcu);
- kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
- srcu_read_unlock(&kvm->srcu, idx);
-}
-
typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
@@ -910,7 +893,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
- .invalidate_range = kvm_mmu_notifier_invalidate_range,
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
@@ -3891,7 +3873,10 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
static int vcpu_get_pid(void *data, u64 *val)
{
struct kvm_vcpu *vcpu = data;
- *val = pid_nr(rcu_access_pointer(vcpu->pid));
+
+ rcu_read_lock();
+ *val = pid_nr(rcu_dereference(vcpu->pid));
+ rcu_read_unlock();
return 0;
}
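
The vcpu_get_pid() change above swaps rcu_access_pointer(), which only fetches the pointer value and gives no protection for dereferencing the object behind it, for rcu_dereference() inside an explicit read-side critical section; pid_nr() reads fields of the struct pid, so the object must be kept alive across the call. A condensed, kernel-style restatement of the fixed pattern (not a standalone program):

/*
 * rcu_access_pointer(p): fetch the pointer value only; the pointed-to
 *                        object must not be dereferenced through it.
 * rcu_dereference(p):    fetch the pointer for dereferencing; only valid
 *                        between rcu_read_lock() and rcu_read_unlock(),
 *                        which keep the object from being freed meanwhile.
 */
static u64 example_vcpu_pid(struct kvm_vcpu *vcpu)
{
	u64 nr;

	rcu_read_lock();	/* begin read-side critical section */
	nr = pid_nr(rcu_dereference(vcpu->pid));	/* pid_nr() dereferences, so it must be protected */
	rcu_read_unlock();

	return nr;
}
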
@@ -3993,7 +3978,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
if (r < 0)
goto kvm_put_xa_release;
- if (KVM_BUG_ON(!!xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
+ if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
r = -EINVAL;
goto kvm_put_xa_release;
}
@@ -4623,7 +4608,7 @@ int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
return -EINVAL;
}
-static bool kvm_are_all_memslots_empty(struct kvm *kvm)
+bool kvm_are_all_memslots_empty(struct kvm *kvm)
{
int i;
@@ -4636,6 +4621,7 @@ static bool kvm_are_all_memslots_empty(struct kvm *kvm)
return true;
}
+EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
struct kvm_enable_cap *cap)
@@ -5315,6 +5301,12 @@ static void hardware_disable_all(void)
}
#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
+static void kvm_iodevice_destructor(struct kvm_io_device *dev)
+{
+ if (dev->ops->destructor)
+ dev->ops->destructor(dev);
+}
+
static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
int i;
@@ -5538,7 +5530,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
- int i, j;
+ int i;
struct kvm_io_bus *new_bus, *bus;
lockdep_assert_held(&kvm->slots_lock);
@@ -5568,18 +5560,19 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
- /* Destroy the old bus _after_ installing the (null) bus. */
+ /*
+ * If NULL bus is installed, destroy the old bus, including all the
+ * attached devices. Otherwise, destroy the caller's device only.
+ */
if (!new_bus) {
pr_err("kvm: failed to shrink bus, removing it completely\n");
- for (j = 0; j < bus->dev_count; j++) {
- if (j == i)
- continue;
- kvm_iodevice_destructor(bus->range[j].dev);
- }
+ kvm_io_bus_destroy(bus);
+ return -ENOMEM;
}
+ kvm_iodevice_destructor(dev);
kfree(bus);
- return new_bus ? 0 : -ENOMEM;
+ return 0;
}
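
Putting the pieces of the rewritten kvm_io_bus_unregister_dev() tail together, the ordering it relies on is: publish the shrunken (or NULL) bus, wait for every SRCU reader of the old bus to drain, and only then destroy devices and free the old bus. A condensed sketch of that sequence, simplified from the hunk above (not the complete function; slot lookup and other error handling omitted):

static int example_bus_shrink(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_bus *old_bus,
			      struct kvm_io_bus *new_bus,
			      struct kvm_io_device *dev)
{
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);	/* no reader can still hold old_bus */

	if (!new_bus) {
		/* Allocating the shrunken bus failed: tear the whole bus down. */
		kvm_io_bus_destroy(old_bus);
		return -ENOMEM;
	}

	kvm_iodevice_destructor(dev);	/* destroy only the removed device */
	kfree(old_bus);
	return 0;
}
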
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,